file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
stats.py
|
#!/usr/bin/env python3
# Usage:
# ./stats.py results.csv chart.svg
# ./stats.py official.csv official_chart.svg
import argparse
import csv
import json
import subprocess
# Status codes used in the results CSV (one flag per renderer column).
UNKNOWN = 0
PASSED = 1
FAILED = 2
CRASHED = 3
PARTIAL = 4
OUT_OF_SCOPE = 5
# NOTE(review): only PASSED and UNKNOWN are read by the tally below; the
# remaining codes presumably document the full status range emitted by the
# results generator — confirm against its source.
class RowData:
|
# Parse CLI arguments: input CSV of per-renderer test results, output SVG name.
parser = argparse.ArgumentParser()
parser.add_argument('input', help='CSV file')
parser.add_argument('output', help='SVG file')
args = parser.parse_args()

# Load every result row, skipping the CSV header ("title" in the first column).
rows = []
with open(args.input, 'r') as f:
    for row in csv.reader(f):
        if row[0] == 'title':
            continue
        # Columns 1..9 are the per-renderer status flags for this test file.
        rows.append(RowData(row[0], [int(v) for v in row[1:10]]))

# Tally passes per renderer; UNKNOWN is counted as a pass, same as before.
passed = [0] * 9
for row in rows:
    for idx, flag in enumerate(row.flags):
        if flag == PASSED or flag == UNKNOWN:
            passed[idx] += 1

# Chart description consumed by the external `barh` tool.
barh_data = json.dumps(
    {
        "items_font": {
            "family": "Arial",
            "size": 12
        },
        "items": [
            {
                "name": "resvg 0.14.1",
                "value": passed[2]
            },
            {
                "name": "Chromium r856583",
                "value": passed[0]
            },
            {
                "name": "Firefox 87",
                "value": passed[1]
            },
            {
                "name": "Inkscape 1.0.2",
                "value": passed[4]
            },
            {
                "name": "librsvg 2.51.1",
                "value": passed[5]
            },
            {
                "name": "Batik 1.14",
                "value": passed[3]
            },
            {
                "name": "SVG.NET 3.2.3",
                "value": passed[6]
            },
            {
                "name": "QtSvg 5.15.2",
                "value": passed[8]
            },
            {
                "name": "wxSvg 1.5.11",
                "value": passed[7]
            }
        ],
        "hor_axis": {
            "title": "Tests passed",
            "round_tick_values": True,
            "width": 700,
            "max_value": len(rows)
        }
    }, indent=4)

with open('chart.json', 'w') as f:
    f.write(barh_data)

# NOTE(review): the SVG lands in site/images/<output>; confirm that path is
# intended when running from the repository root.
try:
    subprocess.check_call(['./barh', 'chart.json', 'site/images/' + args.output])
except FileNotFoundError:
    print('Error: \'barh\' executable is not found.\n'
          'You should build https://github.com/RazrFalcon/barh '
          'and link resulting binary to the current directory.')
|
def __init__(self, name, flags):
self.name = name
self.flags = flags
|
pwd_tog.rs
|
// Generated register accessor aliases (svd2rust): `R` reads PWD_TOG, `W` writes it.
#[doc = "Reader of register PWD_TOG"]
pub type R = crate::R<u32, super::PWD_TOG>;
#[doc = "Writer for register PWD_TOG"]
pub type W = crate::W<u32, super::PWD_TOG>;
#[doc = "Register PWD_TOG `reset()`'s with value 0x001e_1c00"]
impl crate::ResetValue for super::PWD_TOG {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        // 0x001e_1c00 = bits 10-12 and 17-20 set: every PWD field below
        // (TXPWDFS..RXPWDRX) defaults to 1 (powered down), matching the
        // "Value on reset: 1" notes on each field.
        0x001e_1c00
    }
}
// --- TXPWDFS (bit 10, per `R::txpwdfs` below): USB full-speed driver power-down ---
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TXPWDFS_A {
    #[doc = "0: Normal operation."]
    _0,
    #[doc = "1: Power-down the USB full-speed drivers. This turns off the current starvation sources and puts the drivers into high-impedance output"]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<TXPWDFS_A> for bool {
    #[inline(always)]
    fn from(variant: TXPWDFS_A) -> Self {
        match variant {
            TXPWDFS_A::_0 => false,
            TXPWDFS_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `TXPWDFS`"]
pub type TXPWDFS_R = crate::R<bool, TXPWDFS_A>;
impl TXPWDFS_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TXPWDFS_A {
        match self.bits {
            false => TXPWDFS_A::_0,
            true => TXPWDFS_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == TXPWDFS_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == TXPWDFS_A::_1
    }
}
#[doc = "Write proxy for field `TXPWDFS`"]
pub struct TXPWDFS_W<'a> {
    w: &'a mut W,
}
impl<'a> TXPWDFS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
|
self.bit(variant.into())
}
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(TXPWDFS_A::_0)
}
#[doc = "Power-down the USB full-speed drivers. This turns off the current starvation sources and puts the drivers into high-impedance output"]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(TXPWDFS_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
// --- TXPWDIBIAS (bit 11, per `R::txpwdibias` below): transmitter bias power-down ---
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TXPWDIBIAS_A {
    #[doc = "0: Normal operation"]
    _0,
    #[doc = "1: Power-down the USB PHY current bias block for the transmitter. This bit should be set only when the USB is in suspend mode. This effectively powers down the entire USB transmit path"]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<TXPWDIBIAS_A> for bool {
    #[inline(always)]
    fn from(variant: TXPWDIBIAS_A) -> Self {
        match variant {
            TXPWDIBIAS_A::_0 => false,
            TXPWDIBIAS_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `TXPWDIBIAS`"]
pub type TXPWDIBIAS_R = crate::R<bool, TXPWDIBIAS_A>;
impl TXPWDIBIAS_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TXPWDIBIAS_A {
        match self.bits {
            false => TXPWDIBIAS_A::_0,
            true => TXPWDIBIAS_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == TXPWDIBIAS_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == TXPWDIBIAS_A::_1
    }
}
#[doc = "Write proxy for field `TXPWDIBIAS`"]
pub struct TXPWDIBIAS_W<'a> {
    w: &'a mut W,
}
impl<'a> TXPWDIBIAS_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 11 of the register, then set it again when `value` is true.
        const MASK: u32 = 1 << 11;
        self.w.bits &= !MASK;
        if value {
            self.w.bits |= MASK;
        }
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TXPWDIBIAS_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TXPWDIBIAS_A::_0)
    }
    #[doc = "Power-down the USB PHY current bias block for the transmitter. This bit should be set only when the USB is in suspend mode. This effectively powers down the entire USB transmit path"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TXPWDIBIAS_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// --- TXPWDV2I (bit 12, per `R::txpwdv2i` below): transmit V-to-I converter power-down ---
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TXPWDV2I_A {
    #[doc = "0: Normal operation."]
    _0,
    #[doc = "1: Power-down the USB PHY transmit V-to-I converter and the current mirror"]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<TXPWDV2I_A> for bool {
    #[inline(always)]
    fn from(variant: TXPWDV2I_A) -> Self {
        match variant {
            TXPWDV2I_A::_0 => false,
            TXPWDV2I_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `TXPWDV2I`"]
pub type TXPWDV2I_R = crate::R<bool, TXPWDV2I_A>;
impl TXPWDV2I_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TXPWDV2I_A {
        match self.bits {
            false => TXPWDV2I_A::_0,
            true => TXPWDV2I_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == TXPWDV2I_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == TXPWDV2I_A::_1
    }
}
#[doc = "Write proxy for field `TXPWDV2I`"]
pub struct TXPWDV2I_W<'a> {
    w: &'a mut W,
}
impl<'a> TXPWDV2I_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 12 of the register, then set it again when `value` is true.
        const MASK: u32 = 1 << 12;
        self.w.bits &= !MASK;
        if value {
            self.w.bits |= MASK;
        }
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TXPWDV2I_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TXPWDV2I_A::_0)
    }
    #[doc = "Power-down the USB PHY transmit V-to-I converter and the current mirror"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TXPWDV2I_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// --- RXPWDENV (bit 17, per `R::rxpwdenv` below): HS receiver envelope detector power-down ---
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWDENV_A {
    #[doc = "0: Normal operation."]
    _0,
    #[doc = "1: Power-down the USB high-speed receiver envelope detector (squelch signal)"]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<RXPWDENV_A> for bool {
    #[inline(always)]
    fn from(variant: RXPWDENV_A) -> Self {
        match variant {
            RXPWDENV_A::_0 => false,
            RXPWDENV_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `RXPWDENV`"]
pub type RXPWDENV_R = crate::R<bool, RXPWDENV_A>;
impl RXPWDENV_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RXPWDENV_A {
        match self.bits {
            false => RXPWDENV_A::_0,
            true => RXPWDENV_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == RXPWDENV_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == RXPWDENV_A::_1
    }
}
#[doc = "Write proxy for field `RXPWDENV`"]
pub struct RXPWDENV_W<'a> {
    w: &'a mut W,
}
impl<'a> RXPWDENV_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 17 of the register, then set it again when `value` is true.
        const MASK: u32 = 1 << 17;
        self.w.bits &= !MASK;
        if value {
            self.w.bits |= MASK;
        }
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: RXPWDENV_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(RXPWDENV_A::_0)
    }
    #[doc = "Power-down the USB high-speed receiver envelope detector (squelch signal)"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(RXPWDENV_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// --- RXPWD1PT1 (bit 18, per `R::rxpwd1pt1` below): FS differential receiver power-down ---
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWD1PT1_A {
    #[doc = "0: Normal operation"]
    _0,
    #[doc = "1: Power-down the USB full-speed differential receiver."]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<RXPWD1PT1_A> for bool {
    #[inline(always)]
    fn from(variant: RXPWD1PT1_A) -> Self {
        match variant {
            RXPWD1PT1_A::_0 => false,
            RXPWD1PT1_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `RXPWD1PT1`"]
pub type RXPWD1PT1_R = crate::R<bool, RXPWD1PT1_A>;
impl RXPWD1PT1_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RXPWD1PT1_A {
        match self.bits {
            false => RXPWD1PT1_A::_0,
            true => RXPWD1PT1_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == RXPWD1PT1_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == RXPWD1PT1_A::_1
    }
}
#[doc = "Write proxy for field `RXPWD1PT1`"]
pub struct RXPWD1PT1_W<'a> {
    w: &'a mut W,
}
impl<'a> RXPWD1PT1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 18 of the register, then set it again when `value` is true.
        const MASK: u32 = 1 << 18;
        self.w.bits &= !MASK;
        if value {
            self.w.bits |= MASK;
        }
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: RXPWD1PT1_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(RXPWD1PT1_A::_0)
    }
    #[doc = "Power-down the USB full-speed differential receiver."]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(RXPWD1PT1_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// --- RXPWDDIFF (bit 19, per `R::rxpwddiff` below): HS differential receiver power-down ---
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWDDIFF_A {
    #[doc = "0: Normal operation."]
    _0,
    #[doc = "1: Power-down the USB high-speed differential receiver"]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<RXPWDDIFF_A> for bool {
    #[inline(always)]
    fn from(variant: RXPWDDIFF_A) -> Self {
        match variant {
            RXPWDDIFF_A::_0 => false,
            RXPWDDIFF_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `RXPWDDIFF`"]
pub type RXPWDDIFF_R = crate::R<bool, RXPWDDIFF_A>;
impl RXPWDDIFF_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RXPWDDIFF_A {
        match self.bits {
            false => RXPWDDIFF_A::_0,
            true => RXPWDDIFF_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == RXPWDDIFF_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == RXPWDDIFF_A::_1
    }
}
#[doc = "Write proxy for field `RXPWDDIFF`"]
pub struct RXPWDDIFF_W<'a> {
    w: &'a mut W,
}
impl<'a> RXPWDDIFF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 19 of the register, then set it again when `value` is true.
        const MASK: u32 = 1 << 19;
        self.w.bits &= !MASK;
        if value {
            self.w.bits |= MASK;
        }
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: RXPWDDIFF_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation."]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(RXPWDDIFF_A::_0)
    }
    #[doc = "Power-down the USB high-speed differential receiver"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(RXPWDDIFF_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// --- RXPWDRX (bit 20, per `R::rxpwdrx` below): whole receiver block power-down ---
#[doc = "This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWDRX_A {
    #[doc = "0: Normal operation"]
    _0,
    #[doc = "1: Power-down the entire USB PHY receiver block except for the full-speed differential receiver"]
    _1,
}
// Map the enumerated value onto its raw single-bit representation.
impl From<RXPWDRX_A> for bool {
    #[inline(always)]
    fn from(variant: RXPWDRX_A) -> Self {
        match variant {
            RXPWDRX_A::_0 => false,
            RXPWDRX_A::_1 => true,
        }
    }
}
#[doc = "Reader of field `RXPWDRX`"]
pub type RXPWDRX_R = crate::R<bool, RXPWDRX_A>;
impl RXPWDRX_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RXPWDRX_A {
        match self.bits {
            false => RXPWDRX_A::_0,
            true => RXPWDRX_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        *self == RXPWDRX_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        *self == RXPWDRX_A::_1
    }
}
#[doc = "Write proxy for field `RXPWDRX`"]
pub struct RXPWDRX_W<'a> {
    w: &'a mut W,
}
impl<'a> RXPWDRX_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 20 of the register, then set it again when `value` is true.
        const MASK: u32 = 1 << 20;
        self.w.bits &= !MASK;
        if value {
            self.w.bits |= MASK;
        }
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: RXPWDRX_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(RXPWDRX_A::_0)
    }
    #[doc = "Power-down the entire USB PHY receiver block except for the full-speed differential receiver"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(RXPWDRX_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// Field readers: each extracts one bit of PWD_TOG at its documented offset.
impl R {
    #[doc = "Bit 10 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn txpwdfs(&self) -> TXPWDFS_R {
        TXPWDFS_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 11 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn txpwdibias(&self) -> TXPWDIBIAS_R {
        TXPWDIBIAS_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 12 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn txpwdv2i(&self) -> TXPWDV2I_R {
        TXPWDV2I_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 17 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwdenv(&self) -> RXPWDENV_R {
        RXPWDENV_R::new(((self.bits >> 17) & 0x01) != 0)
    }
    #[doc = "Bit 18 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwd1pt1(&self) -> RXPWD1PT1_R {
        RXPWD1PT1_R::new(((self.bits >> 18) & 0x01) != 0)
    }
    #[doc = "Bit 19 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwddiff(&self) -> RXPWDDIFF_R {
        RXPWDDIFF_R::new(((self.bits >> 19) & 0x01) != 0)
    }
    #[doc = "Bit 20 - This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwdrx(&self) -> RXPWDRX_R {
        RXPWDRX_R::new(((self.bits >> 20) & 0x01) != 0)
    }
}
// Field writers: each returns a write proxy targeting one bit of PWD_TOG.
impl W {
    #[doc = "Bit 10 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn txpwdfs(&mut self) -> TXPWDFS_W {
        TXPWDFS_W { w: self }
    }
    #[doc = "Bit 11 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn txpwdibias(&mut self) -> TXPWDIBIAS_W {
        TXPWDIBIAS_W { w: self }
    }
    #[doc = "Bit 12 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn txpwdv2i(&mut self) -> TXPWDV2I_W {
        TXPWDV2I_W { w: self }
    }
    #[doc = "Bit 17 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwdenv(&mut self) -> RXPWDENV_W {
        RXPWDENV_W { w: self }
    }
    #[doc = "Bit 18 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwd1pt1(&mut self) -> RXPWD1PT1_W {
        RXPWD1PT1_W { w: self }
    }
    #[doc = "Bit 19 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwddiff(&mut self) -> RXPWDDIFF_W {
        RXPWDDIFF_W { w: self }
    }
    #[doc = "Bit 20 - This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of USBPHY_CTRL is enabled"]
    #[inline(always)]
    pub fn rxpwdrx(&mut self) -> RXPWDRX_W {
        RXPWDRX_W { w: self }
    }
}
|
pub fn variant(self, variant: TXPWDFS_A) -> &'a mut W {
{
|
client.py
|
import requests
from bs4 import BeautifulSoup
class Client:
def __init__(self, base_url, user, passw, cert=None, cafile=None):
    """Create a WebDAV client.

    base_url -- server prefix; request paths are string-concatenated onto it
    user, passw -- HTTP basic-auth credentials
    cert -- optional client certificate for requests
    cafile -- optional CA bundle path, stored on session.verify
    """
    self.base_url = base_url
    self.session = requests.Session()
    self.session.auth = (user, passw)
    self.session.cert = cert
    # NOTE(review): when cafile is None this sets session.verify = None rather
    # than leaving the default True — confirm requests treats that as intended.
    self.session.verify = cafile
def exists(self, url):
response = self.session.request('PROPFIND', self.base_url + url)
code = response.status_code
if code == 404:
return False
else:
return True
def propfind(self, url):
    """PROPFIND base_url+url and summarize the response.

    Returns an empty dict on failure, otherwise a dict with:
      'is_dir'  -- True when the resource reports a <collection> resourcetype
      'entries' -- href strings of the children (empty for plain files)
    """
    return_value = {}
    response = self.session.request('PROPFIND', self.base_url + url)
    code = response.status_code
    if code == 404:
        print('Could not find ' + url)
        return return_value
    elif code != 200 and code != 207:
        # 207 Multi-Status is the normal WebDAV success answer.
        print('Propfind failed for ' + url + ': unknown error (' + str(code) + ')')
        return return_value
    # NOTE(review): the lowercase tag names rely on the lxml parser's handling
    # of DAV: namespaced elements — confirm against the server's XML.
    soup = BeautifulSoup(response.text, 'lxml')
    return_value['is_dir'] = False
    return_value['entries'] = []
    # The first <response> describes the requested resource itself.
    metadata = soup.find('response')
    if metadata.find('propstat').find('prop').find('resourcetype').find('collection') != None:
        return_value['is_dir'] = True
    first = True
    for file in soup.find_all('response'):
        # First entry is the file itself, subsequent entries are directory entries
        if first:
            first = False
            continue
        return_value['entries'].append(file.find('href').text)
    return return_value
def mkdir(self, url, recursive=False):
if url[-1] == '/':
url = url[:-1]
# Since this is the base case for recursion, don't print any errors
if self.exists(url):
return
parent = '/'.join(url.split('/')[:-1])
if not self.exists(parent):
if recursive == False:
print('Could not create directory ' + url + ', parent does not exist')
return
else:
self.mkdir(parent, True)
response = self.session.request('MKCOL', self.base_url + url)
code = response.status_code
if code == 201:
return
elif code == 405:
print('Could not create ' + url + ': already exists')
else:
print('Could not create ' + url + ': unknown error (' + str(code) + ')')
def upload(self, url, file):
data = file.read()
parent = '/'.join(url.split('/')[:-1])
self.mkdir(parent, True)
|
def download(self, url, out):
response = self.session.get(self.base_url + '/' + url)
out.write(response.content)
# Traverse folder recursively, returning a list of absolute filenames
def traverse(self, folder):
entries = self.propfind(folder)['entries']
results = []
for entry in entries:
# if folder, recurse
if entry[-1] == '/':
results = results + self.traverse(entry)
else:
results.append(entry)
return results
|
print('Uploading: ' + url)
self.session.put(self.base_url + url, data=data, headers={'Content-Type': 'application/octet-stream'})
|
split-expense.page.ts
|
import { Component, OnInit } from '@angular/core';
import { AbstractControl, FormArray, FormBuilder, FormGroup, Validators } from '@angular/forms';
import { ActivatedRoute, Router } from '@angular/router';
import { NavController, PopoverController } from '@ionic/angular';
import { isNumber } from 'lodash';
import * as moment from 'moment';
import { forkJoin, from, iif, noop, Observable, of, throwError } from 'rxjs';
import { catchError, concatMap, finalize, map, switchMap, tap } from 'rxjs/operators';
import { CategoriesService } from 'src/app/core/services/categories.service';
import { DateService } from 'src/app/core/services/date.service';
import { FileService } from 'src/app/core/services/file.service';
import { OfflineService } from 'src/app/core/services/offline.service';
import { TransactionService } from 'src/app/core/services/transaction.service';
import { SplitExpenseService } from 'src/app/core/services/split-expense.service';
import { SplitExpenseStatusComponent } from './split-expense-status/split-expense-status.component';
import { TransactionsOutboxService } from 'src/app/core/services/transactions-outbox.service';
@Component({
selector: 'app-split-expense',
templateUrl: './split-expense.page.html',
styleUrls: ['./split-expense.page.scss'],
})
export class
|
implements OnInit {
// One FormGroup per split row; validated as a whole in save().
splitExpensesFormArray = new FormArray([]);
fg: FormGroup;
// Split dimension from route params — 'categories', 'cost centers', or other.
splitType: string;
txnFields: any;
// Original expense amount/currency being divided across the splits.
amount: number;
currency: string;
totalSplitAmount: number;
remainingAmount: number;
categories$: Observable<any>;
costCenters$: Observable<any>;
isCorporateCardsEnabled$: Observable<boolean>;
// Source transaction (parsed from route params) that the splits replace.
transaction: any;
fileObjs: any[];
fileUrls: any[];
maxDate: string;
minDate: string;
selectedCCCTransaction: any;
// Drives the save-button spinner while the split pipeline runs.
saveSplitExpenseLoading: boolean;
errorMessage: string;
showErrorBlock: boolean;
constructor(
  private activatedRoute: ActivatedRoute,
  private formBuilder: FormBuilder,
  private offlineService: OfflineService,
  private categoriesService: CategoriesService,
  private dateService: DateService,
  private splitExpenseService: SplitExpenseService,
  private popoverController: PopoverController,
  private transactionService: TransactionService,
  private fileService: FileService,
  private navController: NavController,
  private router: Router,
  private transactionsOutboxService: TransactionsOutboxService
) {}
// Intentionally empty: all state is (re)built in ionViewWillEnter on each page entry.
ngOnInit() {}
goBack() {
  // Ionic back-navigation to whichever page pushed this one.
  this.navController.back();
}
onChangeAmount(splitExpenseForm, index) {
  // Ignore programmatic patches (no pending user change), a missing total,
  // or non-numeric input.
  // NOTE(review): _pendingChange is a private Angular FormControl field —
  // verify it still exists after framework upgrades.
  if (!splitExpenseForm.controls.amount._pendingChange || !this.amount || !isNumber(splitExpenseForm.value.amount)) {
    return;
  }
  // With exactly two splits, keep them complementary: the sibling row
  // absorbs whatever remains of the original amount.
  if (this.splitExpensesFormArray.length === 2) {
    const otherIndex = index === 0 ? 1 : 0;
    const otherSplitExpenseForm = this.splitExpensesFormArray.at(otherIndex);
    const amount = parseFloat((this.amount - splitExpenseForm.value.amount).toFixed(3));
    const percentage = parseFloat(((amount / this.amount) * 100).toFixed(3));
    // emitEvent: false prevents re-triggering the sibling's change handler.
    otherSplitExpenseForm.patchValue(
      {
        amount,
        percentage,
      },
      { emitEvent: false }
    );
  }
  // Recompute this row's percentage from the edited amount (3-decimal rounding).
  let percentage = (splitExpenseForm.value.amount / this.amount) * 100;
  percentage = parseFloat(percentage.toFixed(3));
  splitExpenseForm.patchValue({
    percentage,
  });
  this.getTotalSplitAmount();
}
onChangePercentage(splitExpenseForm, index) {
  // Ignore programmatic patches, a missing total, or non-numeric input.
  // NOTE(review): _pendingChange is a private Angular FormControl field —
  // verify it still exists after framework upgrades.
  if (
    !splitExpenseForm.controls.percentage._pendingChange ||
    !this.amount ||
    !isNumber(splitExpenseForm.value.percentage)
  ) {
    return;
  }
  // With exactly two splits the sibling row takes the complementary share,
  // clamped to the [0, 100] range.
  if (this.splitExpensesFormArray.length === 2) {
    const otherIndex = index === 0 ? 1 : 0;
    const otherSplitExpenseForm = this.splitExpensesFormArray.at(otherIndex);
    const percentage = Math.min(100, Math.max(0, 100 - splitExpenseForm.value.percentage));
    const amount = parseFloat(((this.amount * percentage) / 100).toFixed(3));
    // emitEvent: false prevents re-triggering the sibling's change handler.
    otherSplitExpenseForm.patchValue(
      {
        amount,
        percentage,
      },
      { emitEvent: false }
    );
  }
  // Recompute this row's amount from the edited percentage (3-decimal rounding).
  let amount = (this.amount * splitExpenseForm.value.percentage) / 100;
  amount = parseFloat(amount.toFixed(3));
  splitExpenseForm.patchValue({
    amount,
  });
  this.getTotalSplitAmount();
}
getTotalSplitAmount() {
  // Totals are only meaningful once there are at least two split rows.
  if (this.splitExpensesFormArray.value.length <= 1) {
    return;
  }
  const amounts = this.splitExpensesFormArray.value.map((row) => row.amount);
  const total = amounts.reduce((acc, curr) => acc + curr);
  // Round to 3 decimals; `|| 0` guards against NaN leaking into the UI.
  this.totalSplitAmount = parseFloat(total.toFixed(3)) || 0;
  this.remainingAmount = parseFloat((this.amount - this.totalSplitAmount).toFixed(3)) || 0;
}
setUpSplitExpenseBillable(splitExpense) {
  // Project-linked splits inherit the org's configured default billable flag;
  // everything else keeps the source transaction's value.
  const hasBillableDefault = splitExpense.project && this.txnFields && this.txnFields.billable;
  return hasBillableDefault ? this.txnFields.billable.default_value : this.transaction.billable;
}
setUpSplitExpenseTax(splitExpense) {
  // Apportion the original tax by the split's percentage share when both tax
  // and amount are present; otherwise pass the tax value through unchanged.
  const { tax_amount: taxAmount, amount } = this.transaction;
  return taxAmount && amount ? (taxAmount * splitExpense.percentage) / 100 : taxAmount;
}
generateSplitEtxnFromFg(splitExpenseValue) {
  // Build one transaction payload for a split row: inherit everything from
  // the source transaction, then override the split-specific fields.
  // Fixing the date format here as the transaction object date is a string
  // NOTE(review): this mutates this.transaction's from_dt/to_dt in place, so
  // later calls see already-converted Date objects — confirm getUTCDate
  // tolerates that.
  this.transaction.from_dt =
    this.transaction?.from_dt && this.dateService.getUTCDate(new Date(this.transaction.from_dt));
  this.transaction.to_dt = this.transaction?.to_dt && this.dateService.getUTCDate(new Date(this.transaction.to_dt));
  return {
    ...this.transaction,
    org_category_id: splitExpenseValue.category && splitExpenseValue.category.id,
    project_id: splitExpenseValue.project && splitExpenseValue.project.project_id,
    cost_center_id: splitExpenseValue.cost_center && splitExpenseValue.cost_center.id,
    currency: splitExpenseValue.currency,
    amount: splitExpenseValue.amount,
    source: 'MOBILE',
    billable: this.setUpSplitExpenseBillable(splitExpenseValue),
    tax_amount: this.setUpSplitExpenseTax(splitExpenseValue),
  };
}
uploadNewFiles(files) {
  // Normalize each receipt's type string to 'image' or 'pdf', then upload
  // every file in parallel through the outbox service.
  const uploads = [];
  for (const file of files) {
    if (file.type) {
      const looksLikeImage = ['image', 'jpeg', 'jpg', 'png'].some((t) => file.type.indexOf(t) > -1);
      if (looksLikeImage) {
        file.type = 'image';
      } else if (file.type.indexOf('pdf') > -1) {
        file.type = 'pdf';
      }
    }
    uploads.push(from(this.transactionsOutboxService.fileUpload(file.url, file.type)));
  }
  // forkJoin never emits on an empty array, so emit null when there is nothing to upload.
  return iif(() => uploads.length !== 0, forkJoin(uploads), of(null));
}
uploadFiles(files) {
  // Existing transactions already have their receipts server-side; only a
  // brand-new transaction needs the local files uploaded.
  if (this.transaction.id) {
    return this.getAttachedFiles(this.transaction.id);
  }
  return this.uploadNewFiles(files).pipe(
    map((uploadedFiles) => {
      this.fileObjs = uploadedFiles;
      return this.fileObjs;
    })
  );
}
createAndLinkTxnsWithFiles(splitExpenses) {
  // Create the split transactions and, when receipts exist, fetch their
  // base64 content so copies can be linked to each new transaction.
  // Emits the ids of the newly created transactions.
  const splitExpense$: any = {
    txns: this.splitExpenseService.createSplitTxns(this.transaction, this.totalSplitAmount, splitExpenses),
  };
  // Only request file content when there is something to attach.
  if (this.fileObjs && this.fileObjs.length > 0) {
    splitExpense$.files = this.splitExpenseService.getBase64Content(this.fileObjs);
  }
  return forkJoin(splitExpense$).pipe(
    switchMap((data: any) => {
      const txnIds = data.txns.map((txn) => txn.id);
      return this.splitExpenseService.linkTxnWithFiles(data).pipe(map(() => txnIds));
    })
  );
}
async showSplitExpenseStatusPopup(isSplitSuccessful: boolean) {
  // Present the success/failure popover and wait for the user to dismiss it
  // before navigating away on success.
  const splitExpenseStatusPopup = await this.popoverController.create({
    component: SplitExpenseStatusComponent,
    componentProps: {
      isSplitSuccessful,
    },
  });
  await splitExpenseStatusPopup.present();
  // The dismiss payload was never used; we only need to wait for dismissal.
  await splitExpenseStatusPopup.onWillDismiss();
  if (isSplitSuccessful) {
    this.router.navigate(['/', 'enterprise', 'my_expenses']);
  }
}
getAttachedFiles(transactionId) {
  // Fetch receipts already linked to the transaction, caching them on the
  // component before re-emitting them.
  return this.fileService
    .findByTransactionId(transactionId)
    .pipe(map((uploadedFiles) => (this.fileObjs = uploadedFiles)));
}
save() {
  // Surface validation errors and bail when the form is invalid.
  if (!this.splitExpensesFormArray.valid) {
    this.splitExpensesFormArray.markAllAsTouched();
    return;
  }
  this.showErrorBlock = false;
  // The splits must add up exactly to the original amount.
  if (this.amount && this.amount !== this.totalSplitAmount) {
    this.showErrorBlock = true;
    this.errorMessage = 'Split amount cannot be more than ' + this.amount + '.';
    setTimeout(() => {
      this.showErrorBlock = false;
    }, 2500);
    return;
  }
  this.isCorporateCardsEnabled$.subscribe((isCorporateCardsEnabled) => {
    // Zero/negative split amounts are only allowed when corporate cards are
    // enabled. (Replaces the original reduce whose `false && defaultValue`
    // accumulator was an obfuscated constant false.)
    const hasNegativeAmount = this.splitExpensesFormArray.value.some(
      (splitExpenseValue) => splitExpenseValue.amount && splitExpenseValue.amount <= 0
    );
    const canCreateNegativeExpense = isCorporateCardsEnabled || !hasNegativeAmount;
    if (!canCreateNegativeExpense) {
      this.showErrorBlock = true;
      this.errorMessage = 'Amount should be greater than 0.01';
      setTimeout(() => {
        this.showErrorBlock = false;
      }, 2500);
      return;
    }
    this.saveSplitExpenseLoading = true;
    // One transaction payload per split row.
    const generatedSplitEtxn = this.splitExpensesFormArray.value.map((splitExpenseValue) =>
      this.generateSplitEtxnFromFg(splitExpenseValue)
    );
    this.uploadFiles(this.fileUrls)
      .pipe(
        concatMap(() => this.createAndLinkTxnsWithFiles(generatedSplitEtxn)),
        concatMap((res) => {
          const observables$ = [];
          // Replace the original transaction with its splits.
          if (this.transaction.id) {
            observables$.push(this.transactionService.delete(this.transaction.id));
          }
          // Re-match the first split against the selected corporate card expense.
          if (this.transaction.corporate_credit_card_expense_group_id) {
            observables$.push(this.transactionService.matchCCCExpense(res[0], this.selectedCCCTransaction.id));
          }
          // forkJoin never emits on an empty array, so feed it a no-op.
          if (observables$.length === 0) {
            observables$.push(of(true));
          }
          return forkJoin(observables$);
        }),
        tap(() => {
          this.showSplitExpenseStatusPopup(true);
        }),
        catchError((err) => {
          this.showSplitExpenseStatusPopup(false);
          return throwError(err);
        }),
        finalize(() => {
          this.saveSplitExpenseLoading = false;
        })
      )
      .subscribe(noop);
  });
}
getActiveCategories() {
  // All enabled categories, narrowed to the ones this org marks as required.
  return this.offlineService
    .getAllEnabledCategories()
    .pipe(map((categories) => this.categoriesService.filterRequired(categories)));
}
/**
 * Page-entry hook: reads navigation params (split type, txn, files, CCC txn),
 * builds the option streams for the chosen split dimension, and seeds the
 * form with the default 60/40 split via setValuesForCCC.
 */
ionViewWillEnter() {
  this.offlineService.getHomeCurrency().subscribe((homeCurrency) => {
    const currencyObj = JSON.parse(this.activatedRoute.snapshot.params.currencyObj);
    const orgSettings$ = this.offlineService.getOrgSettings();
    this.splitType = this.activatedRoute.snapshot.params.splitType;
    this.txnFields = JSON.parse(this.activatedRoute.snapshot.params.txnFields);
    this.transaction = JSON.parse(this.activatedRoute.snapshot.params.txn);
    this.fileUrls = JSON.parse(this.activatedRoute.snapshot.params.fileObjs);
    this.selectedCCCTransaction = JSON.parse(this.activatedRoute.snapshot.params.selectedCCCTransaction);
    if (this.splitType === 'categories') {
      this.categories$ = this.getActiveCategories().pipe(
        map((categories) => categories.map((category) => ({ label: category.displayName, value: category })))
      );
    } else if (this.splitType === 'cost centers') {
      // Fix: reuse the orgSettings$ stream declared above instead of creating a
      // second, duplicate observable that shadowed it here.
      const orgUserSettings$ = this.offlineService.getOrgUserSettings();
      this.costCenters$ = forkJoin({
        orgSettings: orgSettings$,
        orgUserSettings: orgUserSettings$,
      }).pipe(
        switchMap(({ orgSettings, orgUserSettings }) => {
          // Cost centers are only offered when the org has them enabled.
          if (orgSettings.cost_centers.enabled) {
            return this.offlineService.getAllowedCostCenters(orgUserSettings);
          } else {
            return of([]);
          }
        }),
        map((costCenters) =>
          costCenters.map((costCenter) => ({
            label: costCenter.name,
            value: costCenter,
          }))
        )
      );
    }
    this.isCorporateCardsEnabled$ = orgSettings$.pipe(
      map(
        (orgSettings) =>
          orgSettings.corporate_credit_card_settings && orgSettings.corporate_credit_card_settings.enabled
      )
    );
    // NOTE(review): this subscription is never torn down; consider take(1) or
    // unsubscribing in ionViewWillLeave to avoid leaks on repeated page visits.
    this.isCorporateCardsEnabled$.subscribe((isCorporateCardsEnabled) => {
      this.setValuesForCCC(currencyObj, homeCurrency, isCorporateCardsEnabled);
    });
  });
}
// Seeds the split form for the given currency context: prefils a default
// 60/40 two-way split and computes the selectable txn-date window
// (Jan 1 2001 .. tomorrow).
setValuesForCCC(currencyObj: any, homeCurrency: any, isCorporateCardsEnabled: boolean) {
  this.setAmountAndCurrency(currencyObj, homeCurrency);
  // When corporate cards are enabled, splits are prefilled even for zero or
  // negative amounts — presumably to support CCC refunds; confirm intent.
  let amount1 = this.amount > 0.0001 || isCorporateCardsEnabled ? this.amount * 0.6 : null; // 60% split
  let amount2 = this.amount > 0.0001 || isCorporateCardsEnabled ? this.amount * 0.4 : null; // 40% split
  const percentage1 = this.amount ? 60 : null;
  const percentage2 = this.amount ? 40 : null;
  // Round the prefilled amounts to 3 decimal places.
  amount1 = amount1 ? parseFloat(amount1.toFixed(3)) : amount1;
  amount2 = amount2 ? parseFloat(amount2.toFixed(3)) : amount2;
  this.add(amount1, this.currency, percentage1, null);
  this.add(amount2, this.currency, percentage2, null);
  this.getTotalSplitAmount();
  const today = new Date();
  const minDate = new Date('Jan 1, 2001');
  const maxDate = this.dateService.addDaysToDate(today, 1);
  // NOTE(review): months/days are not zero-padded here (e.g. '2021-3-5');
  // confirm the consuming date control accepts this format.
  this.minDate = minDate.getFullYear() + '-' + (minDate.getMonth() + 1) + '-' + minDate.getDate();
  this.maxDate = maxDate.getFullYear() + '-' + (maxDate.getMonth() + 1) + '-' + maxDate.getDate();
}
/**
 * Picks the original (foreign) amount/currency when present, otherwise the
 * base amount/currency; falls back to the home currency when neither exists.
 */
setAmountAndCurrency(currencyObj: any, homeCurrency: any) {
  const origAmount = currencyObj && currencyObj.orig_amount;
  const origCurrency = currencyObj && currencyObj.orig_currency;
  this.amount = currencyObj && (origAmount || currencyObj.amount);
  this.currency = (currencyObj && (origCurrency || currencyObj.currency)) || homeCurrency;
}
/**
 * Form-control validator: the chosen date must lie strictly between
 * Jan 1 2001 and tomorrow.
 *
 * Returns null for valid/empty values (combine with Validators.required for
 * mandatory dates) or { invalidDateSelection: true } when out of range.
 */
customDateValidator(control: AbstractControl) {
  if (!control.value) {
    // Empty values are considered valid here; the original implicitly
    // returned undefined — made explicit per the Angular validator contract.
    return null;
  }
  const today = new Date();
  const minDate = moment(new Date('Jan 1, 2001'));
  const maxDate = moment(today).add(1, 'day');
  const passedInDate = moment(new Date(control.value));
  return passedInDate.isBetween(minDate, maxDate) ? null : { invalidDateSelection: true };
}
// Appends one split row to the form. amount/currency/percentage may be null
// (the user fills them in); txnDt defaults to the parent txn's date, else today.
add(amount?, currency?, percentage?, txnDt?) {
  if (!txnDt) {
    const dateOfTxn = this.transaction?.txn_dt;
    const today: any = new Date();
    txnDt = dateOfTxn ? new Date(dateOfTxn) : today;
    // NOTE(review): moment's year token is normally 'YYYY'; lowercase 'yyyy'
    // is the era-year token — confirm this formats as intended.
    txnDt = moment(txnDt).format('yyyy-MM-DD');
  }
  const fg = this.formBuilder.group({
    amount: [amount, Validators.required],
    currency: [currency],
    percentage: [percentage],
    txn_dt: [txnDt, Validators.compose([Validators.required, this.customDateValidator])],
  });
  // The chosen split dimension gets its own required control on each row.
  if (this.splitType === 'categories') {
    fg.addControl('category', this.formBuilder.control('', [Validators.required]));
  } else if (this.splitType === 'projects') {
    fg.addControl('project', this.formBuilder.control('', [Validators.required]));
  } else if (this.splitType === 'cost centers') {
    fg.addControl('cost_center', this.formBuilder.control('', [Validators.required]));
  }
  this.splitExpensesFormArray.push(fg);
  this.getTotalSplitAmount();
}
/**
 * Drops the split row at `index`. When exactly two rows remain, the second
 * row is rebalanced to the complement of the first (clamped to 0–100%),
 * without emitting form events. Always refreshes the running total.
 */
remove(index: number) {
  this.splitExpensesFormArray.removeAt(index);
  if (this.splitExpensesFormArray.length === 2) {
    const first = this.splitExpensesFormArray.at(0);
    const last = this.splitExpensesFormArray.at(1);
    // Complement of the first row's share, clamped into [0, 100].
    const percentage = Math.min(100, Math.max(0, 100 - first.value.percentage));
    const amount = parseFloat(((this.amount * percentage) / 100).toFixed(3));
    last.patchValue({ amount, percentage }, { emitEvent: false });
  }
  this.getTotalSplitAmount();
}
}
|
SplitExpensePage
|
incrementControl.tsx
|
import * as React from 'react';
import { ActionCreator } from 'redux';
// Props contract for the increment-counter component.
interface Props {
  // Current counter value rendered in the heading.
  counter: number;
  // Invoked with the increment step when the button is pressed
  // (the component always passes 1).
  onClick(val: number): void;
}
export default class
|
extends React.Component<Props,{}>{
constructor(props, context) {
super(props, context);
}
// Dispatches a single +1 increment to the parent via the onClick prop.
// (Removed a leftover `debugger;` statement — a debug artifact that halts
// execution whenever devtools are open.)
handleClick() {
  this.props.onClick(1);
}
// Renders the current counter plus a button whose press dispatches a +1
// increment through handleClick.
render() {
  return <div>
    <h2>{'Score 1 ' + this.props.counter}</h2>
    <input type="button" id="increment-btn"
      value="Increment"
      onClick={this.handleClick.bind(this)}/>
  </div>
}
}
|
IncrementCounterComponent
|
mjpeg_on_movement.py
|
# MJPEG Video Recording on Movement Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to record mjpeg files. You can either feed the
# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
# recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then
# the built-in video player will work too.
#
# This example demonstrates using frame differencing with your OpenMV Cam to do
# motion detection. After motion is detected your OpenMV Cam will take video.
import sensor, image, time, mjpeg, pyb, os
RED_LED_PIN = 1   # pyb.LED index of the on-board red LED (lit while capturing the background)
BLUE_LED_PIN = 3  # pyb.LED index of the on-board blue LED (lit while watching for motion)
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
if not "temp" in os.listdir():
|
# Main loop: capture a background reference, wait for motion relative to it,
# record a 200-frame MJPEG clip, then restart with a fresh background.
while(True):
    pyb.LED(RED_LED_PIN).on()
    print("About to save background image...")
    sensor.skip_frames(time = 2000) # Give the user time to get ready.
    pyb.LED(RED_LED_PIN).off()
    # Reference frame for frame differencing; overwritten each cycle.
    sensor.snapshot().save("temp/bg.bmp")
    print("Saved background image - Now detecting motion!")
    pyb.LED(BLUE_LED_PIN).on()
    diff = 10 # We'll say we detected motion after 10 frames of motion.
    while(diff):
        img = sensor.snapshot()
        img.difference("temp/bg.bmp")
        stats = img.statistics()
        # Stats 5 is the max of the lighting color channel. The below code
        # triggers when the lighting max for the whole image goes above 20.
        # The lighting difference maximum should be zero normally.
        if (stats[5] > 20):
            diff -= 1
    # Random filename avoids clobbering clips from earlier boots.
    m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
    clock = time.clock() # Tracks FPS.
    print("You're on camera!")
    for i in range(200):
        clock.tick()
        m.add_frame(sensor.snapshot())
        print(clock.fps())
    # The measured FPS is written into the MJPEG header on close.
    m.close(clock.fps())
    pyb.LED(BLUE_LED_PIN).off()
    print("Restarting...")
|
os.mkdir("temp") # Make a temp directory
|
CalculatorKey.js
|
import React, { Component } from "react";
import { StyleSheet, Text, TouchableHighlight } from "react-native";
import { dynamicFontSize } from "../utils/helpers";
export class CalculatorKey extends Component {
render() {
|
const {
color,
isDirty,
text,
textDirty,
type,
style,
handlePress,
orientation
} = this.props;
return (
<TouchableHighlight
style={[styles.container, style, { backgroundColor: color }]}
onPress={() => handlePress(text, type)}
>
<Text style={[styles.key, dynamicFontSize(orientation, 35)]}>
{isDirty ? textDirty : text}
</Text>
</TouchableHighlight>
);
}
}
const styles = StyleSheet.create({
container: {
alignItems: "center",
justifyContent: "center",
flexShrink: 1,
borderRadius: 50,
margin: 5
},
key: {
fontSize: 40,
color: "#fff"
}
});
| |
_enzyme_treatment.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 22:07:01 2018
@author: yoelr
"""
from ._tank import MixTank
from ._hx import HXutility
class EnzymeTreatment(MixTank):
"""Create an EnzymeTreatment unit that is cost as a MixTank with a heat exchanger."""
_N_outs = 1
#: Residence time (hr)
_tau = 1
    def __init__(self, ID='', ins=None, outs=(), *, T):
        """Create the unit; `T` is the operating temperature the outlet is set to."""
        super().__init__(ID, ins, outs)
        self.T = T #: Operating temperature
        # The auxiliary heat exchanger shares this unit's inlet/outlet stream
        # lists and heat-utility list, so its duty and cost are computed on
        # the same material flows as the tank itself.
        self._heat_exchanger = he = HXutility(None, None, T=T)
        self._heat_utilities = he._heat_utilities
        he._ins = self._ins
        he._outs = self._outs
def _run(self):
feed = self.ins[0]
out = self.outs[0]
out._mol[:] = self._mol_in
out.phase = feed.phase
out.P = feed.P
out.T = self.T
    def _design(self):
        """Size the mix tank, then size the embedded heat exchanger on the same streams."""
        super()._design()
        self._heat_exchanger._design()
def _cost(self):
|
super()._cost()
he = self._heat_exchanger
he._cost()
self._Cost['Heat exchanger'] = he._Cost['Heat exchanger']
|
|
erc721_holder_upgradeable.rs
|
#[allow(dead_code)]
pub mod erc721_holder_upgradeable {
# [rustfmt :: skip] use ethcontract as ethcontract ;
#[doc = "Generated by `ethcontract`"]
#[derive(Clone)]
pub struct Contract {
methods: Methods,
}
impl Contract {
#[doc = r" Retrieves the raw contract instance used to generate the type safe"]
#[doc = r" API for this contract."]
pub fn raw_contract() -> &'static self::ethcontract::Contract {
use self::ethcontract::common::artifact::truffle::TruffleLoader;
use self::ethcontract::private::lazy_static;
use self::ethcontract::Contract;
lazy_static! {
pub static ref CONTRACT: Contract = {
# [allow (unused_mut)] let mut contract = TruffleLoader :: new () . load_contract_from_str ("{\"contractName\":\"ERC721HolderUpgradeable\",\"abi\":[{\"type\":\"function\",\"name\":\"onERC721Received\",\"inputs\":[{\"name\":\"\",\"type\":\"address\"},{\"name\":\"\",\"type\":\"address\"},{\"name\":\"\",\"type\":\"uint256\"},{\"name\":\"\",\"type\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes4\"}],\"constant\":false,\"stateMutability\":\"nonpayable\"}],\"bytecode\":\"608060405234801561001057600080fd5b506101a8806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063150b7a0214610030575b600080fd5b61004e61003e366004610087565b630a85bd0160e11b949350505050565b6040516001600160e01b0319909116815260200160405180910390f35b80356001600160a01b038116811461008257600080fd5b919050565b6000806000806080858703121561009c578384fd5b6100a58561006b565b93506100b36020860161006b565b925060408501359150606085013567ffffffffffffffff808211156100d6578283fd5b818701915087601f8301126100e9578283fd5b8135818111156100fb576100fb61015c565b604051601f8201601f19908116603f011681019083821181831017156101235761012361015c565b816040528281528a602084870101111561013b578586fd5b82602086016020830137918201602001949094529598949750929550505050565b634e487b7160e01b600052604160045260246000fdfea2646970667358221220342c0bd91686785ded2dca5f95c7df8164400847174560e139e1c4ee09b68d0c64736f6c63430008030033\",\"networks\":{},\"devdoc\":{\"details\":null,\"methods\":{}},\"userdoc\":{\"details\":null,\"methods\":{}}}") . expect ("valid contract JSON") ;
contract
};
}
&CONTRACT
}
#[doc = r" Creates a new contract instance with the specified `web3`"]
#[doc = r" provider at the given `Address`."]
#[doc = r""]
#[doc = r" Note that this does not verify that a contract with a matching"]
#[doc = r" `Abi` is actually deployed at the given address."]
pub fn at<F, B, T>(
web3: &self::ethcontract::web3::api::Web3<T>,
address: self::ethcontract::Address,
) -> Self
where
F: std::future::Future<
Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>,
> + Send
+ 'static,
B: std::future::Future<
Output = Result<
Vec<Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>>,
self::ethcontract::web3::Error,
>,
> + Send
+ 'static,
T: self::ethcontract::web3::Transport<Out = F>
+ self::ethcontract::web3::BatchTransport<Batch = B>
+ Send
+ Sync
+ 'static,
{
Contract::with_deployment_info(web3, address, None)
}
#[doc = r" Creates a new contract instance with the specified `web3` provider with"]
#[doc = r" the given `Abi` at the given `Address` and an optional transaction hash."]
#[doc = r" This hash is used to retrieve contract related information such as the"]
#[doc = r" creation block (which is useful for fetching all historic events)."]
#[doc = r""]
#[doc = r" Note that this does not verify that a contract with a matching `Abi` is"]
#[doc = r" actually deployed at the given address nor that the transaction hash,"]
#[doc = r" when provided, is actually for this contract deployment."]
pub fn with_deployment_info<F, B, T>(
web3: &self::ethcontract::web3::api::Web3<T>,
address: self::ethcontract::Address,
deployment_information: Option<ethcontract::common::DeploymentInformation>,
) -> Self
where
F: std::future::Future<
Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>,
> + Send
+ 'static,
B: std::future::Future<
Output = Result<
Vec<Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>>,
self::ethcontract::web3::Error,
>,
> + Send
+ 'static,
T: self::ethcontract::web3::Transport<Out = F>
+ self::ethcontract::web3::BatchTransport<Batch = B>
+ Send
+ Sync
+ 'static,
{
use self::ethcontract::transport::DynTransport;
use self::ethcontract::web3::api::Web3;
use self::ethcontract::Instance;
let transport = DynTransport::new(web3.transport().clone());
let web3 = Web3::new(transport);
let abi = Self::raw_contract().abi.clone();
let instance = Instance::with_deployment_info(web3, abi, address, deployment_information);
Contract::from_raw(instance)
}
#[doc = r" Creates a contract from a raw instance."]
fn from_raw(instance: self::ethcontract::dyns::DynInstance) -> Self {
let methods = Methods { instance };
Contract { methods }
}
#[doc = r" Returns the contract address being used by this instance."]
pub fn address(&self) -> self::ethcontract::Address {
self.raw_instance().address()
}
#[doc = r" Returns the deployment information of the contract"]
#[doc = r" if it is known, `None` otherwise."]
pub fn deployment_information(&self) -> Option<ethcontract::common::DeploymentInformation> {
self.raw_instance().deployment_information()
}
#[doc = r" Returns a reference to the default method options used by this"]
#[doc = r" contract."]
pub fn defaults(&self) -> &self::ethcontract::contract::MethodDefaults {
&self.raw_instance().defaults
}
#[doc = r" Returns a mutable reference to the default method options used"]
#[doc = r" by this contract."]
pub fn defaults_mut(&mut self) -> &mut self::ethcontract::contract::MethodDefaults
|
#[doc = r" Returns a reference to the raw runtime instance used by this"]
#[doc = r" contract."]
pub fn raw_instance(&self) -> &self::ethcontract::dyns::DynInstance {
&self.methods.instance
}
#[doc = r" Returns a mutable reference to the raw runtime instance used by"]
#[doc = r" this contract."]
fn raw_instance_mut(&mut self) -> &mut self::ethcontract::dyns::DynInstance {
&mut self.methods.instance
}
}
    impl std::fmt::Debug for Contract {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            // Debug output shows only the contract name and its on-chain address,
            // e.g. `ERC721HolderUpgradeable(0x…)`.
            f.debug_tuple(stringify!(ERC721HolderUpgradeable))
                .field(&self.address())
                .finish()
        }
    }
impl Contract {
#[doc = "Generated by `ethcontract`"]
#[allow(clippy::too_many_arguments)]
pub fn builder<F, B, T>(
web3: &self::ethcontract::web3::api::Web3<T>,
) -> self::ethcontract::dyns::DynDeployBuilder<Self>
where
F: std::future::Future<
Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>,
> + Send
+ 'static,
B: std::future::Future<
Output = Result<
Vec<Result<self::ethcontract::json::Value, self::ethcontract::web3::Error>>,
self::ethcontract::web3::Error,
>,
> + Send
+ 'static,
T: self::ethcontract::web3::Transport<Out = F>
+ self::ethcontract::web3::BatchTransport<Batch = B>
+ Send
+ Sync
+ 'static,
{
use self::ethcontract::contract::DeployBuilder;
use self::ethcontract::dyns::DynTransport;
use self::ethcontract::web3::api::Web3;
let transport = DynTransport::new(web3.transport().clone());
let web3 = Web3::new(transport);
let bytecode = Self::raw_contract().bytecode.clone();
DeployBuilder::new(web3, bytecode, ()).expect("valid deployment args")
}
}
impl self::ethcontract::contract::Deploy<self::ethcontract::dyns::DynTransport> for Contract {
type Context = self::ethcontract::common::Bytecode;
fn bytecode(cx: &Self::Context) -> &self::ethcontract::common::Bytecode {
cx
}
fn abi(_: &Self::Context) -> &self::ethcontract::common::Abi {
&Self::raw_contract().abi
}
fn from_deployment(
web3: self::ethcontract::dyns::DynWeb3,
address: self::ethcontract::Address,
transaction_hash: self::ethcontract::H256,
_: Self::Context,
) -> Self {
Self::with_deployment_info(&web3, address, Some(transaction_hash.into()))
}
}
impl Contract {
#[doc = r" Returns an object that allows accessing typed method signatures."]
pub fn signatures() -> Signatures {
Signatures
}
#[doc = r" Retrieves a reference to type containing all the generated"]
#[doc = r" contract methods. This can be used for methods where the name"]
#[doc = r" would collide with a common method (like `at` or `deployed`)."]
pub fn methods(&self) -> &Methods {
&self.methods
}
}
#[doc = r" Type containing signatures for all methods for generated contract type."]
#[derive(Clone, Copy)]
pub struct Signatures;
impl Signatures {
#[doc = "Returns signature for method `onERC721Received(address,address,uint256,bytes):(bytes4)`."]
#[allow(clippy::type_complexity)]
pub fn on_erc721_received(
&self,
) -> self::ethcontract::contract::Signature<
(
self::ethcontract::Address,
self::ethcontract::Address,
self::ethcontract::U256,
self::ethcontract::tokens::Bytes<Vec<u8>>,
),
self::ethcontract::tokens::Bytes<[u8; 4]>,
> {
self::ethcontract::contract::Signature::new([21, 11, 122, 2])
}
}
#[doc = r" Type containing all contract methods for generated contract type."]
#[derive(Clone)]
pub struct Methods {
instance: self::ethcontract::dyns::DynInstance,
}
#[allow(clippy::too_many_arguments, clippy::type_complexity)]
impl Methods {
#[doc = "Generated by `ethcontract`"]
pub fn on_erc721_received(
&self,
p0: self::ethcontract::Address,
p1: self::ethcontract::Address,
p2: self::ethcontract::U256,
p3: self::ethcontract::tokens::Bytes<Vec<u8>>,
) -> self::ethcontract::dyns::DynMethodBuilder<self::ethcontract::tokens::Bytes<[u8; 4]>> {
self
.instance
.method([21, 11, 122, 2], (p0, p1, p2, p3))
.expect("generated call")
}
}
    impl std::ops::Deref for Contract {
        type Target = Methods;
        // Lets callers invoke the generated contract methods directly on
        // `Contract` without going through `.methods()`.
        fn deref(&self) -> &Self::Target {
            &self.methods
        }
    }
}
pub use self::erc721_holder_upgradeable::Contract as ERC721HolderUpgradeable;
|
{
&mut self.raw_instance_mut().defaults
}
|
mod.rs
|
use rustc_ast::token::{self, Token, TokenKind};
use rustc_ast::util::comments;
use rustc_data_structures::sync::Lrc;
use rustc_errors::{error_code, DiagnosticBuilder, FatalError};
use rustc_lexer::unescape;
use rustc_lexer::Base;
use rustc_session::parse::ParseSess;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::{BytePos, Pos, Span};
use log::debug;
use std::char;
use std::convert::TryInto;
mod tokentrees;
mod unescape_error_reporting;
mod unicode_chars;
use unescape_error_reporting::{emit_unescape_error, push_escaped_char};
#[derive(Clone, Debug)]
/// Record of a delimiter mismatch discovered while building token trees,
/// kept around for diagnostics.
pub struct UnmatchedBrace {
    /// The closing delimiter that was expected.
    pub expected_delim: token::DelimToken,
    /// The delimiter actually found, if any.
    pub found_delim: Option<token::DelimToken>,
    /// Where the unexpected/missing delimiter was encountered.
    pub found_span: Span,
    /// Span of the opening delimiter that was never closed, if known.
    pub unclosed_span: Option<Span>,
    // NOTE(review): presumably a span suggested as the intended match for the
    // diagnostic — confirm against the token-tree builder that fills this in.
    pub candidate_span: Option<Span>,
}
pub struct StringReader<'a> {
sess: &'a ParseSess,
/// Initial position, read-only.
start_pos: BytePos,
/// The absolute offset within the source_map of the current character.
// FIXME(#64197): `pub` is needed by tests for now.
pub pos: BytePos,
/// Stop reading src at this index.
end_src_index: usize,
/// Source text to tokenize.
src: Lrc<String>,
override_span: Option<Span>,
}
impl<'a> StringReader<'a> {
pub fn new(
sess: &'a ParseSess,
source_file: Lrc<rustc_span::SourceFile>,
override_span: Option<Span>,
) -> Self {
if source_file.src.is_none() {
sess.span_diagnostic
.bug(&format!("cannot lex `source_file` without source: {}", source_file.name));
}
let src = (*source_file.src.as_ref().unwrap()).clone();
StringReader {
sess,
start_pos: source_file.start_pos,
pos: source_file.start_pos,
end_src_index: src.len(),
src,
override_span,
}
}
    /// Creates a `StringReader` for re-lexing the source behind `span`.
    pub fn retokenize(sess: &'a ParseSess, mut span: Span) -> Self {
        let begin = sess.source_map().lookup_byte_offset(span.lo());
        let end = sess.source_map().lookup_byte_offset(span.hi());
        // Make the range zero-length if the span is invalid.
        // (lo and hi resolving into different source files means the span is bogus.)
        if begin.sf.start_pos != end.sf.start_pos {
            span = span.shrink_to_lo();
        }
        let mut sr = StringReader::new(sess, begin.sf, None);
        // Seek the lexer to the right byte range.
        // NOTE(review): only the end index is clamped here; lexing still begins
        // at the file's start_pos — confirm callers rely on that behavior.
        sr.end_src_index = sr.src_index(span.hi());
        sr
    }
    /// Returns the configured override span when one is set; otherwise a fresh
    /// root-context span covering `lo..hi`.
    fn mk_sp(&self, lo: BytePos, hi: BytePos) -> Span {
        self.override_span.unwrap_or_else(|| Span::with_root_ctxt(lo, hi))
    }
/// Returns the next token, including trivia like whitespace or comments.
///
/// `Err(())` means that some errors were encountered, which can be
/// retrieved using `buffer_fatal_errors`.
pub fn next_token(&mut self) -> Token {
let start_src_index = self.src_index(self.pos);
let text: &str = &self.src[start_src_index..self.end_src_index];
if text.is_empty() {
let span = self.mk_sp(self.pos, self.pos);
return Token::new(token::Eof, span);
}
{
let is_beginning_of_file = self.pos == self.start_pos;
if is_beginning_of_file {
if let Some(shebang_len) = rustc_lexer::strip_shebang(text) {
let start = self.pos;
self.pos = self.pos + BytePos::from_usize(shebang_len);
let sym = self.symbol_from(start + BytePos::from_usize("#!".len()));
let kind = token::Shebang(sym);
let span = self.mk_sp(start, self.pos);
return Token::new(kind, span);
}
}
}
let token = rustc_lexer::first_token(text);
let start = self.pos;
self.pos = self.pos + BytePos::from_usize(token.len);
debug!("try_next_token: {:?}({:?})", token.kind, self.str_from(start));
// This could use `?`, but that makes code significantly (10-20%) slower.
// https://github.com/rust-lang/rust/issues/37939
let kind = self.cook_lexer_token(token.kind, start);
let span = self.mk_sp(start, self.pos);
Token::new(kind, span)
}
/// Report a fatal lexical error with a given span.
fn fatal_span(&self, sp: Span, m: &str) -> FatalError {
self.sess.span_diagnostic.span_fatal(sp, m)
}
/// Report a lexical error with a given span.
fn err_span(&self, sp: Span, m: &str)
|
/// Report a fatal error spanning [`from_pos`, `to_pos`).
fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> FatalError {
self.fatal_span(self.mk_sp(from_pos, to_pos), m)
}
/// Report a lexical error spanning [`from_pos`, `to_pos`).
fn err_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) {
self.err_span(self.mk_sp(from_pos, to_pos), m)
}
fn struct_span_fatal(
&self,
from_pos: BytePos,
to_pos: BytePos,
m: &str,
) -> DiagnosticBuilder<'a> {
self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), m)
}
fn struct_fatal_span_char(
&self,
from_pos: BytePos,
to_pos: BytePos,
m: &str,
c: char,
) -> DiagnosticBuilder<'a> {
let mut m = m.to_string();
m.push_str(": ");
push_escaped_char(&mut m, c);
self.sess.span_diagnostic.struct_span_fatal(self.mk_sp(from_pos, to_pos), &m[..])
}
/// Turns simple `rustc_lexer::TokenKind` enum into a rich
/// `librustc_ast::TokenKind`. This turns strings into interned
/// symbols and runs additional validation.
fn cook_lexer_token(&self, token: rustc_lexer::TokenKind, start: BytePos) -> TokenKind {
match token {
rustc_lexer::TokenKind::LineComment => {
let string = self.str_from(start);
// comments with only more "/"s are not doc comments
let tok = if comments::is_line_doc_comment(string) {
self.forbid_bare_cr(start, string, "bare CR not allowed in doc-comment");
token::DocComment(Symbol::intern(string))
} else {
token::Comment
};
tok
}
rustc_lexer::TokenKind::BlockComment { terminated } => {
let string = self.str_from(start);
// block comments starting with "/**" or "/*!" are doc-comments
// but comments with only "*"s between two "/"s are not
let is_doc_comment = comments::is_block_doc_comment(string);
if !terminated {
let msg = if is_doc_comment {
"unterminated block doc-comment"
} else {
"unterminated block comment"
};
let last_bpos = self.pos;
self.fatal_span_(start, last_bpos, msg).raise();
}
let tok = if is_doc_comment {
self.forbid_bare_cr(start, string, "bare CR not allowed in block doc-comment");
token::DocComment(Symbol::intern(string))
} else {
token::Comment
};
tok
}
rustc_lexer::TokenKind::Whitespace => token::Whitespace,
rustc_lexer::TokenKind::Ident | rustc_lexer::TokenKind::RawIdent => {
let is_raw_ident = token == rustc_lexer::TokenKind::RawIdent;
let mut ident_start = start;
if is_raw_ident {
ident_start = ident_start + BytePos(2);
}
let sym = nfc_normalize(self.str_from(ident_start));
if is_raw_ident {
let span = self.mk_sp(start, self.pos);
if !sym.can_be_raw() {
self.err_span(span, &format!("`{}` cannot be a raw identifier", sym));
}
self.sess.raw_identifier_spans.borrow_mut().push(span);
}
token::Ident(sym, is_raw_ident)
}
rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
let suffix_start = start + BytePos(suffix_start as u32);
let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
let suffix = if suffix_start < self.pos {
let string = self.str_from(suffix_start);
if string == "_" {
self.sess
.span_diagnostic
.struct_span_warn(
self.mk_sp(suffix_start, self.pos),
"underscore literal suffix is not allowed",
)
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
.note(
"see issue #42326 \
<https://github.com/rust-lang/rust/issues/42326> \
for more information",
)
.emit();
None
} else {
Some(Symbol::intern(string))
}
} else {
None
};
token::Literal(token::Lit { kind, symbol, suffix })
}
rustc_lexer::TokenKind::Lifetime { starts_with_number } => {
// Include the leading `'` in the real identifier, for macro
// expansion purposes. See #12512 for the gory details of why
// this is necessary.
let lifetime_name = self.str_from(start);
if starts_with_number {
self.err_span_(start, self.pos, "lifetimes cannot start with a number");
}
let ident = Symbol::intern(lifetime_name);
token::Lifetime(ident)
}
rustc_lexer::TokenKind::Semi => token::Semi,
rustc_lexer::TokenKind::Comma => token::Comma,
rustc_lexer::TokenKind::Dot => token::Dot,
rustc_lexer::TokenKind::OpenParen => token::OpenDelim(token::Paren),
rustc_lexer::TokenKind::CloseParen => token::CloseDelim(token::Paren),
rustc_lexer::TokenKind::OpenBrace => token::OpenDelim(token::Brace),
rustc_lexer::TokenKind::CloseBrace => token::CloseDelim(token::Brace),
rustc_lexer::TokenKind::OpenBracket => token::OpenDelim(token::Bracket),
rustc_lexer::TokenKind::CloseBracket => token::CloseDelim(token::Bracket),
rustc_lexer::TokenKind::At => token::At,
rustc_lexer::TokenKind::Pound => token::Pound,
rustc_lexer::TokenKind::Tilde => token::Tilde,
rustc_lexer::TokenKind::Question => token::Question,
rustc_lexer::TokenKind::Colon => token::Colon,
rustc_lexer::TokenKind::Dollar => token::Dollar,
rustc_lexer::TokenKind::Eq => token::Eq,
rustc_lexer::TokenKind::Not => token::Not,
rustc_lexer::TokenKind::Lt => token::Lt,
rustc_lexer::TokenKind::Gt => token::Gt,
rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus),
rustc_lexer::TokenKind::And => token::BinOp(token::And),
rustc_lexer::TokenKind::Or => token::BinOp(token::Or),
rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus),
rustc_lexer::TokenKind::Star => token::BinOp(token::Star),
rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash),
rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret),
rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent),
rustc_lexer::TokenKind::Unknown => {
let c = self.str_from(start).chars().next().unwrap();
let mut err =
self.struct_fatal_span_char(start, self.pos, "unknown start of token", c);
// FIXME: the lexer could be used to turn the ASCII version of unicode homoglyphs,
// instead of keeping a table in `check_for_substitution`into the token. Ideally,
// this should be inside `rustc_lexer`. However, we should first remove compound
// tokens like `<<` from `rustc_lexer`, and then add fancier error recovery to it,
// as there will be less overall work to do this way.
let token = unicode_chars::check_for_substitution(self, start, c, &mut err)
.unwrap_or_else(|| token::Unknown(self.symbol_from(start)));
err.emit();
token
}
}
}
fn cook_lexer_literal(
&self,
start: BytePos,
suffix_start: BytePos,
kind: rustc_lexer::LiteralKind,
) -> (token::LitKind, Symbol) {
match kind {
rustc_lexer::LiteralKind::Char { terminated } => {
if !terminated {
self.fatal_span_(start, suffix_start, "unterminated character literal").raise()
}
let content_start = start + BytePos(1);
let content_end = suffix_start - BytePos(1);
self.validate_char_escape(content_start, content_end);
let id = self.symbol_from_to(content_start, content_end);
(token::Char, id)
}
rustc_lexer::LiteralKind::Byte { terminated } => {
if !terminated {
self.fatal_span_(start + BytePos(1), suffix_start, "unterminated byte constant")
.raise()
}
let content_start = start + BytePos(2);
let content_end = suffix_start - BytePos(1);
self.validate_byte_escape(content_start, content_end);
let id = self.symbol_from_to(content_start, content_end);
(token::Byte, id)
}
rustc_lexer::LiteralKind::Str { terminated } => {
if !terminated {
self.fatal_span_(start, suffix_start, "unterminated double quote string")
.raise()
}
let content_start = start + BytePos(1);
let content_end = suffix_start - BytePos(1);
self.validate_str_escape(content_start, content_end);
let id = self.symbol_from_to(content_start, content_end);
(token::Str, id)
}
rustc_lexer::LiteralKind::ByteStr { terminated } => {
if !terminated {
self.fatal_span_(
start + BytePos(1),
suffix_start,
"unterminated double quote byte string",
)
.raise()
}
let content_start = start + BytePos(2);
let content_end = suffix_start - BytePos(1);
self.validate_byte_str_escape(content_start, content_end);
let id = self.symbol_from_to(content_start, content_end);
(token::ByteStr, id)
}
rustc_lexer::LiteralKind::RawStr { n_hashes, started, terminated } => {
if !started {
self.report_non_started_raw_string(start);
}
if !terminated {
self.report_unterminated_raw_string(start, n_hashes)
}
let n_hashes: u16 = self.restrict_n_hashes(start, n_hashes);
let n = u32::from(n_hashes);
let content_start = start + BytePos(2 + n);
let content_end = suffix_start - BytePos(1 + n);
self.validate_raw_str_escape(content_start, content_end);
let id = self.symbol_from_to(content_start, content_end);
(token::StrRaw(n_hashes), id)
}
rustc_lexer::LiteralKind::RawByteStr { n_hashes, started, terminated } => {
if !started {
self.report_non_started_raw_string(start);
}
if !terminated {
self.report_unterminated_raw_string(start, n_hashes)
}
let n_hashes: u16 = self.restrict_n_hashes(start, n_hashes);
let n = u32::from(n_hashes);
let content_start = start + BytePos(3 + n);
let content_end = suffix_start - BytePos(1 + n);
self.validate_raw_byte_str_escape(content_start, content_end);
let id = self.symbol_from_to(content_start, content_end);
(token::ByteStrRaw(n_hashes), id)
}
rustc_lexer::LiteralKind::Int { base, empty_int } => {
if empty_int {
self.err_span_(start, suffix_start, "no valid digits found for number");
(token::Integer, sym::integer(0))
} else {
self.validate_int_literal(base, start, suffix_start);
(token::Integer, self.symbol_from_to(start, suffix_start))
}
}
rustc_lexer::LiteralKind::Float { base, empty_exponent } => {
if empty_exponent {
let mut err = self.struct_span_fatal(
start,
self.pos,
"expected at least one digit in exponent",
);
err.emit();
}
match base {
Base::Hexadecimal => self.err_span_(
start,
suffix_start,
"hexadecimal float literal is not supported",
),
Base::Octal => {
self.err_span_(start, suffix_start, "octal float literal is not supported")
}
Base::Binary => {
self.err_span_(start, suffix_start, "binary float literal is not supported")
}
_ => (),
}
let id = self.symbol_from_to(start, suffix_start);
(token::Float, id)
}
}
}
/// Converts an absolute source position into an index into `self.src`.
#[inline]
fn src_index(&self, pos: BytePos) -> usize {
    (pos - self.start_pos).to_usize()
}

/// Slice of the source text from `start` up to but excluding `self.pos`,
/// meaning the slice does not include the character `self.ch`.
fn str_from(&self, start: BytePos) -> &str {
    self.str_from_to(start, self.pos)
}

/// Creates a Symbol from a given offset to the current offset.
// NOTE(review): the debug message says "ident" but this interns any symbol,
// not only identifiers — message kept as-is since it is runtime log output.
fn symbol_from(&self, start: BytePos) -> Symbol {
    debug!("taking an ident from {:?} to {:?}", start, self.pos);
    Symbol::intern(self.str_from(start))
}

/// As symbol_from, with an explicit endpoint.
fn symbol_from_to(&self, start: BytePos, end: BytePos) -> Symbol {
    debug!("taking an ident from {:?} to {:?}", start, end);
    Symbol::intern(self.str_from_to(start, end))
}

/// Slice of the source text spanning from `start` up to but excluding `end`.
fn str_from_to(&self, start: BytePos, end: BytePos) -> &str {
    &self.src[self.src_index(start)..self.src_index(end)]
}
/// Emits one error per bare CR (`\r`) character found in `s`, which starts at
/// position `start` in the source. Each error spans exactly the offending byte.
fn forbid_bare_cr(&self, start: BytePos, s: &str, errmsg: &str) {
    // `match_indices` yields the byte offset of every `\r` directly,
    // replacing the original manual `find`-and-advance index bookkeeping.
    for (idx, _) in s.match_indices('\r') {
        let idx = idx as u32;
        self.err_span_(start + BytePos(idx), start + BytePos(idx + 1), errmsg);
    }
}
/// Reports a fatal error for a raw string whose prefix was followed by an
/// invalid character (anything other than `#` before the opening quote).
/// Never returns.
fn report_non_started_raw_string(&self, start: BytePos) -> ! {
    // The offending character is the last one consumed so far.
    let bad_char = self.str_from(start).chars().last().unwrap();
    self.struct_fatal_span_char(
        start,
        self.pos,
        "found invalid character; only `#` is allowed \
         in raw string delimitation",
        bad_char,
    )
    .emit();
    FatalError.raise()
}
/// Reports a fatal "unterminated raw string" error at `start`. `n_hashes` is
/// the number of `#`s in the opening delimiter; when non-zero, a note shows
/// the exact closing delimiter the string should be terminated with.
/// Never returns.
fn report_unterminated_raw_string(&self, start: BytePos, n_hashes: usize) -> ! {
    let mut err = self.sess.span_diagnostic.struct_span_fatal_with_code(
        self.mk_sp(start, start),
        "unterminated raw string",
        error_code!(E0748),
    );
    err.span_label(self.mk_sp(start, start), "unterminated raw string");
    if n_hashes > 0 {
        err.note(&format!(
            "this raw string should be terminated with `\"{}`",
            // `n_hashes` is already `usize`; the previous `as usize` cast was a no-op.
            "#".repeat(n_hashes)
        ));
    }
    err.emit();
    FatalError.raise()
}
fn restrict_n_hashes(&self, start: BytePos, n_hashes: usize) -> u16 {
match n_hashes.try_into() {
Ok(n_hashes) => n_hashes,
Err(_) => {
self.fatal_span_(
start,
self.pos,
"too many `#` symbols: raw strings may be \
delimited by up to 65535 `#` symbols",
)
.raise();
}
}
}
/// Checks the escapes inside a char literal body and reports any malformed
/// ones. The reported span is widened by one byte on each side so it also
/// covers the surrounding quote characters.
fn validate_char_escape(&self, content_start: BytePos, content_end: BytePos) {
    let lit = self.str_from_to(content_start, content_end);
    match unescape::unescape_char(lit) {
        Ok(_) => {}
        Err((off, err)) => emit_unescape_error(
            &self.sess.span_diagnostic,
            lit,
            self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
            unescape::Mode::Char,
            0..off,
            err,
        ),
    }
}

/// As `validate_char_escape`, but for byte literals (`b'...'`).
fn validate_byte_escape(&self, content_start: BytePos, content_end: BytePos) {
    let lit = self.str_from_to(content_start, content_end);
    match unescape::unescape_byte(lit) {
        Ok(_) => {}
        Err((off, err)) => emit_unescape_error(
            &self.sess.span_diagnostic,
            lit,
            self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
            unescape::Mode::Byte,
            0..off,
            err,
        ),
    }
}
/// Walks a string literal body with `unescape_str`, reporting every malformed
/// escape sequence. The span is widened by one byte on each side to also
/// cover the surrounding quote characters.
fn validate_str_escape(&self, content_start: BytePos, content_end: BytePos) {
    let lit = self.str_from_to(content_start, content_end);
    unescape::unescape_str(lit, &mut |range, res| match res {
        Ok(_) => {}
        Err(err) => emit_unescape_error(
            &self.sess.span_diagnostic,
            lit,
            self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
            unescape::Mode::Str,
            range,
            err,
        ),
    })
}

/// As `validate_str_escape`, but for raw string bodies (`r"..."`).
// NOTE(review): raw strings process no escapes, so presumably only bare-CR
// errors can arise here; errors are emitted in `Mode::Str` — confirm in
// `unescape` that this is intended.
fn validate_raw_str_escape(&self, content_start: BytePos, content_end: BytePos) {
    let lit = self.str_from_to(content_start, content_end);
    unescape::unescape_raw_str(lit, &mut |range, res| match res {
        Ok(_) => {}
        Err(err) => emit_unescape_error(
            &self.sess.span_diagnostic,
            lit,
            self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
            unescape::Mode::Str,
            range,
            err,
        ),
    })
}
/// As `validate_byte_str_escape`, but for raw byte string bodies (`br"..."`).
fn validate_raw_byte_str_escape(&self, content_start: BytePos, content_end: BytePos) {
    let lit = self.str_from_to(content_start, content_end);
    unescape::unescape_raw_byte_str(lit, &mut |range, res| match res {
        Ok(_) => {}
        Err(err) => emit_unescape_error(
            &self.sess.span_diagnostic,
            lit,
            self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
            unescape::Mode::ByteStr,
            range,
            err,
        ),
    })
}

/// Walks a byte string literal body (`b"..."`), reporting every malformed
/// escape sequence. The span is widened by one byte on each side to also
/// cover the surrounding quote characters.
fn validate_byte_str_escape(&self, content_start: BytePos, content_end: BytePos) {
    let lit = self.str_from_to(content_start, content_end);
    unescape::unescape_byte_str(lit, &mut |range, res| match res {
        Ok(_) => {}
        Err(err) => emit_unescape_error(
            &self.sess.span_diagnostic,
            lit,
            self.mk_sp(content_start - BytePos(1), content_end + BytePos(1)),
            unescape::Mode::ByteStr,
            range,
            err,
        ),
    })
}
/// Checks the digits of a binary or octal integer literal; each out-of-range
/// digit gets its own error span. Decimal and hexadecimal literals need no
/// check here, so the function returns early for them.
fn validate_int_literal(&self, base: Base, content_start: BytePos, content_end: BytePos) {
    let radix = match base {
        Base::Binary => 2,
        Base::Octal => 8,
        _ => return,
    };
    // Skip the two-byte `0b`/`0o` prefix.
    let digits = self.str_from_to(content_start + BytePos(2), content_end);
    for (pos, ch) in digits.char_indices() {
        let pos = pos as u32;
        // `_` separators are always permitted.
        if ch == '_' || ch.to_digit(radix).is_some() {
            continue;
        }
        let lo = content_start + BytePos(2 + pos);
        let hi = content_start + BytePos(2 + pos + ch.len_utf8() as u32);
        self.err_span_(lo, hi, &format!("invalid digit for a base {} literal", radix));
    }
}
}
/// Interns the NFC normalization of `string`, skipping the allocation when
/// the quick check proves the input is already in NFC form.
pub fn nfc_normalize(string: &str) -> Symbol {
    use unicode_normalization::{is_nfc_quick, IsNormalized, UnicodeNormalization};
    if let IsNormalized::Yes = is_nfc_quick(string.chars()) {
        Symbol::intern(string)
    } else {
        // Quick check was `No` or `Maybe`: normalize into a fresh String.
        let normalized: String = string.chars().nfc().collect();
        Symbol::intern(&normalized)
    }
}
|
{
self.sess.span_diagnostic.struct_span_err(sp, m).emit();
}
|
// ===== file: commands.rs =====
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::server::Facade;
use anyhow::Error;
use futures::future::{FutureExt, LocalBoxFuture};
use serde_json::Value;
use crate::setui::facade::SetUiFacade;
use crate::setui::types::SetUiMethod;
impl Facade for SetUiFacade {
    /// Routes an incoming JSON-RPC request to `setui_method_to_fidl` and boxes
    /// the resulting future locally (`LocalBoxFuture` — not `Send`).
    fn handle_request(
        &self,
        method: String,
        args: Value,
    ) -> LocalBoxFuture<'_, Result<Value, Error>> {
        setui_method_to_fidl(method, args, self).boxed_local()
    }
}
/// Takes JSON-RPC method command and forwards to corresponding SetUi FIDL method.
async fn
|
(
method_name: String,
args: Value,
facade: &SetUiFacade,
) -> Result<Value, Error> {
match method_name.parse()? {
SetUiMethod::Mutate => facade.mutate(args).await,
SetUiMethod::SetNetwork => facade.set_network(args).await,
SetUiMethod::GetNetwork => facade.get_network_setting().await,
}
}
|
setui_method_to_fidl
|
// ===== file: pulumiTypes.go =====
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v1
import (
"context"
"reflect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
type AuditConfig struct {
// The configuration for logging of each type of permission.
AuditLogConfigs []AuditLogConfig `pulumi:"auditLogConfigs"`
// Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
Service *string `pulumi:"service"`
}
// AuditConfigInput is an input type that accepts AuditConfigArgs and AuditConfigOutput values.
// You can construct a concrete instance of `AuditConfigInput` via:
//
// AuditConfigArgs{...}
type AuditConfigInput interface {
pulumi.Input
ToAuditConfigOutput() AuditConfigOutput
ToAuditConfigOutputWithContext(context.Context) AuditConfigOutput
}
// Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
type AuditConfigArgs struct {
// The configuration for logging of each type of permission.
AuditLogConfigs AuditLogConfigArrayInput `pulumi:"auditLogConfigs"`
// Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
Service pulumi.StringPtrInput `pulumi:"service"`
}
// ElementType returns the reflected element type (AuditConfig) of this input.
func (AuditConfigArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*AuditConfig)(nil)).Elem()
}

// ToAuditConfigOutput converts the args to an AuditConfigOutput using the background context.
func (i AuditConfigArgs) ToAuditConfigOutput() AuditConfigOutput {
	return i.ToAuditConfigOutputWithContext(context.Background())
}

// ToAuditConfigOutputWithContext converts the args to an AuditConfigOutput with the given context.
func (i AuditConfigArgs) ToAuditConfigOutputWithContext(ctx context.Context) AuditConfigOutput {
	return pulumi.ToOutputWithContext(ctx, i).(AuditConfigOutput)
}
// AuditConfigArrayInput is an input type that accepts AuditConfigArray and AuditConfigArrayOutput values.
// You can construct a concrete instance of `AuditConfigArrayInput` via:
//
// AuditConfigArray{ AuditConfigArgs{...} }
type AuditConfigArrayInput interface {
pulumi.Input
ToAuditConfigArrayOutput() AuditConfigArrayOutput
ToAuditConfigArrayOutputWithContext(context.Context) AuditConfigArrayOutput
}
// AuditConfigArray is a list of AuditConfigInput values that itself implements AuditConfigArrayInput.
type AuditConfigArray []AuditConfigInput

// ElementType returns the reflected element type ([]AuditConfig) of this input.
func (AuditConfigArray) ElementType() reflect.Type {
	return reflect.TypeOf((*[]AuditConfig)(nil)).Elem()
}

// ToAuditConfigArrayOutput converts the array to an AuditConfigArrayOutput using the background context.
func (i AuditConfigArray) ToAuditConfigArrayOutput() AuditConfigArrayOutput {
	return i.ToAuditConfigArrayOutputWithContext(context.Background())
}
|
}
// Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
type AuditConfigOutput struct{ *pulumi.OutputState }
// ElementType returns the reflected element type (AuditConfig) of this output.
func (AuditConfigOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*AuditConfig)(nil)).Elem()
}

// ToAuditConfigOutput returns the output unchanged.
func (o AuditConfigOutput) ToAuditConfigOutput() AuditConfigOutput {
	return o
}

// ToAuditConfigOutputWithContext returns the output unchanged; the context is unused.
func (o AuditConfigOutput) ToAuditConfigOutputWithContext(ctx context.Context) AuditConfigOutput {
	return o
}
// The configuration for logging of each type of permission.
func (o AuditConfigOutput) AuditLogConfigs() AuditLogConfigArrayOutput {
return o.ApplyT(func(v AuditConfig) []AuditLogConfig { return v.AuditLogConfigs }).(AuditLogConfigArrayOutput)
}
// Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
func (o AuditConfigOutput) Service() pulumi.StringPtrOutput {
return o.ApplyT(func(v AuditConfig) *string { return v.Service }).(pulumi.StringPtrOutput)
}
// AuditConfigArrayOutput is a pulumi Output that resolves to a []AuditConfig value.
type AuditConfigArrayOutput struct{ *pulumi.OutputState }

// ElementType returns the reflected element type ([]AuditConfig) of this output.
func (AuditConfigArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]AuditConfig)(nil)).Elem()
}

// ToAuditConfigArrayOutput returns the output unchanged.
func (o AuditConfigArrayOutput) ToAuditConfigArrayOutput() AuditConfigArrayOutput {
	return o
}

// ToAuditConfigArrayOutputWithContext returns the output unchanged; the context is unused.
func (o AuditConfigArrayOutput) ToAuditConfigArrayOutputWithContext(ctx context.Context) AuditConfigArrayOutput {
	return o
}

// Index returns an output resolving to the element at index i of the array.
func (o AuditConfigArrayOutput) Index(i pulumi.IntInput) AuditConfigOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) AuditConfig {
		return vs[0].([]AuditConfig)[vs[1].(int)]
	}).(AuditConfigOutput)
}
// Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
type AuditConfigResponse struct {
// The configuration for logging of each type of permission.
AuditLogConfigs []AuditLogConfigResponse `pulumi:"auditLogConfigs"`
// Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
Service string `pulumi:"service"`
}
// Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs. If there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted. Example Policy with multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ "user:[email protected]" ] } ] } ] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging, and [email protected] from DATA_WRITE logging.
type AuditConfigResponseOutput struct{ *pulumi.OutputState }
// ElementType returns the reflected element type (AuditConfigResponse) of this output.
func (AuditConfigResponseOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*AuditConfigResponse)(nil)).Elem()
}

// ToAuditConfigResponseOutput returns the output unchanged.
func (o AuditConfigResponseOutput) ToAuditConfigResponseOutput() AuditConfigResponseOutput {
	return o
}

// ToAuditConfigResponseOutputWithContext returns the output unchanged; the context is unused.
func (o AuditConfigResponseOutput) ToAuditConfigResponseOutputWithContext(ctx context.Context) AuditConfigResponseOutput {
	return o
}
// The configuration for logging of each type of permission.
func (o AuditConfigResponseOutput) AuditLogConfigs() AuditLogConfigResponseArrayOutput {
return o.ApplyT(func(v AuditConfigResponse) []AuditLogConfigResponse { return v.AuditLogConfigs }).(AuditLogConfigResponseArrayOutput)
}
// Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.
func (o AuditConfigResponseOutput) Service() pulumi.StringOutput {
return o.ApplyT(func(v AuditConfigResponse) string { return v.Service }).(pulumi.StringOutput)
}
// AuditConfigResponseArrayOutput is a pulumi Output that resolves to a []AuditConfigResponse value.
type AuditConfigResponseArrayOutput struct{ *pulumi.OutputState }

// ElementType returns the reflected element type ([]AuditConfigResponse) of this output.
func (AuditConfigResponseArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]AuditConfigResponse)(nil)).Elem()
}

// ToAuditConfigResponseArrayOutput returns the output unchanged.
func (o AuditConfigResponseArrayOutput) ToAuditConfigResponseArrayOutput() AuditConfigResponseArrayOutput {
	return o
}

// ToAuditConfigResponseArrayOutputWithContext returns the output unchanged; the context is unused.
func (o AuditConfigResponseArrayOutput) ToAuditConfigResponseArrayOutputWithContext(ctx context.Context) AuditConfigResponseArrayOutput {
	return o
}

// Index returns an output resolving to the element at index i of the array.
func (o AuditConfigResponseArrayOutput) Index(i pulumi.IntInput) AuditConfigResponseOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) AuditConfigResponse {
		return vs[0].([]AuditConfigResponse)[vs[1].(int)]
	}).(AuditConfigResponseOutput)
}
// Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
type AuditLogConfig struct {
// Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
ExemptedMembers []string `pulumi:"exemptedMembers"`
// The log type that this config enables.
LogType *AuditLogConfigLogType `pulumi:"logType"`
}
// AuditLogConfigInput is an input type that accepts AuditLogConfigArgs and AuditLogConfigOutput values.
// You can construct a concrete instance of `AuditLogConfigInput` via:
//
// AuditLogConfigArgs{...}
type AuditLogConfigInput interface {
pulumi.Input
ToAuditLogConfigOutput() AuditLogConfigOutput
ToAuditLogConfigOutputWithContext(context.Context) AuditLogConfigOutput
}
// Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
type AuditLogConfigArgs struct {
// Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
ExemptedMembers pulumi.StringArrayInput `pulumi:"exemptedMembers"`
// The log type that this config enables.
LogType AuditLogConfigLogTypePtrInput `pulumi:"logType"`
}
// ElementType returns the reflected element type (AuditLogConfig) of this input.
func (AuditLogConfigArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*AuditLogConfig)(nil)).Elem()
}

// ToAuditLogConfigOutput converts the args to an AuditLogConfigOutput using the background context.
func (i AuditLogConfigArgs) ToAuditLogConfigOutput() AuditLogConfigOutput {
	return i.ToAuditLogConfigOutputWithContext(context.Background())
}

// ToAuditLogConfigOutputWithContext converts the args to an AuditLogConfigOutput with the given context.
func (i AuditLogConfigArgs) ToAuditLogConfigOutputWithContext(ctx context.Context) AuditLogConfigOutput {
	return pulumi.ToOutputWithContext(ctx, i).(AuditLogConfigOutput)
}
// AuditLogConfigArrayInput is an input type that accepts AuditLogConfigArray and AuditLogConfigArrayOutput values.
// You can construct a concrete instance of `AuditLogConfigArrayInput` via:
//
// AuditLogConfigArray{ AuditLogConfigArgs{...} }
type AuditLogConfigArrayInput interface {
pulumi.Input
ToAuditLogConfigArrayOutput() AuditLogConfigArrayOutput
ToAuditLogConfigArrayOutputWithContext(context.Context) AuditLogConfigArrayOutput
}
// AuditLogConfigArray is a list of AuditLogConfigInput values that itself implements AuditLogConfigArrayInput.
type AuditLogConfigArray []AuditLogConfigInput

// ElementType returns the reflected element type ([]AuditLogConfig) of this input.
func (AuditLogConfigArray) ElementType() reflect.Type {
	return reflect.TypeOf((*[]AuditLogConfig)(nil)).Elem()
}

// ToAuditLogConfigArrayOutput converts the array to an AuditLogConfigArrayOutput using the background context.
func (i AuditLogConfigArray) ToAuditLogConfigArrayOutput() AuditLogConfigArrayOutput {
	return i.ToAuditLogConfigArrayOutputWithContext(context.Background())
}

// ToAuditLogConfigArrayOutputWithContext converts the array to an AuditLogConfigArrayOutput with the given context.
func (i AuditLogConfigArray) ToAuditLogConfigArrayOutputWithContext(ctx context.Context) AuditLogConfigArrayOutput {
	return pulumi.ToOutputWithContext(ctx, i).(AuditLogConfigArrayOutput)
}
// Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
type AuditLogConfigOutput struct{ *pulumi.OutputState }
// ElementType returns the reflected element type (AuditLogConfig) of this output.
func (AuditLogConfigOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*AuditLogConfig)(nil)).Elem()
}

// ToAuditLogConfigOutput returns the output unchanged.
func (o AuditLogConfigOutput) ToAuditLogConfigOutput() AuditLogConfigOutput {
	return o
}

// ToAuditLogConfigOutputWithContext returns the output unchanged; the context is unused.
func (o AuditLogConfigOutput) ToAuditLogConfigOutputWithContext(ctx context.Context) AuditLogConfigOutput {
	return o
}
// Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
func (o AuditLogConfigOutput) ExemptedMembers() pulumi.StringArrayOutput {
return o.ApplyT(func(v AuditLogConfig) []string { return v.ExemptedMembers }).(pulumi.StringArrayOutput)
}
// The log type that this config enables.
func (o AuditLogConfigOutput) LogType() AuditLogConfigLogTypePtrOutput {
return o.ApplyT(func(v AuditLogConfig) *AuditLogConfigLogType { return v.LogType }).(AuditLogConfigLogTypePtrOutput)
}
// AuditLogConfigArrayOutput is a pulumi Output that resolves to a []AuditLogConfig value.
type AuditLogConfigArrayOutput struct{ *pulumi.OutputState }

// ElementType returns the reflected element type ([]AuditLogConfig) of this output.
func (AuditLogConfigArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]AuditLogConfig)(nil)).Elem()
}

// ToAuditLogConfigArrayOutput returns the output unchanged.
func (o AuditLogConfigArrayOutput) ToAuditLogConfigArrayOutput() AuditLogConfigArrayOutput {
	return o
}

// ToAuditLogConfigArrayOutputWithContext returns the output unchanged; the context is unused.
func (o AuditLogConfigArrayOutput) ToAuditLogConfigArrayOutputWithContext(ctx context.Context) AuditLogConfigArrayOutput {
	return o
}

// Index returns an output resolving to the element at index i of the array.
func (o AuditLogConfigArrayOutput) Index(i pulumi.IntInput) AuditLogConfigOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) AuditLogConfig {
		return vs[0].([]AuditLogConfig)[vs[1].(int)]
	}).(AuditLogConfigOutput)
}
// Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
type AuditLogConfigResponse struct {
// Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
ExemptedMembers []string `pulumi:"exemptedMembers"`
// The log type that this config enables.
LogType string `pulumi:"logType"`
}
// Provides the configuration for logging a type of permissions. Example: { "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ "user:[email protected]" ] }, { "log_type": "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting [email protected] from DATA_READ logging.
type AuditLogConfigResponseOutput struct{ *pulumi.OutputState }
// ElementType returns the reflected element type (AuditLogConfigResponse) of this output.
func (AuditLogConfigResponseOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*AuditLogConfigResponse)(nil)).Elem()
}

// ToAuditLogConfigResponseOutput returns the output unchanged.
func (o AuditLogConfigResponseOutput) ToAuditLogConfigResponseOutput() AuditLogConfigResponseOutput {
	return o
}

// ToAuditLogConfigResponseOutputWithContext returns the output unchanged; the context is unused.
func (o AuditLogConfigResponseOutput) ToAuditLogConfigResponseOutputWithContext(ctx context.Context) AuditLogConfigResponseOutput {
	return o
}
// Specifies the identities that do not cause logging for this type of permission. Follows the same format of Binding.members.
func (o AuditLogConfigResponseOutput) ExemptedMembers() pulumi.StringArrayOutput {
return o.ApplyT(func(v AuditLogConfigResponse) []string { return v.ExemptedMembers }).(pulumi.StringArrayOutput)
}
// The log type that this config enables.
func (o AuditLogConfigResponseOutput) LogType() pulumi.StringOutput {
return o.ApplyT(func(v AuditLogConfigResponse) string { return v.LogType }).(pulumi.StringOutput)
}
// AuditLogConfigResponseArrayOutput is a pulumi Output that resolves to a []AuditLogConfigResponse value.
type AuditLogConfigResponseArrayOutput struct{ *pulumi.OutputState }

// ElementType returns the reflected element type ([]AuditLogConfigResponse) of this output.
func (AuditLogConfigResponseArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]AuditLogConfigResponse)(nil)).Elem()
}

// ToAuditLogConfigResponseArrayOutput returns the output unchanged.
func (o AuditLogConfigResponseArrayOutput) ToAuditLogConfigResponseArrayOutput() AuditLogConfigResponseArrayOutput {
	return o
}

// ToAuditLogConfigResponseArrayOutputWithContext returns the output unchanged; the context is unused.
func (o AuditLogConfigResponseArrayOutput) ToAuditLogConfigResponseArrayOutputWithContext(ctx context.Context) AuditLogConfigResponseArrayOutput {
	return o
}

// Index returns an output resolving to the element at index i of the array.
func (o AuditLogConfigResponseArrayOutput) Index(i pulumi.IntInput) AuditLogConfigResponseOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) AuditLogConfigResponse {
		return vs[0].([]AuditLogConfigResponse)[vs[1].(int)]
	}).(AuditLogConfigResponseOutput)
}
// Associates `members`, or principals, with a `role`.
type Binding struct {
// The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
Condition *Expr `pulumi:"condition"`
// Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
Members []string `pulumi:"members"`
// Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
Role *string `pulumi:"role"`
}
// BindingInput is an input type that accepts BindingArgs and BindingOutput values.
// You can construct a concrete instance of `BindingInput` via:
//
// BindingArgs{...}
type BindingInput interface {
pulumi.Input
ToBindingOutput() BindingOutput
ToBindingOutputWithContext(context.Context) BindingOutput
}
// Associates `members`, or principals, with a `role`.
type BindingArgs struct {
// The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
Condition ExprPtrInput `pulumi:"condition"`
// Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
Members pulumi.StringArrayInput `pulumi:"members"`
// Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
Role pulumi.StringPtrInput `pulumi:"role"`
}
// ElementType returns the reflected element type (Binding) of this input.
func (BindingArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*Binding)(nil)).Elem()
}

// ToBindingOutput converts the args to a BindingOutput using the background context.
func (i BindingArgs) ToBindingOutput() BindingOutput {
	return i.ToBindingOutputWithContext(context.Background())
}

// ToBindingOutputWithContext converts the args to a BindingOutput with the given context.
func (i BindingArgs) ToBindingOutputWithContext(ctx context.Context) BindingOutput {
	return pulumi.ToOutputWithContext(ctx, i).(BindingOutput)
}
// BindingArrayInput is an input type that accepts BindingArray and BindingArrayOutput values.
// You can construct a concrete instance of `BindingArrayInput` via:
//
//	BindingArray{ BindingArgs{...} }
type BindingArrayInput interface {
	pulumi.Input
	ToBindingArrayOutput() BindingArrayOutput
	ToBindingArrayOutputWithContext(context.Context) BindingArrayOutput
}
// BindingArray is a slice of BindingInput values, usable wherever a BindingArrayInput is expected.
type BindingArray []BindingInput
// ElementType returns the reflect.Type of the element value ([]Binding) produced by this input.
func (BindingArray) ElementType() reflect.Type {
	return reflect.TypeOf((*[]Binding)(nil)).Elem()
}
// ToBindingArrayOutput converts the array to a BindingArrayOutput using the background context.
func (i BindingArray) ToBindingArrayOutput() BindingArrayOutput {
	return i.ToBindingArrayOutputWithContext(context.Background())
}
// ToBindingArrayOutputWithContext converts the array to a BindingArrayOutput, resolving with the given context.
func (i BindingArray) ToBindingArrayOutputWithContext(ctx context.Context) BindingArrayOutput {
	return pulumi.ToOutputWithContext(ctx, i).(BindingArrayOutput)
}
// Associates `members`, or principals, with a `role`.
type BindingOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (Binding) carried by this output.
func (BindingOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*Binding)(nil)).Elem()
}
// ToBindingOutput returns the output itself (identity conversion).
func (o BindingOutput) ToBindingOutput() BindingOutput {
	return o
}
// ToBindingOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o BindingOutput) ToBindingOutputWithContext(ctx context.Context) BindingOutput {
	return o
}
// The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
func (o BindingOutput) Condition() ExprPtrOutput {
	return o.ApplyT(func(v Binding) *Expr { return v.Condition }).(ExprPtrOutput)
}
// Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
func (o BindingOutput) Members() pulumi.StringArrayOutput {
	return o.ApplyT(func(v Binding) []string { return v.Members }).(pulumi.StringArrayOutput)
}
// Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
func (o BindingOutput) Role() pulumi.StringPtrOutput {
	return o.ApplyT(func(v Binding) *string { return v.Role }).(pulumi.StringPtrOutput)
}
// BindingArrayOutput wraps an output whose resolved value is a []Binding.
type BindingArrayOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value ([]Binding) carried by this output.
func (BindingArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]Binding)(nil)).Elem()
}
// ToBindingArrayOutput returns the output itself (identity conversion).
func (o BindingArrayOutput) ToBindingArrayOutput() BindingArrayOutput {
	return o
}
// ToBindingArrayOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o BindingArrayOutput) ToBindingArrayOutputWithContext(ctx context.Context) BindingArrayOutput {
	return o
}
// Index returns an output for the element at position i of the underlying []Binding.
func (o BindingArrayOutput) Index(i pulumi.IntInput) BindingOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) Binding {
		bindings := vs[0].([]Binding)
		idx := vs[1].(int)
		return bindings[idx]
	}).(BindingOutput)
}
// Associates `members`, or principals, with a `role`.
// Response variant: all fields are plain (non-input) values as returned by the API.
type BindingResponse struct {
	// The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
	Condition ExprResponse `pulumi:"condition"`
	// Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
	Members []string `pulumi:"members"`
	// Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
	Role string `pulumi:"role"`
}
// Associates `members`, or principals, with a `role`.
type BindingResponseOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (BindingResponse) carried by this output.
func (BindingResponseOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*BindingResponse)(nil)).Elem()
}
// ToBindingResponseOutput returns the output itself (identity conversion).
func (o BindingResponseOutput) ToBindingResponseOutput() BindingResponseOutput {
	return o
}
// ToBindingResponseOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o BindingResponseOutput) ToBindingResponseOutputWithContext(ctx context.Context) BindingResponseOutput {
	return o
}
// The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
func (o BindingResponseOutput) Condition() ExprResponseOutput {
	return o.ApplyT(func(v BindingResponse) ExprResponse { return v.Condition }).(ExprResponseOutput)
}
// Specifies the principals requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
func (o BindingResponseOutput) Members() pulumi.StringArrayOutput {
	return o.ApplyT(func(v BindingResponse) []string { return v.Members }).(pulumi.StringArrayOutput)
}
// Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
func (o BindingResponseOutput) Role() pulumi.StringOutput {
	return o.ApplyT(func(v BindingResponse) string { return v.Role }).(pulumi.StringOutput)
}
// BindingResponseArrayOutput wraps an output whose resolved value is a []BindingResponse.
type BindingResponseArrayOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value ([]BindingResponse) carried by this output.
func (BindingResponseArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]BindingResponse)(nil)).Elem()
}
// ToBindingResponseArrayOutput returns the output itself (identity conversion).
func (o BindingResponseArrayOutput) ToBindingResponseArrayOutput() BindingResponseArrayOutput {
	return o
}
// ToBindingResponseArrayOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o BindingResponseArrayOutput) ToBindingResponseArrayOutputWithContext(ctx context.Context) BindingResponseArrayOutput {
	return o
}
// Index returns an output for the element at position i of the underlying []BindingResponse.
func (o BindingResponseArrayOutput) Index(i pulumi.IntInput) BindingResponseOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) BindingResponse {
		responses := vs[0].([]BindingResponse)
		idx := vs[1].(int)
		return responses[idx]
	}).(BindingResponseOutput)
}
// Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
type Expr struct {
	// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
	Description *string `pulumi:"description"`
	// Textual representation of an expression in Common Expression Language syntax.
	Expression *string `pulumi:"expression"`
	// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
	Location *string `pulumi:"location"`
	// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
	Title *string `pulumi:"title"`
}
// ExprInput is an input type that accepts ExprArgs and ExprOutput values.
// You can construct a concrete instance of `ExprInput` via:
//
//	ExprArgs{...}
type ExprInput interface {
	pulumi.Input
	ToExprOutput() ExprOutput
	ToExprOutputWithContext(context.Context) ExprOutput
}
// Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
type ExprArgs struct {
	// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
	Description pulumi.StringPtrInput `pulumi:"description"`
	// Textual representation of an expression in Common Expression Language syntax.
	Expression pulumi.StringPtrInput `pulumi:"expression"`
	// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
	Location pulumi.StringPtrInput `pulumi:"location"`
	// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
	Title pulumi.StringPtrInput `pulumi:"title"`
}
// ElementType returns the reflect.Type of the element value (Expr) produced by this input.
func (ExprArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*Expr)(nil)).Elem()
}
// ToExprOutput converts the args to an ExprOutput using the background context.
func (i ExprArgs) ToExprOutput() ExprOutput {
	return i.ToExprOutputWithContext(context.Background())
}
// ToExprOutputWithContext converts the args to an ExprOutput, resolving with the given context.
func (i ExprArgs) ToExprOutputWithContext(ctx context.Context) ExprOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ExprOutput)
}
// ToExprPtrOutput converts the args to an ExprPtrOutput using the background context.
func (i ExprArgs) ToExprPtrOutput() ExprPtrOutput {
	return i.ToExprPtrOutputWithContext(context.Background())
}
// ToExprPtrOutputWithContext converts the args to an ExprPtrOutput via the plain ExprOutput form.
func (i ExprArgs) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ExprOutput).ToExprPtrOutputWithContext(ctx)
}
// ExprPtrInput is an input type that accepts ExprArgs, ExprPtr and ExprPtrOutput values.
// You can construct a concrete instance of `ExprPtrInput` via:
//
//	ExprArgs{...}
//
// or:
//
//	nil
type ExprPtrInput interface {
	pulumi.Input
	ToExprPtrOutput() ExprPtrOutput
	ToExprPtrOutputWithContext(context.Context) ExprPtrOutput
}
// exprPtrType adapts a *ExprArgs so it can satisfy ExprPtrInput.
type exprPtrType ExprArgs
// ExprPtr wraps an optional ExprArgs value as an ExprPtrInput.
func ExprPtr(v *ExprArgs) ExprPtrInput {
	return (*exprPtrType)(v)
}
// ElementType returns the reflect.Type of the element value (*Expr) produced by this input.
func (*exprPtrType) ElementType() reflect.Type {
	return reflect.TypeOf((**Expr)(nil)).Elem()
}
// ToExprPtrOutput converts the pointer input to an ExprPtrOutput using the background context.
func (i *exprPtrType) ToExprPtrOutput() ExprPtrOutput {
	return i.ToExprPtrOutputWithContext(context.Background())
}
// ToExprPtrOutputWithContext converts the pointer input to an ExprPtrOutput, resolving with the given context.
func (i *exprPtrType) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ExprPtrOutput)
}
// Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
type ExprOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (Expr) carried by this output.
func (ExprOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*Expr)(nil)).Elem()
}
// ToExprOutput returns the output itself (identity conversion).
func (o ExprOutput) ToExprOutput() ExprOutput {
	return o
}
// ToExprOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o ExprOutput) ToExprOutputWithContext(ctx context.Context) ExprOutput {
	return o
}
// ToExprPtrOutput lifts the plain output into a pointer output using the background context.
func (o ExprOutput) ToExprPtrOutput() ExprPtrOutput {
	return o.ToExprPtrOutputWithContext(context.Background())
}
// ToExprPtrOutputWithContext lifts the plain output into a pointer output by taking the address of each value.
func (o ExprOutput) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput {
	return o.ApplyTWithContext(ctx, func(_ context.Context, v Expr) *Expr {
		return &v
	}).(ExprPtrOutput)
}
// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
func (o ExprOutput) Description() pulumi.StringPtrOutput {
	return o.ApplyT(func(v Expr) *string { return v.Description }).(pulumi.StringPtrOutput)
}
// Textual representation of an expression in Common Expression Language syntax.
func (o ExprOutput) Expression() pulumi.StringPtrOutput {
	return o.ApplyT(func(v Expr) *string { return v.Expression }).(pulumi.StringPtrOutput)
}
// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
func (o ExprOutput) Location() pulumi.StringPtrOutput {
	return o.ApplyT(func(v Expr) *string { return v.Location }).(pulumi.StringPtrOutput)
}
// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
func (o ExprOutput) Title() pulumi.StringPtrOutput {
	return o.ApplyT(func(v Expr) *string { return v.Title }).(pulumi.StringPtrOutput)
}
// ExprPtrOutput wraps an output whose resolved value is a *Expr (optional Expr).
type ExprPtrOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (*Expr) carried by this output.
func (ExprPtrOutput) ElementType() reflect.Type {
	return reflect.TypeOf((**Expr)(nil)).Elem()
}
// ToExprPtrOutput returns the output itself (identity conversion).
func (o ExprPtrOutput) ToExprPtrOutput() ExprPtrOutput {
	return o
}
// ToExprPtrOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o ExprPtrOutput) ToExprPtrOutputWithContext(ctx context.Context) ExprPtrOutput {
	return o
}
// Elem dereferences the pointer output, yielding a zero-valued Expr when the pointer is nil.
func (o ExprPtrOutput) Elem() ExprOutput {
	return o.ApplyT(func(v *Expr) Expr {
		if v == nil {
			return Expr{}
		}
		return *v
	}).(ExprOutput)
}
// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
// Yields nil when the underlying *Expr is nil.
func (o ExprPtrOutput) Description() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *Expr) *string {
		if v != nil {
			return v.Description
		}
		return nil
	}).(pulumi.StringPtrOutput)
}
// Textual representation of an expression in Common Expression Language syntax.
// Yields nil when the underlying *Expr is nil.
func (o ExprPtrOutput) Expression() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *Expr) *string {
		if v != nil {
			return v.Expression
		}
		return nil
	}).(pulumi.StringPtrOutput)
}
// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
// Yields nil when the underlying *Expr is nil.
func (o ExprPtrOutput) Location() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *Expr) *string {
		if v != nil {
			return v.Location
		}
		return nil
	}).(pulumi.StringPtrOutput)
}
// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
// Yields nil when the underlying *Expr is nil.
func (o ExprPtrOutput) Title() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *Expr) *string {
		if v != nil {
			return v.Title
		}
		return nil
	}).(pulumi.StringPtrOutput)
}
// Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
// Response variant: all fields are plain (non-input) values as returned by the API.
type ExprResponse struct {
	// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
	Description string `pulumi:"description"`
	// Textual representation of an expression in Common Expression Language syntax.
	Expression string `pulumi:"expression"`
	// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
	Location string `pulumi:"location"`
	// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
	Title string `pulumi:"title"`
}
// Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
type ExprResponseOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (ExprResponse) carried by this output.
func (ExprResponseOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*ExprResponse)(nil)).Elem()
}
// ToExprResponseOutput returns the output itself (identity conversion).
func (o ExprResponseOutput) ToExprResponseOutput() ExprResponseOutput {
	return o
}
// ToExprResponseOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o ExprResponseOutput) ToExprResponseOutputWithContext(ctx context.Context) ExprResponseOutput {
	return o
}
// Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
func (o ExprResponseOutput) Description() pulumi.StringOutput {
	return o.ApplyT(func(v ExprResponse) string { return v.Description }).(pulumi.StringOutput)
}
// Textual representation of an expression in Common Expression Language syntax.
func (o ExprResponseOutput) Expression() pulumi.StringOutput {
	return o.ApplyT(func(v ExprResponse) string { return v.Expression }).(pulumi.StringOutput)
}
// Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
func (o ExprResponseOutput) Location() pulumi.StringOutput {
	return o.ApplyT(func(v ExprResponse) string { return v.Location }).(pulumi.StringOutput)
}
// Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
func (o ExprResponseOutput) Title() pulumi.StringOutput {
	return o.ApplyT(func(v ExprResponse) string { return v.Title }).(pulumi.StringOutput)
}
// A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an App Engine app, a Compute Engine instance, a Cloud SQL database, and so on.
type ResourceId struct {
	// The type-specific id. This should correspond to the id used in the type-specific API's.
	Id *string `pulumi:"id"`
	// The resource type this id is for. At present, the valid types are: "organization", "folder", and "project".
	Type *string `pulumi:"type"`
}
// ResourceIdInput is an input type that accepts ResourceIdArgs and ResourceIdOutput values.
// You can construct a concrete instance of `ResourceIdInput` via:
//
//	ResourceIdArgs{...}
type ResourceIdInput interface {
	pulumi.Input
	ToResourceIdOutput() ResourceIdOutput
	ToResourceIdOutputWithContext(context.Context) ResourceIdOutput
}
// A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an App Engine app, a Compute Engine instance, a Cloud SQL database, and so on.
type ResourceIdArgs struct {
	// The type-specific id. This should correspond to the id used in the type-specific API's.
	Id pulumi.StringPtrInput `pulumi:"id"`
	// The resource type this id is for. At present, the valid types are: "organization", "folder", and "project".
	Type pulumi.StringPtrInput `pulumi:"type"`
}
// ElementType returns the reflect.Type of the element value (ResourceId) produced by this input.
func (ResourceIdArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*ResourceId)(nil)).Elem()
}
// ToResourceIdOutput converts the args to a ResourceIdOutput using the background context.
func (i ResourceIdArgs) ToResourceIdOutput() ResourceIdOutput {
	return i.ToResourceIdOutputWithContext(context.Background())
}
// ToResourceIdOutputWithContext converts the args to a ResourceIdOutput, resolving with the given context.
func (i ResourceIdArgs) ToResourceIdOutputWithContext(ctx context.Context) ResourceIdOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ResourceIdOutput)
}
// ToResourceIdPtrOutput converts the args to a ResourceIdPtrOutput using the background context.
func (i ResourceIdArgs) ToResourceIdPtrOutput() ResourceIdPtrOutput {
	return i.ToResourceIdPtrOutputWithContext(context.Background())
}
// ToResourceIdPtrOutputWithContext converts the args to a ResourceIdPtrOutput via the plain ResourceIdOutput form.
func (i ResourceIdArgs) ToResourceIdPtrOutputWithContext(ctx context.Context) ResourceIdPtrOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ResourceIdOutput).ToResourceIdPtrOutputWithContext(ctx)
}
// ResourceIdPtrInput is an input type that accepts ResourceIdArgs, ResourceIdPtr and ResourceIdPtrOutput values.
// You can construct a concrete instance of `ResourceIdPtrInput` via:
//
//	ResourceIdArgs{...}
//
// or:
//
//	nil
type ResourceIdPtrInput interface {
	pulumi.Input
	ToResourceIdPtrOutput() ResourceIdPtrOutput
	ToResourceIdPtrOutputWithContext(context.Context) ResourceIdPtrOutput
}
// resourceIdPtrType adapts a *ResourceIdArgs so it can satisfy ResourceIdPtrInput.
type resourceIdPtrType ResourceIdArgs
// ResourceIdPtr wraps an optional ResourceIdArgs value as a ResourceIdPtrInput.
func ResourceIdPtr(v *ResourceIdArgs) ResourceIdPtrInput {
	return (*resourceIdPtrType)(v)
}
// ElementType returns the reflect.Type of the element value (*ResourceId) produced by this input.
func (*resourceIdPtrType) ElementType() reflect.Type {
	return reflect.TypeOf((**ResourceId)(nil)).Elem()
}
// ToResourceIdPtrOutput converts the pointer input to a ResourceIdPtrOutput using the background context.
func (i *resourceIdPtrType) ToResourceIdPtrOutput() ResourceIdPtrOutput {
	return i.ToResourceIdPtrOutputWithContext(context.Background())
}
// ToResourceIdPtrOutputWithContext converts the pointer input to a ResourceIdPtrOutput, resolving with the given context.
func (i *resourceIdPtrType) ToResourceIdPtrOutputWithContext(ctx context.Context) ResourceIdPtrOutput {
	return pulumi.ToOutputWithContext(ctx, i).(ResourceIdPtrOutput)
}
// A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an App Engine app, a Compute Engine instance, a Cloud SQL database, and so on.
type ResourceIdOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (ResourceId) carried by this output.
func (ResourceIdOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*ResourceId)(nil)).Elem()
}
// ToResourceIdOutput returns the output itself (identity conversion).
func (o ResourceIdOutput) ToResourceIdOutput() ResourceIdOutput {
	return o
}
// ToResourceIdOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o ResourceIdOutput) ToResourceIdOutputWithContext(ctx context.Context) ResourceIdOutput {
	return o
}
// ToResourceIdPtrOutput lifts the plain output into a pointer output using the background context.
func (o ResourceIdOutput) ToResourceIdPtrOutput() ResourceIdPtrOutput {
	return o.ToResourceIdPtrOutputWithContext(context.Background())
}
// ToResourceIdPtrOutputWithContext lifts the plain output into a pointer output by taking the address of each value.
func (o ResourceIdOutput) ToResourceIdPtrOutputWithContext(ctx context.Context) ResourceIdPtrOutput {
	return o.ApplyTWithContext(ctx, func(_ context.Context, v ResourceId) *ResourceId {
		return &v
	}).(ResourceIdPtrOutput)
}
// The type-specific id. This should correspond to the id used in the type-specific API's.
func (o ResourceIdOutput) Id() pulumi.StringPtrOutput {
	return o.ApplyT(func(v ResourceId) *string { return v.Id }).(pulumi.StringPtrOutput)
}
// The resource type this id is for. At present, the valid types are: "organization", "folder", and "project".
func (o ResourceIdOutput) Type() pulumi.StringPtrOutput {
	return o.ApplyT(func(v ResourceId) *string { return v.Type }).(pulumi.StringPtrOutput)
}
// ResourceIdPtrOutput wraps an output whose resolved value is a *ResourceId (optional ResourceId).
type ResourceIdPtrOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (*ResourceId) carried by this output.
func (ResourceIdPtrOutput) ElementType() reflect.Type {
	return reflect.TypeOf((**ResourceId)(nil)).Elem()
}
// ToResourceIdPtrOutput returns the output itself (identity conversion).
func (o ResourceIdPtrOutput) ToResourceIdPtrOutput() ResourceIdPtrOutput {
	return o
}
// ToResourceIdPtrOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o ResourceIdPtrOutput) ToResourceIdPtrOutputWithContext(ctx context.Context) ResourceIdPtrOutput {
	return o
}
// Elem dereferences the pointer output, yielding a zero-valued ResourceId when the pointer is nil.
func (o ResourceIdPtrOutput) Elem() ResourceIdOutput {
	return o.ApplyT(func(v *ResourceId) ResourceId {
		if v == nil {
			return ResourceId{}
		}
		return *v
	}).(ResourceIdOutput)
}
// The type-specific id. This should correspond to the id used in the type-specific API's.
// Yields nil when the underlying *ResourceId is nil.
func (o ResourceIdPtrOutput) Id() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *ResourceId) *string {
		if v != nil {
			return v.Id
		}
		return nil
	}).(pulumi.StringPtrOutput)
}
// The resource type this id is for. At present, the valid types are: "organization", "folder", and "project".
// Yields nil when the underlying *ResourceId is nil.
func (o ResourceIdPtrOutput) Type() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *ResourceId) *string {
		if v != nil {
			return v.Type
		}
		return nil
	}).(pulumi.StringPtrOutput)
}
// A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an App Engine app, a Compute Engine instance, a Cloud SQL database, and so on.
// NOTE(review): unlike ResourceId, this response variant carries no `Id` field — presumably the
// codegen maps the API's `id` onto the resource's own output property instead; verify against the generator.
type ResourceIdResponse struct {
	// The resource type this id is for. At present, the valid types are: "organization", "folder", and "project".
	Type string `pulumi:"type"`
}
// A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an App Engine app, a Compute Engine instance, a Cloud SQL database, and so on.
type ResourceIdResponseOutput struct{ *pulumi.OutputState }
// ElementType returns the reflect.Type of the element value (ResourceIdResponse) carried by this output.
func (ResourceIdResponseOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*ResourceIdResponse)(nil)).Elem()
}
// ToResourceIdResponseOutput returns the output itself (identity conversion).
func (o ResourceIdResponseOutput) ToResourceIdResponseOutput() ResourceIdResponseOutput {
	return o
}
// ToResourceIdResponseOutputWithContext returns the output itself; no context is needed for an identity conversion.
func (o ResourceIdResponseOutput) ToResourceIdResponseOutputWithContext(ctx context.Context) ResourceIdResponseOutput {
	return o
}
// The resource type this id is for. At present, the valid types are: "organization", "folder", and "project".
func (o ResourceIdResponseOutput) Type() pulumi.StringOutput {
	return o.ApplyT(func(v ResourceIdResponse) string { return v.Type }).(pulumi.StringOutput)
}
// init registers this package's input and output types with the Pulumi SDK
// type registry so values can be resolved and marshalled at runtime.
func init() {
	pulumi.RegisterInputType(reflect.TypeOf((*AuditConfigInput)(nil)).Elem(), AuditConfigArgs{})
	pulumi.RegisterInputType(reflect.TypeOf((*AuditConfigArrayInput)(nil)).Elem(), AuditConfigArray{})
	pulumi.RegisterInputType(reflect.TypeOf((*AuditLogConfigInput)(nil)).Elem(), AuditLogConfigArgs{})
	pulumi.RegisterInputType(reflect.TypeOf((*AuditLogConfigArrayInput)(nil)).Elem(), AuditLogConfigArray{})
	pulumi.RegisterInputType(reflect.TypeOf((*BindingInput)(nil)).Elem(), BindingArgs{})
	pulumi.RegisterInputType(reflect.TypeOf((*BindingArrayInput)(nil)).Elem(), BindingArray{})
	pulumi.RegisterInputType(reflect.TypeOf((*ExprInput)(nil)).Elem(), ExprArgs{})
	pulumi.RegisterInputType(reflect.TypeOf((*ExprPtrInput)(nil)).Elem(), ExprArgs{})
	pulumi.RegisterInputType(reflect.TypeOf((*ResourceIdInput)(nil)).Elem(), ResourceIdArgs{})
	pulumi.RegisterInputType(reflect.TypeOf((*ResourceIdPtrInput)(nil)).Elem(), ResourceIdArgs{})
	pulumi.RegisterOutputType(AuditConfigOutput{})
	pulumi.RegisterOutputType(AuditConfigArrayOutput{})
	pulumi.RegisterOutputType(AuditConfigResponseOutput{})
	pulumi.RegisterOutputType(AuditConfigResponseArrayOutput{})
	pulumi.RegisterOutputType(AuditLogConfigOutput{})
	pulumi.RegisterOutputType(AuditLogConfigArrayOutput{})
	pulumi.RegisterOutputType(AuditLogConfigResponseOutput{})
	pulumi.RegisterOutputType(AuditLogConfigResponseArrayOutput{})
	pulumi.RegisterOutputType(BindingOutput{})
	pulumi.RegisterOutputType(BindingArrayOutput{})
	pulumi.RegisterOutputType(BindingResponseOutput{})
	pulumi.RegisterOutputType(BindingResponseArrayOutput{})
	pulumi.RegisterOutputType(ExprOutput{})
	pulumi.RegisterOutputType(ExprPtrOutput{})
	pulumi.RegisterOutputType(ExprResponseOutput{})
	pulumi.RegisterOutputType(ResourceIdOutput{})
	pulumi.RegisterOutputType(ResourceIdPtrOutput{})
	pulumi.RegisterOutputType(ResourceIdResponseOutput{})
}
|
func (i AuditConfigArray) ToAuditConfigArrayOutputWithContext(ctx context.Context) AuditConfigArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(AuditConfigArrayOutput)
|
namingutils.go
|
// Copyright 2019 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"sigs.k8s.io/krew/pkg/constants"
"sigs.k8s.io/krew/pkg/index"
)
// indexOf returns the index name recorded in a receipt, falling back to the
// default index name when the receipt does not record one.
func indexOf(r index.Receipt) string {
	name := r.Status.Source.Name
	if name == "" {
		return constants.DefaultIndexName
	}
	return name
}
// displayName returns the display name of a Plugin.
// The index name is omitted if it is the default index.
func displayName(p index.Plugin, indexName string) string
|
// isDefaultIndex reports whether the given name refers to the default index
// (either explicitly, or via the empty string).
func isDefaultIndex(name string) bool {
	switch name {
	case "", constants.DefaultIndexName:
		return true
	}
	return false
}
// canonicalName returns INDEX/NAME value for a plugin, even if
// it is in the default index.
func canonicalName(p index.Plugin, indexName string) string {
	name := indexName
	if isDefaultIndex(name) {
		name = constants.DefaultIndexName
	}
	return name + "/" + p.Name
}
|
{
if isDefaultIndex(indexName) {
return p.Name
}
return indexName + "/" + p.Name
}
|
mod.rs
|
use ark_ff::fields::PrimeField;
#[cfg(feature = "r1cs")]
pub mod constraints;
/// Public inputs bound into the circuit.
///
/// NOTE(review): field semantics are inferred from the names (they match a
/// mixer/anchor-style withdraw input set) — confirm against the circuit
/// that consumes this struct.
#[derive(Clone, Default)]
pub struct Input<F: PrimeField> {
    pub recipient: F,
    pub relayer: F,
    pub fee: F,
    pub refund: F,
    pub commitment: F,
}
impl<F: PrimeField> Input<F> {
pub fn new(recipient: F, relayer: F, fee: F, refund: F, commitment: F) -> Self
|
}
|
{
Self {
recipient,
relayer,
fee,
refund,
commitment,
}
}
|
id_helper.py
|
"""
features.py
---------------
In trimesh.comparison, we arbitrarily threshold identifier values
at a certain number of significant figures.
This file permutates meshes around and observes how their identifier —
which is supposed to be fairly invariant to translation and tessellation
changes — actually behaves. We use this to generate the arbitrary sigfig thresholds.
"""
import numpy as np
import trimesh
import time
import json
import os
import collections
import logging
TOL_ZERO = 1e-12
def permutations(mesh,
                 function=lambda x: x.identifier,
                 displacement_max=1e-8,
                 count=1000,
                 subdivisions=2,
                 cutoff=3600):
    """
    Permutate a mesh, record the maximum it deviates from the original mesh
    and the resulting value of an identifier function.

    Parameters
    ----------
    mesh: Trimesh object
    function: function which takes a single mesh as an argument
              and returns an (n,) float vector
    displacement_max: float, maximum displacement (scaled by mesh.scale)
              that would be passed to the noise permutation; currently
              unused because the noise step below is disabled
    count: int, number of times to permutate each subdivision step
    subdivisions: the maximum number of times to subdivide the mesh
    cutoff: float, wall-clock seconds after which to bail out early

    Returns
    -----------
    identifiers: numpy array of identifiers
    """
    identifiers = []
    start = time.time()
    # do subdivisions
    divided = [mesh.copy()]
    for j in range(subdivisions - 1):
        divided.append(divided[-1].copy().subdivide())
    for i, displacement in enumerate(np.linspace(0.0,
                                                 displacement_max / mesh.scale,
                                                 count)):
        # get one of the subdivided meshes
        current = np.random.choice(divided).copy()
        if i > (count / 10):
            # run first bunch without tessellation permutation
            current = current.permutate.tessellation()
        # after the first few displace it a lot
        transformed = trimesh.permutate.transform(current)
        # NOTE(review): the noise permutation is disabled, which leaves the
        # loop variable `displacement` unused — confirm this is intentional.
        # noisy = trimesh.permutate.noise(transformed, displacement)
        identifier = function(transformed)
        identifiers.append(identifier)
        # bail out (returning what we have so far) if we exceed the budget
        if (time.time() - start) > cutoff:
            print('bailing for time:{} count:{}'.format(time.time() - start,
                                                        i))
            return np.array(identifiers)
    return np.array(identifiers)
def get_meshes(path='../../../models', cutoff=None):
"""
Get a list of single- body meshes to test identifiers on.
Parameters
------------
path: str, location of models
cutoff: int, number of meshes to stop loading at
Returns
------------
meshes: (n,) list of Trimesh objects
"""
bodies = collections.deque()
for file_name in os.listdir(path):
try:
mesh = trimesh.load(os.path.join(path, file_name))
split = mesh.split()
bodies.extend(split)
if len(split) > 1:
bodies.append(mesh)
except BaseException:
continue
if cutoff is not None and len(bodies) > cutoff:
return np.array(bodies)
for i in range(100):
cylinder = trimesh.creation.cylinder(
radius=np.random.random() * 100,
height=np.random.random() * 1000,
sections=int(np.clip(np.random.random() * 720,
20,
720)))
capsule = trimesh.creation.capsule(
radius=np.random.random() * 100,
height=np.random.random() * 1000,
count=np.clip(np.random.random(2) * 720,
20,
720).astype(int))
bodies.append(cylinder)
bodies.append(capsule)
for i in range(10):
bodies.append(trimesh.creation.random_soup(
|
int(np.clip(np.random.random() * 1000,
20,
1000))))
bodies.append(trimesh.creation.icosphere())
bodies.append(trimesh.creation.uv_sphere())
bodies.append(trimesh.creation.icosahedron())
return np.array(bodies)
def data_stats(data):
    """
    Summarize identifier samples.

    Parameters
    ----------
    data: (n, m) array-like, one identifier vector per row

    Returns
    -------
    mean: (m,) float array, per-component mean identifier
    percent: (m,) float array, absolute difference between the mean and the
             absolute 99.999th percentile of each component
    """
    samples = np.asanyarray(data, dtype=np.float64)
    mean = samples.mean(axis=0)
    upper = np.abs(np.percentile(samples, 99.999, axis=0))
    percent = np.abs(mean - upper)
    return mean, percent
if __name__ == '__main__':
    trimesh.util.attach_to_log(level=logging.INFO)
    meshes = get_meshes()
    print('loaded meshes!')
    # we want the whole thing to last less than
    hours = 5
    cutoff = (hours * 3600) / len(meshes)
    # NOTE(review): the per-mesh budget computed above is immediately
    # overridden with a fixed 30 seconds — confirm this debug override
    # should remain.
    cutoff = 30
    result = []
    running = []
    for i, m in enumerate(meshes):
        # calculate permutations
        identifier = permutations(m,
                                  count=1000,
                                  cutoff=cutoff)
        # get data
        mean, percent = data_stats(identifier)
        # components where both the mean and the spread are clear of zero
        nz = np.logical_and(np.abs(mean) > TOL_ZERO,
                            np.abs(percent) > TOL_ZERO)
        # orders of magnitude of agreement minus one (safety margin);
        # components without signal keep the default of 10
        r = np.ones_like(mean) * 10
        r[nz] = np.round(np.log10(np.abs(mean[nz] / percent[nz]))) - 1
        running.append(r)
        result.append({'mean': mean.tolist(),
                       'percent': percent.tolist()})
        print('\n\n{}/{}'.format(i, len(meshes) - 1))
        print('mean', mean)
        print('percent', percent)
        print('oom', mean / percent)
        print('curun', running[-1])
        print('minrun', np.min(running, axis=0))
        print('meanrun', np.mean(running, axis=0))
        # every loop dump everything
        # thrash- ey for sure but intermediate results are great
        name_out = 'res.json'
        with open(name_out, 'w') as file_obj:
            json.dump(result,
                      file_obj,
                      indent=4)
| |
state-no-debug.rs
|
extern crate sm;
#[derive(Copy, Clone, Eq, PartialEq)]
struct HelloWorld;
impl sm::State for HelloWorld {}
//~^ ERROR `HelloWorld` doesn't implement `std::fmt::Debug`
fn main()
|
{}
|
|
0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-28 15:36
from django.db import migrations, models
import django.db.models.deletion
import labour.models.signup_extras
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0029_auto_20170827_1818'),
]
operations = [
migrations.CreateModel(
name='SignupExtra',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('shift_type', models.CharField(choices=[('none', 'Ei väliä'), ('4h', 'Pari pitkää vuoroa'), ('yli4h', 'Useita lyhyitä vuoroja')], help_text='Haluatko tehdä yhden pitkän työvuoron vaiko monta lyhyempää vuoroa?', max_length=15, verbose_name='Toivottu työvuoron pituus')),
('prior_experience', models.TextField(blank=True, help_text='Kerro tässä kentässä, jos sinulla on aiempaa kokemusta vastaavista tehtävistä tai muuta sellaista työkokemusta, josta arvioit olevan hyötyä hakemassasi tehtävässä.', verbose_name='Työkokemus')),
('free_text', models.TextField(blank=True, help_text='Jos haluat sanoa hakemuksesi käsittelijöille jotain sellaista, jolle ei ole omaa kenttää yllä, käytä tätä kenttää. Jos haet valokuvaajaksi, kerro lisäksi millaista kuvauskalustoa sinulla on käytettävissäsi ja listaamuutamia gallerialinkkejä, joista pääsemme ihailemaan ottamiasi kuvia. ', verbose_name='Vapaa alue')),
('special_diet_other', models.TextField(blank=True, help_text='Jos noudatat erikoisruokavaliota, jota ei ole yllä olevassa listassa, ilmoita se tässä. Tapahtuman järjestäjä pyrkii ottamaan erikoisruokavaliot huomioon, mutta kaikkia erikoisruokavalioita ei välttämättä pystytä järjestämään.', verbose_name='Muu erikoisruokavalio')),
('shirt_size', models.CharField(choices=[('NO_SHIRT', 'Ei paitaa'), ('XS', 'XS Unisex'), ('S', 'S Unisex'), ('M', 'M Unisex'), ('L', 'L Unisex'), ('XL', 'XL Unisex'), ('XXL', 'XXL Unisex'), ('3XL', '3XL Unisex'), ('4XL', '4XL Unisex'), ('5XL', '5XL Unisex'), ('LF_XS', 'XS Ladyfit'), ('LF_S', 'S Ladyfit'), ('LF_M', 'M Ladyfit'), ('LF_L', 'L Ladyfit'), ('LF_XL', 'XL Ladyfit')], default='NO_SHIRT', help_text='Ajoissa ilmoittautuneet saavat maksuttoman työvoimapaidan. Kokotaulukot: <a href="http://www.bc-collection.eu/uploads/sizes/TU004.jpg" target="_blank">unisex-paita</a>, <a href="http://www.bc-collection.eu/uploads/sizes/TW040.jpg" target="_blank">ladyfit-paita</a>', max_length=8, verbose_name='Paidan koko')),
('shirt_type', models.CharField(choices=[('STAFF', 'Staff'), ('DESURITY', 'Desurity'), ('KUVAAJA', 'Kuvaaja'), ('VENDOR', 'Myynti'), ('TOOLATE', 'Myöhästyi paitatilauksesta')], default='TOOLATE', max_length=8, verbose_name='Paidan tyyppi')),
|
options={
'abstract': False,
},
bases=(labour.models.signup_extras.SignupExtraMixin, models.Model),
),
migrations.CreateModel(
name='SpecialDiet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=63)),
],
),
migrations.AddField(
model_name='signupextra',
name='special_diet',
field=models.ManyToManyField(blank=True, related_name='_signupextra_special_diet_+', to='desucon2018.SpecialDiet', verbose_name='Erikoisruokavalio'),
),
]
|
('night_work', models.BooleanField(default=False, verbose_name='Olen valmis tekemään yötöitä')),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='desucon2018_signup_extras', to='core.Event')),
('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='desucon2018_signup_extra', to='core.Person')),
],
|
main.rs
|
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Welcome to the deferred lighting example!
//
// The idea behind deferred lighting is to render the scene in two steps.
//
// First you draw all the objects of the scene. But instead of calculating the color they will
// have on the screen, you output their characteristics such as their diffuse color and their
// normals, and write this to images.
//
// After all the objects are drawn, you should obtain several images that contain the
// characteristics of each pixel.
//
// Then you apply lighting to the scene. In other words you draw to the final image by taking
// these intermediate images and the various lights of the scene as input.
//
// This technique allows you to apply tons of light sources to a scene, which would be too
// expensive otherwise. It has some drawbacks, which are the fact that transparent objects must be
// drawn after the lighting, and that the whole process consumes more memory.
use vulkano::device::{Device, DeviceExtensions};
use vulkano::instance::{Instance, PhysicalDevice};
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano_win::VkSurfaceBuild;
use winit::event_loop::{EventLoop, ControlFlow};
use winit::window::{WindowBuilder};
use winit::event::{Event, WindowEvent};
use cgmath::Matrix4;
use cgmath::SquareMatrix;
use cgmath::Vector3;
mod frame;
mod triangle_draw_system;
use crate::frame::*;
use crate::triangle_draw_system::*;
fn main() {
// Basic initialization. See the triangle example if you want more details about this.
let extensions = vulkano_win::required_extensions();
let instance = Instance::new(None, &extensions, None).unwrap();
let physical = PhysicalDevice::enumerate(&instance).next().unwrap();
let events_loop = EventLoop::new();
let surface = WindowBuilder::new().build_vk_surface(&events_loop, instance.clone()).unwrap();
let window = surface.window();
let queue_family = physical.queue_families().find(|&q| {
q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
}).expect("couldn't find a graphical queue family");
let device_ext = DeviceExtensions { khr_swapchain: true, .. DeviceExtensions::none() };
let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
[(queue_family, 0.5)].iter().cloned()).unwrap();
let queue = queues.next().unwrap();
let (mut swapchain, mut images) = {
let caps = surface.capabilities(physical).unwrap();
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next().unwrap();
let format = caps.supported_formats[0].0;
let initial_dimensions = {
let dimensions: (u32, u32) = window.inner_size().to_physical(window.hidpi_factor()).into();
[dimensions.0, dimensions.1]
};
Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
initial_dimensions, 1, usage, &queue, SurfaceTransform::Identity, alpha,
PresentMode::Fifo, true, None).unwrap()
};
// Here is the basic initialization for the deferred system.
let mut frame_system = FrameSystem::new(queue.clone(), swapchain.format());
let triangle_draw_system = TriangleDrawSystem::new(queue.clone(), frame_system.deferred_subpass());
let mut recreate_swapchain = false;
let mut previous_frame_end = Some(Box::new(sync::now(device.clone())) as Box<dyn GpuFuture>);
events_loop.run(move |ev, _, cf| {
*cf = ControlFlow::Poll;
let window = surface.window();
previous_frame_end.as_mut().unwrap().cleanup_finished();
if recreate_swapchain {
let dimensions = {
let dimensions: (u32, u32) = window.inner_size().to_physical(window.hidpi_factor()).into();
[dimensions.0, dimensions.1]
};
let (new_swapchain, new_images) = match swapchain.recreate_with_dimension(dimensions) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => {
return;
}
|
Err(err) => panic!("{:?}", err)
};
swapchain = new_swapchain;
images = new_images;
recreate_swapchain = false;
}
let (image_num, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
return;
}
Err(err) => panic!("{:?}", err)
};
let prev = previous_frame_end.take();
let future = prev.unwrap().join(acquire_future);
let mut frame = frame_system.frame(future, images[image_num].clone(), Matrix4::identity());
let mut after_future = None;
while let Some(pass) = frame.next_pass() {
match pass {
Pass::Deferred(mut draw_pass) => {
let cb = triangle_draw_system.draw(draw_pass.viewport_dimensions());
draw_pass.execute(cb);
}
Pass::Lighting(mut lighting) => {
lighting.ambient_light([0.1, 0.1, 0.1]);
lighting.directional_light(Vector3::new(0.2, -0.1, -0.7), [0.6, 0.6, 0.6]);
lighting.point_light(Vector3::new(0.5, -0.5, -0.1), [1.0, 0.0, 0.0]);
lighting.point_light(Vector3::new(-0.9, 0.2, -0.15), [0.0, 1.0, 0.0]);
lighting.point_light(Vector3::new(0.0, 0.5, -0.05), [0.0, 0.0, 1.0]);
}
Pass::Finished(af) => {
after_future = Some(af);
}
}
}
let future = after_future.unwrap()
.then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
// This wait is required when using NVIDIA or running on macOS. See https://github.com/vulkano-rs/vulkano/issues/1247
future.wait(None).unwrap();
previous_frame_end = Some(Box::new(future) as Box<_>);
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Some(Box::new(sync::now(device.clone())) as Box<_>);
}
Err(e) => {
println!("{:?}", e);
previous_frame_end = Some(Box::new(sync::now(device.clone())) as Box<_>);
}
}
match ev {
Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => *cf = ControlFlow::Exit,
Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
_ => (),
}
});
}
| |
phys_mem.rs
|
use crate::arch;
use crate::arch::phys_mem;
use crate::ptr::{self, Align, PointerInSlice};
use crate::spin::Mutex;
use bit_vec::BitVec;
use core::cmp;
use core::intrinsics;
use core::mem;
use core::slice;
use core::sync::atomic::{AtomicUsize, Ordering};
use libc::c_void;
use syscall::{ErrNum, Result};
extern "C" {
static mut KERNEL_BASE: u8;
static kernel_end: u8;
static mut heap_start: u8;
static heap_end: u8;
}
/// Moves the kernel heap break by `delta` bytes, sbrk-style, returning a
/// pointer to the *previous* break.
///
/// # Safety
/// Not thread-safe: tracks the break in a `static mut` with no locking.
/// NOTE(review): the out-of-space assertion checks the old break, not
/// `begin + delta`, so one large positive `delta` can push the break past
/// `heap_end` undetected — confirm callers grow in small increments.
/// NOTE(review): `BRK += delta as usize` relies on two's-complement wrap for
/// negative `delta`; in a debug build this addition would panic on overflow.
pub unsafe fn resize_kernel_heap(delta: isize) -> *mut u8 {
    static mut BRK: usize = 0;
    // Current break: heap_start plus the accumulated offset.
    let begin = (&mut heap_start as *mut u8).offset(BRK as isize);
    assert!((begin as *const u8) < (&heap_end as *const u8), "out of heap space");
    BRK += delta as usize;
    begin
}
static MALLOC_LOCK: AtomicUsize = AtomicUsize::new(0);
static mut MALLOC_LOCK_TOKEN: usize = 0;
/// C allocator lock hook (`__malloc_lock`): recursively "locks" the allocator
/// by disabling interrupts and counting nested acquisitions.
#[no_mangle]
pub extern "C" fn __malloc_lock(_reent: *mut c_void) {
    let token = arch::disable_interrupts();
    // TODO: multi CPU
    // Only the outermost acquisition stashes the interrupt-restore token;
    // nested calls find interrupts already disabled and drop their token.
    if MALLOC_LOCK.fetch_add(1, Ordering::SeqCst) == 0 {
        unsafe {
            MALLOC_LOCK_TOKEN = token;
        }
    }
}
#[no_mangle]
pub extern "C" fn __malloc_unlock(_reent: *mut c_void)
|
pub const PAGE_SIZE: usize = 4096;
/// Page-granularity physical frame allocator backed by a bitmap:
/// one bit per `PAGE_SIZE` frame, where `true` means the frame is free.
pub struct PhysicalBitmap {
    free: Mutex<BitVec>,
}
impl PhysicalBitmap {
    /// Creates a bitmap covering `total_bytes` of physical memory with every
    /// page initially marked free.
    pub fn new(total_bytes: usize) -> PhysicalBitmap {
        let free = BitVec::from_elem(total_bytes / PAGE_SIZE, true);
        PhysicalBitmap { free: Mutex::new(free) }
    }

    /// Builds the bitmap for the running machine (delegates to the
    /// architecture-specific probe).
    pub fn machine() -> PhysicalBitmap {
        phys_mem::machine()
    }

    /// Marks `page_count` pages starting at `start_page` as in use, silently
    /// clamping the range to the size of the bitmap.
    pub fn reserve_pages(&self, start_page: usize, page_count: usize) {
        let mut free = lock!(self.free);
        if start_page <= free.len() {
            let page_count = cmp::min(page_count, free.len() - start_page);
            for i in start_page..start_page + page_count {
                free.set(i, false);
            }
        }
    }

    /// Reserves every page overlapping the byte range `addr..addr + len`.
    pub fn reserve_addr(&self, addr: usize, len: usize) {
        self.reserve_pages(addr / PAGE_SIZE, (len + PAGE_SIZE - 1) / PAGE_SIZE)
    }

    /// Reserves the physical pages backing the virtual range at `ptr`.
    pub fn reserve_ptr<T>(&self, ptr: *const T, len: usize) {
        let addr = virt2phys(ptr);
        self.reserve_addr(addr, len)
    }

    /// Total physical memory tracked by this bitmap, in bytes.
    pub fn total_bytes(&self) -> usize {
        let total_count = lock!(self.free).len();
        total_count * PAGE_SIZE
    }

    /// Currently free physical memory, in bytes (O(n) scan of the bitmap).
    pub fn free_bytes(&self) -> usize {
        let free = lock!(self.free);
        let free_count = free.iter().filter(|x| *x).count();
        free_count * PAGE_SIZE
    }

    /// Allocates the first free page (first-fit scan) and returns its
    /// physical address, or `OutOfMemory` if none is free.
    pub fn alloc_page(&self) -> Result<usize> {
        let mut free = lock!(self.free);
        match free.iter().position(|x| x) {
            Some(i) => {
                free.set(i, false);
                Ok(i * PAGE_SIZE)
            }
            None => Err(ErrNum::OutOfMemory),
        }
    }

    /// Allocates a page and zero-fills it (via the identity mapping) before
    /// returning its physical address.
    pub fn alloc_zeroed_page(&self) -> Result<usize> {
        let addr = self.alloc_page()?;
        unsafe {
            let ptr: &mut u8 = phys2virt(addr);
            intrinsics::write_bytes(ptr, 0, PAGE_SIZE);
        }
        Ok(addr)
    }

    /// Returns the page containing `addr` to the free pool.
    pub fn free_page(&self, addr: usize) {
        let mut free = lock!(self.free);
        let i = addr / PAGE_SIZE;
        free.set(i, true)
    }
}
/// Returns the kernel's identity-mapped region as a byte slice: from
/// `KERNEL_BASE` up to `kernel_end` rounded up to a 1 GiB boundary.
pub fn identity_range() -> &'static [u8] {
    let gigabyte = 1024 * 1024 * 1024;
    unsafe {
        // SAFETY: relies on the linker-provided symbols `KERNEL_BASE` and
        // `kernel_end` (extern block above) delimiting a mapped, contiguous
        // range — TODO confirm against the linker script.
        let base_ptr = &KERNEL_BASE as *const u8;
        let end_ptr = Align::up(&kernel_end as *const u8, gigabyte);
        let len = ptr::bytes_between(base_ptr, end_ptr);
        slice::from_raw_parts(base_ptr, len)
    }
}
/// Panics unless `ptr` (the virtual alias of physical address `addr`) lies
/// inside the kernel's identity-mapped region.
fn check_identity(addr: usize, ptr: *const u8) {
    let identity = identity_range();
    if !identity.contains_ptr(ptr) {
        panic!(
            "physical {:x}/virtual {:p} can't be contained in the identity mapping {:p}..{:p}",
            addr,
            ptr,
            identity.as_ptr(),
            unsafe { identity.as_ptr().offset(identity.len() as isize) }
        );
    }
}
/// Converts a physical address into a mutable reference through the kernel's
/// identity mapping. Panics (via `check_identity`) if the address falls
/// outside the identity-mapped range.
///
/// # Safety
/// Caller must ensure `addr` refers to memory valid for a `T` and that the
/// returned `&'static mut T` is not aliased elsewhere.
pub unsafe fn phys2virt<T>(addr: usize) -> &'static mut T {
    let kernel_base_ptr: *mut u8 = &mut KERNEL_BASE as *mut u8;
    let ptr: *mut u8 = kernel_base_ptr.offset(addr as isize);
    check_identity(addr, ptr);
    let ptr: *mut T = ptr as *mut T;
    &mut *ptr
}
/// Converts a pointer inside the identity mapping back to its physical
/// address (offset from `KERNEL_BASE`). Panics if the pointer is outside the
/// identity-mapped range.
pub fn virt2phys<T>(ptr: *const T) -> usize {
    let kernel_base_ptr: *const u8 = unsafe { &KERNEL_BASE as *const u8 };
    let addr = ptr as usize - kernel_base_ptr as usize;
    check_identity(addr, ptr as *const u8);
    addr
}
/// Unit tests for `PhysicalBitmap`, built only with the `test` feature.
#[cfg(feature = "test")]
pub mod test {
    use super::*;
    use syscall::ErrNum;

    test! {
        // Two consecutive allocations must hand out distinct frames.
        fn can_alloc_two_pages() {
            let bitmap = PhysicalBitmap::new(640 * 1024);
            let addr1 = bitmap.alloc_page().unwrap();
            let addr2 = bitmap.alloc_page().unwrap();
            assert!(addr1 != addr2);
        }
        // A freed frame is reused by the next allocation (first-fit scan).
        fn can_alloc_free_realloc() {
            let bitmap = PhysicalBitmap::new(640 * 1024);
            let addr1 = bitmap.alloc_page().unwrap();
            bitmap.free_page(addr1);
            let addr2 = bitmap.alloc_page().unwrap();
            assert_eq!(addr1, addr2);
        }
        // Exhausting a two-page bitmap yields OutOfMemory.
        fn can_handle_out_of_memory() {
            let bitmap = PhysicalBitmap::new(2 * PAGE_SIZE);
            bitmap.alloc_page().unwrap();
            bitmap.alloc_page().unwrap();
            let err = bitmap.alloc_page().unwrap_err();
            assert_eq!(err, ErrNum::OutOfMemory);
        }
    }
}
|
{
// TODO: multi CPU
if MALLOC_LOCK.fetch_sub(1, Ordering::SeqCst) == 1 {
let token = mem::replace(unsafe { &mut MALLOC_LOCK_TOKEN }, 0);
arch::restore_interrupts(token);
}
}
|
app.routing.ts
|
import { ModuleWithProviders } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { HeaderComponent } from './shared/header/header.component';
import { AboutComponent } from './about/about.component';
import { LoginComponent } from './login/login.component';
|
import { HomeComponent } from './home/home.component';
// Top-level application routes; the empty path renders the home page.
const appRoutes: Routes = [
    { path: 'header', component: HeaderComponent },
    { path: 'about', component: AboutComponent },
    { path: '', component: HomeComponent }
];
// Extra providers required by the router configuration (currently none).
export const appRoutingProviders: any[] = [
];
// NOTE(review): the non-generic ModuleWithProviders type was deprecated in
// Angular 9 and removed in Angular 10; newer versions require
// ModuleWithProviders<RouterModule> — confirm the Angular version in use.
export const appRouting: ModuleWithProviders = RouterModule.forRoot(appRoutes);
|
import { ProtocolsComponent } from './components/protocols/protocols.component';
|
0001_initial.py
|
# Generated by Django 2.1.7 on 2019-02-26 03:56
from django.db import migrations, models
class
|
(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccessToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create', models.DateTimeField(auto_created=True)),
('key', models.CharField(max_length=512)),
('expires_in', models.DateTimeField()),
],
),
]
|
Migration
|
test_slow.py
|
import pytest
@pytest.mark.slow
def test_long_computation():
    # Placeholder body: exists to exercise selection/deselection of the
    # "slow" marker (e.g. `pytest -m "not slow"`).
    ...
@pytest.mark.timeout(10, method="thread")
def test_topology_sort():
|
def test_foo():
    # Unmarked, always-passing test: included in every default selection.
    pass
|
...
|
socksproxy.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2019 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/socks_proxy_7d.ipset"
__check__ = "socks_proxy_7d"
__info__ = "proxy (suspicious)"
__reference__ = "socks-proxy.net"
def fetch():
|
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
|
retval = {}
content = retrieve_content(__url__)
|
abci.go
|
package distribution
import (
abci "github.com/tendermint/tendermint/abci/types"
sdk "github.com/pocblockchain/pocc/types"
"github.com/pocblockchain/pocc/x/distribution/keeper"
)
// set the proposer for determining distribution during endblock
// and distribute rewards for the previous block
func BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock, k keeper.Keeper)
|
{
// determine the total power signing the block
var previousTotalPower, sumPreviousPrecommitPower int64
for _, voteInfo := range req.LastCommitInfo.GetVotes() {
previousTotalPower += voteInfo.Validator.Power
if voteInfo.SignedLastBlock {
sumPreviousPrecommitPower += voteInfo.Validator.Power
}
}
// TODO this is Tendermint-dependent
// ref https://github.com/pocblockchain/pocc/issues/3095
if ctx.BlockHeight() > 1 {
previousProposer := k.GetPreviousProposerConsAddr(ctx)
k.AllocateTokens(ctx, sumPreviousPrecommitPower, previousTotalPower, previousProposer, req.LastCommitInfo.GetVotes())
}
// record the proposer for when we payout on the next block
consAddr := sdk.ConsAddress(req.Header.ProposerAddress)
k.SetPreviousProposerConsAddr(ctx, consAddr)
}
|
|
layout.py
|
from __future__ import annotations
from abc import ABC, abstractmethod, abstractmethod
from dataclasses import dataclass
from itertools import chain
from operator import itemgetter
import sys
from typing import Iterable, Iterator, NamedTuple, TYPE_CHECKING
from rich import segment
import rich.repr
from rich.control import Control
from rich.console import Console, ConsoleOptions, RenderResult, RenderableType
from rich.segment import Segment, SegmentLines
from rich.style import Style
from . import log, panic
from ._loop import loop_last
from .layout_map import LayoutMap
from ._profile import timer
from ._lines import crop_lines
from ._types import Lines
from .geometry import clamp, Region, Offset, Size
PY38 = sys.version_info >= (3, 8)
if TYPE_CHECKING:
from .widget import Widget
from .view import View
class NoWidget(Exception):
    """Raised when a widget lookup in the layout fails."""

    pass
class OrderedRegion(NamedTuple):
    """A region paired with the ordering key used when sorting layers."""

    region: Region
    order: tuple[int, int]
class ReflowResult(NamedTuple):
    """The result of a reflow operation. Describes the changes to widgets."""

    hidden: set[Widget]
    shown: set[Widget]
    resized: set[Widget]
class LayoutUpdate:
def __init__(self, lines: Lines, x: int, y: int) -> None:
|
    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        # Emit a cursor-home control, then draw each stored line at its
        # absolute (x, y) screen position. Assumes self.lines/self.x/self.y
        # were set by __init__ (not visible here — confirm).
        yield Control.home().segment
        x = self.x
        new_line = Segment.line()
        move_to = Control.move_to
        for last, (y, line) in loop_last(enumerate(self.lines, self.y)):
            yield move_to(x, y).segment
            yield from line
            if not last:
                yield new_line
class Layout(ABC):
"""Responsible for arranging Widgets in a view and rendering them."""
    def __init__(self) -> None:
        # Widget placement map produced by the most recent reflow.
        self._layout_map: LayoutMap | None = None
        self.width = 0
        self.height = 0
        # Widget -> (full region, clip region), refreshed on each reflow.
        self.regions: dict[Widget, tuple[Region, Region]] = {}
        # Cached per-line vertical cut positions; invalidated by reset().
        self._cuts: list[list[int]] | None = None
        # True while the layout must be regenerated before rendering.
        self._require_update: bool = True
        self.background = ""
    def check_update(self) -> bool:
        """Return True if the layout has been marked stale via require_update()."""
        return self._require_update
    def require_update(self) -> None:
        """Mark the layout as stale and drop the cached layout map and cuts."""
        self._require_update = True
        self.reset()
        self._layout_map = None
    def reset_update(self) -> None:
        # Clear the stale flag without rebuilding (reflow() also clears it).
        self._require_update = False
    def reset(self) -> None:
        # Invalidate the cached cuts; the commented-out lines below show
        # state that used to be cleared here as well.
        self._cuts = None
        # if self._require_update:
        #     self.regions.clear()
        #     self._layout_map = None
    def reflow(
        self, console: Console, width: int, height: int, scroll: Offset
    ) -> ReflowResult:
        """Regenerate the layout map for a new size/scroll and diff the result.

        Args:
            console (Console): Console instance.
            width (int): New layout width.
            height (int): New layout height.
            scroll (Offset): Scroll offset.

        Returns:
            ReflowResult: Widgets newly hidden, newly shown, and resized.
        """
        self.reset()
        self.width = width
        self.height = height
        map = self.generate_map(
            console,
            Size(width, height),
            Region(0, 0, width, height),
            scroll,
        )
        self._require_update = False
        # Diff the previous widget set against the new one.
        old_widgets = set() if self.map is None else set(self.map.keys())
        new_widgets = set(map.keys())
        # Newly visible widgets
        shown_widgets = new_widgets - old_widgets
        # Newly hidden widgets
        hidden_widgets = old_widgets - new_widgets
        self._layout_map = map
        # Copy renders if the size hasn't changed
        new_renders = {
            widget: (region, clip) for widget, (region, _order, clip) in map.items()
        }
        self.regions = new_renders
        # Widgets with changed size
        resized_widgets = {
            widget
            for widget, (region, *_) in map.items()
            if widget in old_widgets and widget.size != region.size
        }
        return ReflowResult(
            hidden=hidden_widgets, shown=shown_widgets, resized=resized_widgets
        )
    @abstractmethod
    def get_widgets(self) -> Iterable[Widget]:
        """Return an iterable of every widget managed by this layout."""
        ...
    @abstractmethod
    def generate_map(
        self, console: Console, size: Size, viewport: Region, scroll: Offset
    ) -> LayoutMap:
        """Generate a layout map that defines where on the screen the widgets will be drawn.

        Args:
            console (Console): Console instance.
            size (Size): Size of container.
            viewport (Region): Screen relative viewport.
            scroll (Offset): Scroll offset to apply.

        Returns:
            LayoutMap: Mapping of widget to its (region, order, clip) placement.
        """
    async def mount_all(self, view: "View") -> None:
        """Mount every widget returned by get_widgets() on to `view`."""
        await view.mount(*self.get_widgets())
    @property
    def map(self) -> LayoutMap | None:
        """Layout map from the most recent reflow, or None before any reflow."""
        return self._layout_map
    def __iter__(self) -> Iterator[tuple[Widget, Region, Region]]:
        # Yield (widget, clipped region, full region) in descending `order`
        # (reverse-sorted on each entry's order key). Yields nothing before
        # the first reflow.
        if self.map is not None:
            layers = sorted(
                self.map.widgets.items(), key=lambda item: item[1].order, reverse=True
            )
            for widget, (region, order, clip) in layers:
                yield widget, region.intersection(clip), region
    def get_offset(self, widget: Widget) -> Offset:
        """Get the origin of a widget's region.

        Raises:
            NoWidget: If the widget is not present in the layout map.
        """
        try:
            return self.map[widget].region.origin
        except KeyError:
            raise NoWidget("Widget is not in layout")
    def get_widget_at(self, x: int, y: int) -> tuple[Widget, Region]:
        """Get the widget and its full region under the given point.

        Raises:
            NoWidget: If no visual widget's clipped region contains (x, y).
        """
        for widget, cropped_region, region in self:
            if widget.is_visual and cropped_region.contains(x, y):
                return widget, region
        raise NoWidget(f"No widget under screen coordinate ({x}, {y})")
    def get_style_at(self, x: int, y: int) -> Style:
        """Get the Style of the segment at screen coordinate (x, y).

        Returns a null style when no widget or segment is under the point.
        """
        try:
            widget, region = self.get_widget_at(x, y)
        except NoWidget:
            return Style.null()
        if widget not in self.regions:
            return Style.null()
        lines = widget._get_lines()
        # Translate screen coordinates into widget-local coordinates.
        x -= region.x
        y -= region.y
        # NOTE(review): assumes 0 <= y < len(lines); presumably guaranteed by
        # get_widget_at's region containment check — confirm.
        line = lines[y]
        end = 0
        # Walk segments, accumulating cell widths until x falls inside one.
        for segment in line:
            end += segment.cell_length
            if x < end:
                return segment.style or Style.null()
        return Style.null()
    def get_widget_region(self, widget: Widget) -> Region:
        """Get the full (unclipped) region of a widget.

        Raises:
            NoWidget: If the widget is not present in the layout map.
        """
        try:
            region, *_ = self.map[widget]
        except KeyError:
            raise NoWidget("Widget is not in layout")
        else:
            return region
    @property
    def cuts(self) -> list[list[int]]:
        """Get vertical cuts.

        A cut is every point on a line where a widget starts or ends.
        The result is cached until reset() invalidates it.

        Returns:
            list[list[int]]: A list of cuts for every line.
        """
        if self._cuts is not None:
            return self._cuts
        width = self.width
        height = self.height
        screen_region = Region(0, 0, width, height)
        # Every line always has cuts at 0 and at the full width.
        cuts_sets = [{0, width} for _ in range(height)]
        if self.map is not None:
            for region, order, clip in self.map.values():
                region = region.intersection(clip)
                if region and (region in screen_region):
                    region_cuts = region.x_extents
                    for y in region.y_range:
                        cuts_sets[y].update(region_cuts)
        # Sort the cuts for each line
        self._cuts = [sorted(cut_set) for cut_set in cuts_sets]
        return self._cuts
    def _get_renders(self, console: Console) -> Iterable[tuple[Region, Region, Lines]]:
        """Yield (region, clip, lines) for every visible widget, topmost first.

        Widgets whose clip lies entirely within their region are passed
        through untrimmed; partially overlapping widgets have their lines
        sliced down to the visible sub-region.
        """
        _rich_traceback_guard = True
        layout_map = self.map
        if layout_map:
            # Sort by `order` descending so topmost widgets come first.
            widget_regions = sorted(
                (
                    (widget, region, order, clip)
                    for widget, (region, order, clip) in layout_map.items()
                ),
                key=itemgetter(2),
                reverse=True,
            )
        else:
            widget_regions = []
        for widget, region, _order, clip in widget_regions:
            if not widget.is_visual:
                continue
            lines = widget._get_lines()
            if clip in region:
                # Clip is wholly contained in the widget's region; no trimming.
                yield region, clip, lines
            elif clip.overlaps(region):
                # Partial overlap: keep only rows/columns inside the clip.
                new_region = region.intersection(clip)
                delta_x = new_region.x - region.x
                delta_y = new_region.y - region.y
                splits = [delta_x, delta_x + new_region.width]
                lines = lines[delta_y : delta_y + new_region.height]
                divide = Segment.divide
                # divide() returns three runs; the middle one is the visible span.
                lines = [list(divide(line, splits))[1] for line in lines]
                yield region, clip, lines
@classmethod
def _assemble_chops(
cls, chops: list[dict[int, list[Segment] | None]]
) -> Iterable[list[Segment]]:
from_iterable = chain.from_iterable
for bucket in chops:
yield from_iterable(
line for _, line in sorted(bucket.items()) if line is not None
)
    def render(
        self,
        console: Console,
        *,
        crop: Region = None,
    ) -> SegmentLines:
        """Render a layout.

        Args:
            console (Console): Console instance.
            crop (Optional[Region]): Region to crop the output to; defaults
                to the full screen.

        Returns:
            SegmentLines: A renderable
        """
        width = self.width
        height = self.height
        screen = Region(0, 0, width, height)
        crop_region = crop or Region(0, 0, self.width, self.height)
        _Segment = Segment
        divide = _Segment.divide
        # Maps each cut on to a list of segments
        cuts = self.cuts
        chops: list[dict[int, list[Segment] | None]] = [
            {cut: None for cut in cut_set} for cut_set in cuts
        ]
        # TODO: Provide an option to update the background
        background_style = console.get_style(self.background)
        background_render = [
            [_Segment(" " * width, background_style)] for _ in range(height)
        ]
        # Go through all the renders in reverse order and fill buckets with no render
        renders = list(self._get_renders(console))
        clip_y, clip_y2 = crop_region.y_extents
        # The background render is appended last so it only fills buckets no
        # widget claimed.
        for region, clip, lines in chain(
            renders, [(screen, screen, background_render)]
        ):
            # clip = clip.intersection(crop_region)
            render_region = region.intersection(clip)
            for y, line in enumerate(lines, render_region.y):
                # NOTE(review): `clip_y > y > clip_y2` is always False when
                # clip_y <= clip_y2, so no line is ever skipped here. The
                # intent was probably to skip lines outside the crop —
                # confirm before changing.
                if clip_y > y > clip_y2:
                    continue
                # first_cut = clamp(render_region.x, clip_x, clip_x2)
                # last_cut = clamp(render_region.x + render_region.width, clip_x, clip_x2)
                first_cut = render_region.x
                last_cut = render_region.x_max
                # Cuts for this line that fall inside the render span.
                final_cuts = [cut for cut in cuts[y] if (last_cut >= cut >= first_cut)]
                # final_cuts = cuts[y]
                # log(final_cuts, render_region.x_extents)
                if len(final_cuts) == 2:
                    # Only the two span edges: the whole line fits one bucket.
                    cut_segments = [line]
                else:
                    render_x = render_region.x
                    relative_cuts = [cut - render_x for cut in final_cuts]
                    _, *cut_segments = divide(line, relative_cuts)
                # First writer wins: earlier (topmost) renders keep the bucket.
                for cut, segments in zip(final_cuts, cut_segments):
                    if chops[y][cut] is None:
                        chops[y][cut] = segments
        # Assemble the cut renders in to lists of segments
        crop_x, crop_y, crop_x2, crop_y2 = crop_region.corners
        output_lines = self._assemble_chops(chops[crop_y:crop_y2])

        def width_view(line: list[Segment]) -> list[Segment]:
            # Trim a full-width line down to the cropped horizontal span.
            if line:
                div_lines = list(divide(line, [crop_x, crop_x2]))
                line = div_lines[1] if len(div_lines) > 1 else div_lines[0]
            return line

        if crop is not None and (crop_x, crop_x2) != (0, self.width):
            render_lines = [width_view(line) for line in output_lines]
        else:
            render_lines = list(output_lines)
        return SegmentLines(render_lines, new_lines=True)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
yield self.render(console)
def update_widget(self, console: Console, widget: Widget) -> LayoutUpdate | None:
if widget not in self.regions:
return None
region, clip = self.regions[widget]
if not region.size:
return None
widget.clear_render_cache()
update_region = region.intersection(clip)
update_lines = self.render(console, crop=update_region).lines
update = LayoutUpdate(update_lines, update_region.x, update_region.y)
return update
|
self.lines = lines
self.x = x
self.y = y
|
test_bipartite_match_op.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
def bipartite_match(distance, match_indices, match_dist):
    """Greedy bipartite matching.

    Repeatedly takes the globally largest remaining distance and pairs its
    row and column when neither is matched yet, until every row is matched
    or no positive pair remains. Results are written in place.

    Args:
        distance (numpy.array): The distance of two entries with shape [M, N].
        match_indices (numpy.array): The matched indices from column to row
            with shape [N]; must be initialized to -1.
        match_dist (numpy.array): The matched distance from column to row
            with shape [N]; must be initialized to 0.
    """
    match_pair = []
    row, col = distance.shape
    for i in range(row):
        for j in range(col):
            match_pair.append((i, j, distance[i][j]))
    match_sorted = sorted(match_pair, key=lambda tup: tup[2], reverse=True)
    # `np.int` was removed in NumPy 1.24; the builtin int dtype is equivalent.
    row_indices = -1 * np.ones((row, ), dtype=int)
    idx = 0
    for i, j, dist in match_sorted:
        if idx >= row:
            break
        if match_indices[j] == -1 and row_indices[i] == -1 and dist > 0:
            match_indices[j] = i
            row_indices[i] = j
            match_dist[j] = dist
            idx += 1
def argmax_match(distance, match_indices, match_dist, threshold):
    """Match each still-unmatched column to its best row above a threshold.

    Args:
        distance (numpy.array): Distance matrix with shape [M, N].
        match_indices (numpy.array): Column-to-row indices, shape [N];
            entries already != -1 are left untouched.
        match_dist (numpy.array): Column-to-row distances, shape [N].
        threshold (float): Minimum distance required for a match.
    """
    r, c = distance.shape
    # `xrange` is Python 2 only; `range` behaves the same here.
    for j in range(c):
        if match_indices[j] != -1:
            continue
        col_dist = distance[:, j]
        indices = np.argwhere(col_dist >= threshold).flatten()
        if len(indices) < 1:
            continue
        match_indices[j] = indices[np.argmax(col_dist[indices])]
        match_dist[j] = col_dist[match_indices[j]]
def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None):
    """Bipartite Matching algorithm for batch input.

    Args:
        distance (numpy.array): The distance of two entries with shape [M, N].
        lod (list of int): The offsets of each input in this batch.
        match_type (str|None): When 'per_prediction', remaining columns are
            additionally matched by argmax above dist_threshold.
        dist_threshold (float|None): Threshold for the per-prediction pass.

    Returns:
        tuple: (match_indices, match_dist), each shaped [len(lod) - 1, N].
    """
    n = len(lod) - 1
    m = distance.shape[1]
    # `np.int` was removed in NumPy 1.24; use the builtin int dtype.
    match_indices = -1 * np.ones((n, m), dtype=int)
    match_dist = np.zeros((n, m), dtype=np.float32)
    for i in range(n):
        bipartite_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],
                        match_dist[i, :])
        if match_type == 'per_prediction':
            argmax_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :],
                         match_dist[i, :], dist_threshold)
    return match_indices, match_dist
class TestBipartiteMatchOpWithLoD(OpTest):
    """Checks the operator against the Python reference using LoD input."""

    def setUp(self):
        self.op_type = 'bipartite_match'
        lod = [[0, 5, 11, 23]]
        distance = np.random.random((23, 217)).astype('float32')
        indices, matched = batch_bipartite_match(distance, lod[0])
        self.inputs = {'DistMat': (distance, lod)}
        self.outputs = {
            'ColToRowMatchIndices': indices,
            'ColToRowMatchDist': matched,
        }

    def test_check_output(self):
        self.check_output()
class TestBipartiteMatchOpWithoutLoD(OpTest):
    """Checks the operator with a plain tensor input (no LoD attached)."""

    def setUp(self):
        self.op_type = 'bipartite_match'
        lod = [[0, 8]]
        distance = np.random.random((8, 17)).astype('float32')
        indices, matched = batch_bipartite_match(distance, lod[0])
        self.inputs = {'DistMat': distance}
        self.outputs = {
            'ColToRowMatchIndices': indices,
            'ColToRowMatchDist': matched,
        }

    def test_check_output(self):
        self.check_output()
class TestBipartiteMatchOpWithPerPredictionType(OpTest):
def setUp(self):
self.op_type = 'bipartite_match'
lod = [[0, 5, 11, 23]]
dist = np.random.random((23, 237)).astype('float32')
match_indices, match_dist = batch_bipartite_match(dist, lod[0],
'per_prediction', 0.5)
self.inputs = {'DistMat': (dist, lod)}
self.outputs = {
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_dist,
}
self.attrs = {
'match_type': 'per_prediction',
|
self.check_output()
if __name__ == '__main__':
unittest.main()
|
'dist_threshold': 0.5,
}
def test_check_output(self):
|
index.spec.util.ts
|
export function v(date: Date): number {
return date.valueOf();
|
}
|
|
debounce.ts
|
import {AsyncInterface} from "./async-interface";
import {timeOut} from "./time-out";
export class
|
{
  /**
   * @param asyncModule Scheduler implementing the Async interface (run/cancel).
   * @param callback Function invoked when the scheduled job fires.
   * @param handle Scheduler handle of the pending job, or null when idle.
   */
  constructor(private asyncModule: AsyncInterface = null, private callback: Function = null, private handle: number = null) {
  }
/**
* Sets the scheduler; that is, a module with the Async interface,
* a callback and optional arguments to be passed to the run function
* from the async module.
*
* @param {!AsyncInterface} asyncModule Object with Async interface.
* @param {function()} callback Callback to run.
* @return {void}
*/
setConfig(asyncModule: AsyncInterface, callback: Function) {
this.asyncModule = asyncModule;
this.callback = callback;
this.handle = this.asyncModule.run(() => {
this.handle = null;
this.callback();
});
}
  /**
   * Cancels an active debouncer; no-op when nothing is scheduled.
   * (Note: despite the original wording, this returns nothing.)
   *
   * @return {void}
   */
  cancel() {
    if (this.isActive()) {
      this.asyncModule.cancel(this.handle);
      this.handle = null;
    }
  }
  /**
   * Cancels an active debouncer and runs its callback immediately;
   * no-op when nothing is scheduled. Returns nothing.
   *
   * @return {void}
   */
  flush() {
    if (this.isActive()) {
      this.cancel();
      this.callback();
    }
  }
  /**
   * Returns true if the debouncer is active (a job is still scheduled).
   *
   * @return {boolean} True if active.
   */
  isActive() {
    // Loose `!=` deliberately treats undefined like null.
    return this.handle != null;
  }
/**
* Creates a debouncer if no debouncer is passed as a parameter
* or it cancels an active debouncer otherwise. The following
* example shows how a debouncer can be called multiple times within a
* microtask and "debounced" such that the provided callback function is
* called once. Add this method to a custom element:
*
* ```js
* import {microTask} from '@polymer/polymer/lib/utils/async.js';
* import {Debouncer} from '@polymer/polymer/lib/utils/debounce.js';
* // ...
*
* _debounceWork() {
* this._debounceJob = Debouncer.debounce(this._debounceJob,
* microTask, () => this._doWork());
* }
* ```
*
* If the `_debounceWork` method is called multiple times within the same
* microtask, the `_doWork` function will be called only once at the next
* microtask checkpoint.
*
* Note: In testing it is often convenient to avoid asynchrony. To accomplish
* this with a debouncer, you can use `enqueueDebouncer` and
* `flush`. For example, extend the above example by adding
* `enqueueDebouncer(this._debounceJob)` at the end of the
* `_debounceWork` method. Then in a test, call `flush` to ensure
* the debouncer has completed.
*
* @param {Debouncer?} debouncer Debouncer object.
* @param {!AsyncInterface} asyncModule Object with Async interface
* @param {function()} callback Callback to run.
* @return {!Debouncer} Returns a debouncer object.
*/
static debounce(debouncer: Debouncer, asyncModule: AsyncInterface, callback: Function) {
if (debouncer instanceof Debouncer) {
debouncer.cancel();
} else {
debouncer = new Debouncer();
}
debouncer.setConfig(asyncModule, callback);
return debouncer;
}
}
/**
 * Method decorator that debounces calls to the decorated method by `delay`
 * milliseconds. The debouncer instance is stored on the decorated object as
 * `this.debouncer`.
 *
 * Fixes over the previous version: the replacement must be a regular
 * `function` (an arrow captured the module-level `this`), and the original
 * method must be wrapped in a closure — `originalCall.apply(args)` invoked it
 * immediately with `args` as the `this` argument and no parameters.
 */
export const debounce: (delay: number) => MethodDecorator = delay => (target, propertyKey, descriptor) => {
  const desc = descriptor as any;
  const originalCall: Function = desc.value;
  desc.value = function (this: any, ...args: any[]) {
    this.debouncer = Debouncer.debounce(
      this.debouncer,
      timeOut.after(delay),
      () => originalCall.apply(this, args)
    );
  };
};
|
Debouncer
|
CreateCustomDataIdentifierCommand.ts
|
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
MiddlewareStack,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
import { Macie2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Macie2Client";
import { CreateCustomDataIdentifierRequest, CreateCustomDataIdentifierResponse } from "../models/models_0";
import {
deserializeAws_restJson1CreateCustomDataIdentifierCommand,
serializeAws_restJson1CreateCustomDataIdentifierCommand,
} from "../protocols/Aws_restJson1";
export interface CreateCustomDataIdentifierCommandInput extends CreateCustomDataIdentifierRequest {}
export interface CreateCustomDataIdentifierCommandOutput extends CreateCustomDataIdentifierResponse, __MetadataBearer {}
/**
* <p>Creates and defines the criteria and other settings for a custom data identifier.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
* import { Macie2Client, CreateCustomDataIdentifierCommand } from "@aws-sdk/client-macie2"; // ES Modules import
* // const { Macie2Client, CreateCustomDataIdentifierCommand } = require("@aws-sdk/client-macie2"); // CommonJS import
* const client = new Macie2Client(config);
* const command = new CreateCustomDataIdentifierCommand(input);
* const response = await client.send(command);
* ```
*
* @see {@link CreateCustomDataIdentifierCommandInput} for command's `input` shape.
* @see {@link CreateCustomDataIdentifierCommandOutput} for command's `response` shape.
* @see {@link Macie2ClientResolvedConfig | config} for command's `input` shape.
*
*/
export class
|
extends $Command<
CreateCustomDataIdentifierCommandInput,
CreateCustomDataIdentifierCommandOutput,
Macie2ClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
  /** Stores the request input; serialization happens in resolveMiddleware. */
  constructor(readonly input: CreateCustomDataIdentifierCommandInput) {
    // Start section: command_constructor
    super();
    // End section: command_constructor
  }
  /**
   * Attaches the serde plugin, builds the middleware stack and returns the
   * handler that executes this command via the client's request handler.
   *
   * @internal
   */
  resolveMiddleware(
    clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
    configuration: Macie2ClientResolvedConfig,
    options?: __HttpHandlerOptions
  ): Handler<CreateCustomDataIdentifierCommandInput, CreateCustomDataIdentifierCommandOutput> {
    this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
    const stack = clientStack.concat(this.middlewareStack);
    const { logger } = configuration;
    const clientName = "Macie2Client";
    const commandName = "CreateCustomDataIdentifierCommand";
    // Context made available to every middleware, including log redaction.
    const handlerExecutionContext: HandlerExecutionContext = {
      logger,
      clientName,
      commandName,
      inputFilterSensitiveLog: CreateCustomDataIdentifierRequest.filterSensitiveLog,
      outputFilterSensitiveLog: CreateCustomDataIdentifierResponse.filterSensitiveLog,
    };
    const { requestHandler } = configuration;
    return stack.resolve(
      (request: FinalizeHandlerArguments<any>) =>
        requestHandler.handle(request.request as __HttpRequest, options || {}),
      handlerExecutionContext
    );
  }
  /** Serializes the command input to a restJson1 HTTP request. */
  private serialize(input: CreateCustomDataIdentifierCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
    return serializeAws_restJson1CreateCustomDataIdentifierCommand(input, context);
  }
  /** Deserializes a restJson1 HTTP response into the command output. */
  private deserialize(
    output: __HttpResponse,
    context: __SerdeContext
  ): Promise<CreateCustomDataIdentifierCommandOutput> {
    return deserializeAws_restJson1CreateCustomDataIdentifierCommand(output, context);
  }
// Start section: command_body_extra
// End section: command_body_extra
}
|
CreateCustomDataIdentifierCommand
|
dynamicbanscore.go
|
// Copyright (c) 2016 The btcsuite developers
// Copyright (c) 2016 The Eacred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package connmgr
import (
"fmt"
"math"
"sync"
"time"
)
const (
// Halflife defines the time (in seconds) by which the transient part
// of the ban score decays to one half of its original value.
Halflife = 60
// lambda is the decaying constant.
lambda = math.Ln2 / Halflife
// Lifetime defines the maximum age of the transient part of the ban
// score to be considered a non-zero score (in seconds).
Lifetime = 1800
// precomputedLen defines the amount of decay factors (one per second) that
// should be precomputed at initialization.
precomputedLen = 64
)
// precomputedFactor stores precomputed exponential decay factors for the first
// 'precomputedLen' seconds starting from t == 0.
var precomputedFactor [precomputedLen]float64
// init precomputes decay factors.
func init()
|
// decayFactor returns the decay factor at t seconds, using precalculated values
// if available, or calculating the factor if needed.
func decayFactor(t int64) float64 {
	if t >= precomputedLen {
		return math.Exp(-1.0 * float64(t) * lambda)
	}
	return precomputedFactor[t]
}
// DynamicBanScore provides dynamic ban scores consisting of a persistent and a
// decaying component. The persistent score could be utilized to create simple
// additive banning policies similar to those found in other bitcoin node
// implementations.
//
// The decaying score enables the creation of evasive logic which handles
// misbehaving peers (especially application layer DoS attacks) gracefully
// by disconnecting and banning peers attempting various kinds of flooding.
// DynamicBanScore allows these two approaches to be used in tandem.
//
// Zero value: Values of type DynamicBanScore are immediately ready for use upon
// declaration.
type DynamicBanScore struct {
	lastUnix int64 // Unix time of the last transient-score update
	transient float64 // decaying score component
	persistent uint32 // non-decaying score component
	mtx sync.Mutex // guards all fields above
}
// String returns the ban score as a human-readable string.
func (s *DynamicBanScore) String() string {
	s.mtx.Lock()
	// Use the unexported, lock-free int() here: calling the exported Int()
	// would try to re-acquire mtx and deadlock (sync.Mutex is not reentrant).
	r := fmt.Sprintf("persistent %v + transient %v at %v = %v as of now",
		s.persistent, s.transient, s.lastUnix, s.int(time.Now()))
	s.mtx.Unlock()
	return r
}
// Int returns the current ban score, the sum of the persistent and decaying
// scores.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Int() uint32 {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.int(time.Now())
}
// Increase increases both the persistent and decaying scores by the values
// passed as parameters. The resulting score is returned.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Increase(persistent, transient uint32) uint32 {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.increase(persistent, transient, time.Now())
}
// Reset set both persistent and decaying scores to zero.
//
// This function is safe for concurrent access.
func (s *DynamicBanScore) Reset() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.persistent = 0
	s.transient = 0
	s.lastUnix = 0
}
// int returns the ban score, the sum of the persistent and decaying scores at a
// given point in time.
//
// This function is not safe for concurrent access. It is intended to be used
// internally and during testing.
func (s *DynamicBanScore) int(t time.Time) uint32 {
	dt := t.Unix() - s.lastUnix
	// Outside the transient lifetime (or with a negligible/expired transient
	// score) only the persistent component counts.
	if dt < 0 || dt > Lifetime || s.transient < 1 {
		return s.persistent
	}
	return s.persistent + uint32(s.transient*decayFactor(dt))
}
// increase increases the persistent, the decaying or both scores by the values
// passed as parameters. The resulting score is calculated as if the action was
// carried out at the point time represented by the third parameter. The
// resulting score is returned.
//
// This function is not safe for concurrent access.
func (s *DynamicBanScore) increase(persistent, transient uint32, t time.Time) uint32 {
	s.persistent += persistent
	tu := t.Unix()
	dt := tu - s.lastUnix
	if transient > 0 {
		if Lifetime < dt {
			// Transient component fully expired; start from zero.
			s.transient = 0
		} else if s.transient > 1 && dt > 0 {
			// Apply exponential decay for the elapsed seconds.
			s.transient *= decayFactor(dt)
		}
		s.transient += float64(transient)
		// lastUnix only advances when the transient score changes.
		s.lastUnix = tu
	}
	return s.persistent + uint32(s.transient)
}
|
{
for i := range precomputedFactor {
precomputedFactor[i] = math.Exp(-1.0 * float64(i) * lambda)
}
}
|
deleter.go
|
package fs
import (
"context"
"github.com/gomods/athens/pkg/errors"
opentracing "github.com/opentracing/opentracing-go"
)
// Delete removes a specific version of a module.
func (v *storageImpl) Delete(ctx context.Context, module, version string) error {
const op errors.Op = "fs.Delete"
sp, ctx := opentracing.StartSpanFromContext(ctx, "storage.fs.Delete")
defer sp.Finish()
versionedPath := v.versionLocation(module, version)
exists, err := v.Exists(ctx, module, version)
if err != nil
|
if !exists {
return errors.E(op, errors.M(module), errors.V(version), errors.KindNotFound)
}
return v.filesystem.RemoveAll(versionedPath)
}
|
{
return errors.E(op, err, errors.M(module), errors.V(version))
}
|
index.ts
|
import dotenv from "dotenv";
dotenv.config();
import { CommandClient, ShardClient } from "detritus-client";
import { ChannelBase } from "detritus-client/lib/structures";
const packagejson = require("../package.json");
// Runtime configuration pulled from the environment (.env) and package.json.
const { TOKEN, PREFIX, VERSION } = {
  TOKEN: process.env.TOKEN!,
  PREFIX: process.env.PREFIX!,
  VERSION: <string>packagejson.version!,
};
// Single gateway shard plus a command dispatcher bound to it.
export const client = new ShardClient(TOKEN);
const commandClient = new CommandClient(client, {
  prefix: PREFIX,
  ignoreMe: true,
  useClusterClient: false,
});
// Occasionally replies to ordinary chatter with a canned quip.
client.on("messageCreate", async (payload) => {
  // Ignore traffic from webhooks, ourselves and system messages.
  const author = payload.message.author;
  if (author.isWebhook || author.isMe || author.isSystem) return;
  // Never talk over a command invocation.
  if (
    payload.message.content.startsWith(commandClient.prefixes.custom.first()!)
  )
    return;
  // Reply roughly once every 300 eligible messages.
  if (Math.random() > 1 / 300) return;
  const messages: string[] = [
    "*Allegedly..*",
    ":thinking: *Hmmmm..*",
    "~~Emily~~ Cobalt is so hot! :heart_eyes: *don't tell my husband tho..* :eyes:",
    "I had so much fun last night titanicguy! :heart::eggplant::sweat_drops:",
    '*"hi"* - drewfc',
    "*Shivers..*",
  ];
  const choice = Math.floor(Math.random() * messages.length);
  await payload.message.reply(messages[choice]);
});
client.on("messageReactionAdd", async (payload) => {
if (payload.reaction.emoji.name !== "⭐") return;
//if (payload.reaction.count !== 10) return;
const starboardChannel: ChannelBase | undefined =
client.channels.get("773611028075380776") ||
(await client.rest.fetchChannel("773611028075380776"));
console.log(payload);
if (starboardChannel.canMessage)
|
embed: {
color: 0xbdaf4d,
author: {
iconUrl: payload.message?.author.avatarUrl,
name: payload.message?.author?.username,
},
fields: [{ name: "Source", value: payload.message?.jumpLink! }],
description: payload.message?.content || "content",
},
content: `💫 <#${payload.channelId}>`,
});
});
// Bootstrap: connect the gateway, set presence, load commands, then start
// the command client.
(async () => {
  await client.run();
  client.gateway.setPresence({
    activity: {
      // Only the major version number is displayed.
      name: `Bhop Bot v${VERSION.slice(0, 1)} | ${PREFIX}help`,
      type: 1,
      url: "https://twitch.tv/insyri",
    },
  });
  // Recursively load every command module before accepting commands.
  await commandClient
    .addMultipleIn("./commands", { subdirectories: true })
    .then(async (c) => {
      console.log(`Loaded ${c.commands.length.toString()} Commands`);
    })
    .catch((err) => console.error(err));
  await commandClient.run();
  console.log(`Bot Online\nRunning on node ${process.version}`);
})();
|
client.rest.createMessage(starboardChannel!.id, {
|
mac.py
|
#coding: utf-8
import subprocess, string, ast
def get(host):
    """Read back the MAC list previously saved for *host*.

    The list is stored as a Python literal in ``<host>.txt``.

    Returns:
        list: MAC address strings parsed from the file.
    """
    # Context manager guarantees the handle is closed (the previous version
    # leaked it), and plain read mode is enough — nothing is written here.
    with open("%s.txt" % host, "r") as f:
        macs = ast.literal_eval(f.read())
    return macs
def
|
(host):
macs = []
command1 = "ssh user@%s 'ls /sys/class/net'" %host
try:
list_intefaces = subprocess.check_output(command1, shell=True)
list_intefaces = string.split(list_intefaces)
for interface in list_intefaces:
command = "ssh user@%s 'cat /sys/class/net/%s/address'" %(host, interface) # command to return mac address
mac = subprocess.check_output(command, shell=True) # Receives the output of the above command
macs.append(mac.rstrip())
except subprocess.CalledProcessError:
print 'Não foi possível obter o MAC de %s'%host
file = open("%s.txt"%host, "w+")
file.write(str(macs))
file.close()
print '%s %s'%(host, macs)
|
set
|
configure.go
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// configureCmd represents the configure command
var configureCmd = &cobra.Command{
Use: "configure",
Short: "Configure saves the provided global flags to the Config File",
Long: `Configure saves the provided global flags to the Config File.
this makes using the cli easier as they don't have to be specified all the time.`,
RunE: func(cmd *cobra.Command, args []string) error {
if viper.ConfigFileUsed() == "" {
err := viper.SafeWriteConfig()
|
}
} else {
err := viper.WriteConfig()
if err != nil {
return fmt.Errorf("Writing Config: %w", err)
}
}
if viper.GetBool("debug") {
fmt.Printf("Saved: %+v\n", viper.AllSettings())
}
return nil
},
}
func init() {
rootCmd.AddCommand(configureCmd)
}
|
if err != nil {
return fmt.Errorf("Writing Config: %w", err)
|
modelio.py
|
"""
Contains I/O functions
"""
import numpy
import theano
import logging
import sys
import tempfile
from collections import defaultdict
def build_character_dictionary(path, chars=None):
    """Build (or extend) a character -> id mapping from a tab-separated corpus.

    Each non-empty line must be "word<TAB>tag"; ids start at 1 (0 is used
    elsewhere in this module for unknown characters).

    Args:
        path: Path of the corpus file.
        chars: Optional existing mapping to extend in place. A fresh dict is
            created when omitted — the previous mutable-default argument was
            shared across calls.

    Returns:
        dict: The character -> id mapping.
    """
    if chars is None:
        chars = {}
    with open(path, 'r') as fin:
        lineno = 0
        for line in fin:
            lineno += 1
            line = line.strip()
            if len(line) == 0:
                continue
            try:
                word, _ = line.split('\t')
                for c in word:
                    if c not in chars:
                        chars[c] = len(chars) + 1
            except ValueError as ex:
                # Report malformed lines through logging (the Python 2 print
                # statement used before is a syntax error on Python 3).
                logging.warning("%s (line %d: %r)", ex, lineno, line)
    return chars
def build_word_dictionary(path, words=None):
    """Build (or extend) a word -> id mapping from a tab-separated corpus.

    Args:
        path: Path of the corpus file ("word<TAB>tag" per non-empty line).
        words: Optional existing mapping to extend in place. A fresh dict is
            created when omitted — avoids the shared-mutable-default pitfall
            of the previous version.

    Returns:
        dict: The word -> id mapping (ids start at 1; 0 means unknown).
    """
    if words is None:
        words = {}
    with open(path, 'r') as fin:
        for line in fin:
            line = line.strip()
            if len(line) == 0:
                continue
            word, _ = line.split('\t')
            if word not in words:
                words[word] = len(words) + 1
    return words
def build_tag_dictionary(path, tags=None):
    """Build (or extend) a POS-tag -> id mapping from a tab-separated corpus.

    Args:
        path: Path of the corpus file ("word<TAB>tag" per non-empty line).
        tags: Optional existing mapping to extend in place. A fresh dict is
            created when omitted — avoids the shared-mutable-default pitfall
            of the previous version.

    Returns:
        dict: The tag -> id mapping (ids start at 1; 0 means unknown).
    """
    if tags is None:
        tags = {}
    with open(path, 'r') as fin:
        for line in fin:
            line = line.strip()
            if len(line) == 0:
                continue
            _, tag = line.split('\t')
            if tag not in tags:
                tags[tag] = len(tags) + 1
    return tags
def get_tweet_words(path):
    """Group the corpus at *path* into tweets (lists of unicode words).

    Blank lines separate tweets; the tweet index is a running counter.

    Returns:
        defaultdict: tweet index -> list of words.
    """
    t = defaultdict(list)
    c = 0
    with open(path, 'r') as fin:
        for line in fin:
            line = line.strip()
            if len(line) == 0:
                # Blank line ends the current tweet.
                c += 1
                continue
            word, pos = line.split('\t')
            # NOTE(review): str.decode is Python 2 only — this module appears
            # to target Python 2; confirm before running under Python 3.
            word = word.decode('utf8')
            t[c].append(word)
    return t
def get_max_word_count(path):
    """99th-percentile tweet length (in words) for the corpus at *path*."""
    tweets = get_tweet_words(path)
    counts = [len(words) for words in tweets.values()]
    m = int(numpy.percentile(counts, 99))
    logging.debug("get_max_word_count('%s') = %d", path, m)
    return m
def
|
(path):
t = get_tweet_words(path)
m = 0
d = []
for c in t:
for w in t[c]:
d.append(len(w))
if len(w) >= m:
m = len(w)
logging.debug('length: %s, %d', w, m)
m = numpy.percentile(d, 99)
logging.debug("get_max_word_length('%s') = %d", path, m)
return m
def get_max_length(path):
    """Length (in characters) of the longest tweet in the corpus at *path*."""
    tweets = get_tweet_words(path)
    joined = {index: u"".join(words) for index, words in tweets.items()}
    m = max(len(text) for text in joined.values())
    logging.debug('get_max_length(%s) = %d', path, m)
    return m
def load_pos_tagged_data(path, chardict = {}, worddict={}, posdict={}, overlap=15, allow_append=True):
    """Load a tab-separated word/POS corpus into id sequences.

    Args:
        path: Corpus path; blank lines separate tweets.
        chardict, worddict, posdict: id mappings, extended in place when
            allow_append is True. NOTE(review): these mutable defaults are
            shared across calls — callers should always pass explicit dicts.
        overlap: Unused in this function — presumably consumed by callers;
            confirm before removing.
        allow_append: When False, unknown items map to id 0.

    Returns:
        tuple: (chars, words, labels), one list per tweet.
    """
    if allow_append:
        build_character_dictionary(path, chardict)
        build_word_dictionary(path, worddict)
        build_tag_dictionary(path, posdict)
    cur_chars, cur_words, cur_labels = [], [], []
    words, chars, labels = [], [], []
    with open(path, 'r') as fin:
        for line in fin:
            line = line.strip()
            if len(line) == 0:
                # End of tweet: flush buffers, dropping the trailing
                # separator entry from chars/words.
                chars.append(cur_chars[:-1])
                words.append(cur_words[:-1])
                labels.append(cur_labels)
                cur_chars = []
                cur_labels = []
                cur_words = []
                continue
            word, pos = line.split('\t')
            if word not in worddict and allow_append:
                worddict[word] = len(worddict)+1
            for c in word:
                if c not in chardict and allow_append:
                    chardict[c] = len(chardict)+1
                if c in chardict:
                    cur_chars.append(chardict[c])
                else:
                    cur_chars.append(0)
            if word in worddict:
                cur_words.append(worddict[word])
            else:
                cur_words.append(0)
            if pos not in posdict and allow_append:
                posdict[pos] = len(posdict)+1
            if pos in posdict:
                cur_labels.append(posdict[pos])
            else:
                cur_labels.append(0)
            # NOTE(review): the word id is appended a second time here (it was
            # already appended above), doubling every word entry — looks
            # unintentional; confirm against consumers of `words`.
            if word in worddict:
                cur_words.append(worddict[word])
            else:
                cur_words.append(0)
            # 0 acts as the inter-word separator in the character stream.
            cur_chars.append(0)
    if len(cur_chars) > 0:
        chars.append(cur_chars)
        words.append(cur_words)
        labels.append(cur_labels)
    return chars, words, labels
def string_to_unprepared_format(text, chardict, worddict):
    """Convert a whitespace-separated string into model input sequences.

    Writes the tokens to a temporary CoNLL-style file with a dummy '?' tag
    and reloads it through load_pos_tagged_data using the existing
    dictionaries (without appending new entries).

    Returns:
        tuple: ([], chars, words, labels)
    """
    # Text mode ('w'), not binary: str lines are written below.
    with open('sample.conll', 'w') as fp:
        for word in text.split():
            fp.write('%s\t?\n' % (word,))
    # BUG FIX: the previous call passed False positionally, which bound it to
    # `overlap` rather than `allow_append`; pass it by keyword.
    chars, words, labels = load_pos_tagged_data(
        "sample.conll", chardict, worddict, {'?': 0}, allow_append=False)
    return [], chars, words, labels
def prepare_data(char_seqs, labels, maxw, maxwlen, dim_proj):
    """
    Create the matrices from the datasets.

    This pad each sequence to the same length: the length of the
    longest sequence or maxlen.

    if maxlen is set, we will cut all sequences to this maximum
    length

    This swap the axis!
    """
    # x: a list of sentences
    n_samples = len(char_seqs)
    # x_c[word, char-in-word, sample] = character id (0-padded).
    x_c = numpy.zeros((maxw, maxwlen, n_samples)).astype('int8')
    # x_mask mirrors x_c with a dim_proj-wide ones vector where data exists.
    x_mask = numpy.zeros((maxw, maxwlen, n_samples, dim_proj)).astype(theano.config.floatX)
    y = numpy.zeros((maxw, n_samples)).astype('int8')
    y_mask = numpy.zeros((maxw, n_samples)).astype('int8')
    for idx, (s_c, l) in enumerate(zip(char_seqs, labels)):
        # idx is the current position in the mini-batch
        # s_c is a list of characters
        # s_w is a list of words
        # l is a list of labels
        c = 0
        p = 0
        warning = None
        for j, a in enumerate(s_c):
            # j is the current character in this tweet
            # idx is the current tweet in this minibatch
            # c is the current word (can be up to 16)
            # p is the current character in this word
            if a == 0:
                # This current character is a space
                # Increase the word count and continue
                c += 1
                p = 0
                j += 1 # Temporarily skip to next loop char
            if c >= maxw:
                if j != len(s_c):
                    warning = "truncation: too many words in this tweet! {}-{}".format(j, len(s_c))
                break
            if c >= len(l):
                if j != len(s_c):
                    warning = "truncation: too many words for these labels {}-{}".format(j, len(s_c))
                break
            if p >= x_c.shape[1]:
                # Word longer than maxwlen: drop the excess characters.
                warning = "truncation: too many characters for this maxwlen"
            else:
                x_c[c, p, idx] = a
                x_mask[c, p, idx] = numpy.ones(dim_proj)
                y[c, idx] = l[c]
                y_mask[c, idx] = 1
            p += 1
        if warning is not None:
            #logging.warning("%s", warning)
            pass
    return x_c, x_mask, y, y_mask
|
get_max_word_length
|
files.py
|
# -*- coding: utf-8 -*-
u"""ファイル操作関連"""
from __future__ import absolute_import, division, print_function
from squid.vendor.Qt import QtGui
import os
_SIZE_SUFFIXES = ["B", "KB", "MB", "GB", "TB", "PB"]
_FILE_PROTOCOL = "file:///"
def convert_readable
|
""HumanReadableなファイルサイズを返す
Args:
nbytes (float): ファイルサイズ
Returns:
str: HumanReadableなファイルサイズ
"""
i = 0
while nbytes >= 1024 and i < len(_SIZE_SUFFIXES) - 1:
nbytes /= 1024.
i += 1
f = ("%.2f" % nbytes).rstrip("0").rstrip(".")
return "{0}{1}".format(f, _SIZE_SUFFIXES[i])
def reveal_in_finder(path):
    u"""Open *path* in the OS file browser (Finder/Explorer).

    Args:
        path (unicode): Path to reveal. Assumed to be an absolute path
            suitable for a file:/// URL — TODO confirm for Windows paths.
    """
    QtGui.QDesktopServices.openUrl(_FILE_PROTOCOL + path)
def read_text(path):
    u"""Read the file at *path* and return its contents.

    Args:
        path (unicode): File path.

    Returns:
        unicode: File contents, or an empty string when *path* is not a
            regular file.
    """
    if not os.path.isfile(path):
        return ""
    with open(path, "r") as f:
        return f.read()
|
_file_size(nbytes):
u"
|
lib.rs
|
use git2::{Config, Repository, RepositoryInitOptions};
use heck::KebabCase;
use include_dir::{include_dir, include_dir_impl, Dir};
use std::env;
use std::path::{Path, PathBuf};
use tera::{Context, Tera};
mod error;
mod template;
pub use self::error::Error;
use self::template::{Template, TemplateKind};
static VIZ_EXT_DIR: Dir = include_dir!("./templates/visualisation");
pub fn gen_viz(project_name: &str, git: bool) -> Result<(), error::Error>
|
fn create_project_path(project_name: &str) -> Result<PathBuf, Error> {
let project_dir_name = project_name.to_kebab_case();
let project_directory = env::current_dir()
.unwrap_or_else(|_e| ".".into())
.join(&project_dir_name);
if project_directory.exists() {
Err(Error::new(format!(
"Directory {} already exists",
project_dir_name
)))?
}
Ok(project_directory)
}
/// Build the Tera context used to render the template files.
///
/// Author/email come from the global git configuration when available;
/// placeholder values are used otherwise.
fn create_context(project_dir_name: &str) -> Context {
    let mut context = Context::new();
    context.insert("project_name", &project_dir_name);
    match get_git_author() {
        Some(user) => {
            context.insert("author", &user.user_name);
            match user.email {
                Some(email) => context.insert("email", &email),
                None => context.insert("email", "[email protected]"),
            }
        }
        None => {
            context.insert("author", "author");
            context.insert("email", "[email protected]");
        }
    }
    context
}
/// Author identity read from the global git configuration.
struct GitUser {
    user_name: String,
    /// `None` when `user.email` is not configured.
    email: Option<String>,
}
/// Read `user.name` (required) and `user.email` (optional) from the default
/// git configuration.
///
/// Returns `None` when the configuration cannot be opened or `user.name`
/// is missing.
fn get_git_author() -> Option<GitUser> {
    let config = Config::open_default().ok()?;
    let user_name = config.get_string("user.name").ok()?;
    // The email is optional; `.ok()` collapses the duplicated branches of
    // the previous version without changing behavior.
    let email = config.get_string("user.email").ok();
    Some(GitUser { user_name, email })
}
/// Initialise a non-bare git repository at `path`.
///
/// # Errors
///
/// Propagates any `git2` failure (converted into the crate error type).
pub fn init_git_repo(path: &PathBuf) -> Result<Repository, error::Error> {
    let repo = Repository::init_opts(path, RepositoryInitOptions::new().bare(false))?;
    Ok(repo)
}
|
{
let project_dir_name = project_name.to_kebab_case();
let project_directory = create_project_path(&project_dir_name)?;
if git {
init_git_repo(&project_directory)?;
} else {
std::fs::create_dir(&project_directory)?;
}
let template = Template::new(VIZ_EXT_DIR);
let context = create_context(&project_dir_name);
for thing in template.iter() {
match thing {
TemplateKind::Dir(dir) => {
let dir_name = dir.path();
let path = Tera::one_off(dir_name.to_str().unwrap(), &context, true)?;
let dir_name = Path::new(&path);
std::fs::create_dir(project_directory.join(&dir_name))?;
}
TemplateKind::File(file) => {
let file_name = file.path();
let path = Tera::one_off(file_name.to_str().unwrap(), &context, true)?;
let contents = Tera::one_off(
std::str::from_utf8(file.contents()).unwrap(),
&context,
true,
)
.unwrap();
let file_name = Path::new(&path);
std::fs::write(project_directory.join(&file_name), contents)?;
}
};
}
Ok(())
}
|
test_DMachineSetup.py
|
# -*- coding: utf-8 -*-
"""
@date Created on Thu May 18 14:35:34 2017
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author: pierre_b
"""
from os.path import join, isfile
from os import remove
import sys
from unittest import TestCase
from ddt import ddt, data
import mock # for unittest of raw_input
from PyQt5 import QtWidgets
from pyleecan.Classes.MachineSyRM import MachineSyRM
from pyleecan.Classes.MachineIPMSM import MachineIPMSM
from pyleecan.Classes.MachineDFIM import MachineDFIM
from pyleecan.Classes.MachineSCIM import MachineSCIM
from pyleecan.Classes.MachineSIPMSM import MachineSIPMSM
from pyleecan.Classes.MachineWRSM import MachineWRSM
from pyleecan.Classes.MachineSRM import MachineSRM
from pyleecan.GUI.Dialog.DMachineSetup.DMachineSetup import DMachineSetup
from pyleecan.Tests import save_gui_path as save_path
from pyleecan.GUI.Dialog.DMachineSetup.SMachineType.SMachineType import SMachineType
from pyleecan.GUI.Dialog.DMachineSetup.SMagnet.SMagnet import SMagnet
from pyleecan.GUI.Dialog.DMachineSetup.SWindParam.SWindParam import SWindParam
from pyleecan.GUI.Dialog.DMachineSetup.SWindCond.SWindCond import SWindCond
from pyleecan.GUI.Dialog.DMachineSetup.SBar.SBar import SBar
from pyleecan.GUI.Dialog.DMachineSetup.SWSlot.SWSlot import SWSlot
from pyleecan.GUI.Dialog.DMachineSetup.SMHoleMag.SMHoleMag import SMHoleMag
import matplotlib.pyplot as plt
from pyleecan.Tests import DATA_DIR
# Parametrisation data for test_load (consumed via @data(*load_test)).
# Each entry describes one machine file shipped with the test data:
#   type  - machine-type label expected in the c_type combo box
#   index - expected combo-box index for that type
#   name  - machine file name (without the .json extension)
#   p     - expected pole-pair count shown in si_p
#   count - expected number of setup steps in nav_step after loading
load_test = list()
load_test.append( # 1
    {"type": "SCIM", "index": 0, "name": "SCIM_001", "p": 1, "count": 10}
)
load_test.append( # 2
    {"type": "DFIM", "index": 1, "name": "DFIM_001", "p": 2, "count": 12}
)
load_test.append( # 3
    {"type": "SyRM", "index": 2, "name": "SynRM_001", "p": 2, "count": 9}
)
load_test.append( # 4
    {"type": "SPMSM", "index": 3, "name": "SPMSM_001", "p": 4, "count": 9}
)
load_test.append( # 5
    {"type": "SIPMSM", "index": 4, "name": "SIPMSM_008", "p": 4, "count": 9}
)
load_test.append( # 6
    {"type": "IPMSM", "index": 5, "name": "machine_IPMSM_A", "p": 5, "count": 9}
)
load_test.append( # 7
    {"type": "WRSM", "index": 6, "name": "WRSM_001", "p": 6, "count": 12}
)
load_test.append( # 8
    {"type": "SRM", "index": 7, "name": "SRM_test_load", "p": 10, "count": 9}
)
from PyQt5.QtCore import Qt
# Item flags for a selectable-and-enabled Qt item.
# NOTE(review): ENABLE_ITEM is not referenced in this chunk — presumably
# used elsewhere in the file; confirm before removing.
ENABLE_ITEM = Qt.ItemIsSelectable | Qt.ItemIsEnabled
@ddt
class test_DMachineSetup(TestCase):
    """Test that the widget DMachineSetup behaves like it should.

    Uses ddt to parametrise test_load over the module-level load_test data.
    """
    def setUp(self):
        """Run at the beginning of every test to setup the gui"""
        # Fresh widget per test. NOTE(review): matlib_path is relative, so
        # this assumes tests run from a directory containing ./MaterialData
        # — confirm against the test runner's working directory.
        self.widget = DMachineSetup(matlib_path="./MaterialData")
    @classmethod
    def setUpClass(cls):
        """Start the app for the test"""
        print("\nStart Test DMachineSetup")
        # One QApplication shared by all tests of this class.
        cls.app = QtWidgets.QApplication(sys.argv)
    @classmethod
    def tearDownClass(cls):
        """Exit the app after the test"""
        cls.app.quit()
    @data(*load_test)
    def test_load(self, test_dict):
        """Check that you can load a machine
        """
        # Patch the open-file dialog so clicking "load" resolves directly to
        # the machine's json file instead of prompting the user.
        return_value = (
            join(join(DATA_DIR, "Load_GUI"), test_dict["name"] + ".json"),
            "Json (*.json)",
        )
        with mock.patch(
            "PyQt5.QtWidgets.QFileDialog.getOpenFileName", return_value=return_value
        ):
            # To trigger the slot
            self.widget.b_load.clicked.emit(True)
        # To remember to update when adding a new machine type
        self.assertEqual(self.widget.w_step.c_type.count(), 8)
        # Check load MachineType: after loading, the first step widget must
        # reflect the type, pole-pair count and name stored in the file.
        self.assertEqual(type(self.widget.w_step), SMachineType)
        self.assertEqual(self.widget.w_step.c_type.currentIndex(), test_dict["index"])
        self.assertEqual(self.widget.w_step.c_type.currentText(), test_dict["type"])
        self.assertEqual(self.widget.w_step.si_p.value(), test_dict["p"])
        self.assertEqual(self.widget.w_step.le_name.text(), test_dict["name"])
        # Check that the nav_step is correct
        self.assertEqual(self.widget.nav_step.count(), test_dict["count"])
    def test_set_save_machine_type(self):
        """Check that the Widget allow to change the machine type and save

        Cycles through every combo-box index, verifying the machine object
        class, then saves via the module-level save_function helper.
        """
        # Check that all the machine type are available
        self.assertEqual(self.widget.w_step.c_type.count(), 8)
        # DFIM
        self.widget.w_step.c_type.setCurrentIndex(1)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "DFIM")
        self.assertEqual(type(self.widget.machine), MachineDFIM)
        save_function(self, self.widget, "test_dfim_save")
        # SyRM
        self.widget.w_step.c_type.setCurrentIndex(2)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "SyRM")
        self.assertEqual(type(self.widget.machine), MachineSyRM)
        save_function(self, self.widget, "test_syrm_save")
        # SPMSM
        self.widget.w_step.c_type.setCurrentIndex(3)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "SPMSM")
        # NOTE(review): SPMSM is expected to map to MachineSIPMSM here —
        # presumably both surface-magnet variants share the class; confirm.
        self.assertEqual(type(self.widget.machine), MachineSIPMSM)
        save_function(self, self.widget, "test_spmsm_save")
        # SIPMSM
        self.widget.w_step.c_type.setCurrentIndex(4)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "SIPMSM")
        self.assertEqual(type(self.widget.machine), MachineSIPMSM)
        save_function(self, self.widget, "test_sipmsm_save")
        # IPMSM
        self.widget.w_step.c_type.setCurrentIndex(5)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "IPMSM")
        self.assertEqual(type(self.widget.machine), MachineIPMSM)
        save_function(self, self.widget, "test_ipmsm_save")
        # WRSM
        self.widget.w_step.c_type.setCurrentIndex(6)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "WRSM")
        self.assertEqual(type(self.widget.machine), MachineWRSM)
        save_function(self, self.widget, "test_wrsm_save")
        # SRM
        self.widget.w_step.c_type.setCurrentIndex(7)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "SRM")
        self.assertEqual(type(self.widget.machine), MachineSRM)
        save_function(self, self.widget, "test_srm_save")
        # SCIM (index 0) is checked last; no save here — presumably covered
        # elsewhere.
        self.widget.w_step.c_type.setCurrentIndex(0)
        self.assertEqual(self.widget.w_step.c_type.currentText(), "SCIM")
        self.assertEqual(type(self.widget.machine), MachineSCIM)
def save_function(self, widget, file_name):
    """Save the machine currently edited in ``widget`` through the GUI.

    Mocks the Qt file dialog so clicking the save button writes the machine
    to ``save_path/file_name + ".json"`` without user interaction, then
    checks that the file was created and that the GUI returned to the
    machine-type step with the machine renamed after the file.

    Parameters
    ----------
    self : a TestCase instance (provides the assert* methods)
    widget : the DMachineSetup widget under test
    file_name : str
        Base name (without extension) of the json file to save
    """
    file_path = join(save_path, file_name + ".json")
    # Check that the file didn't already exist
    if isfile(file_path):
        remove(file_path)
    self.assertFalse(isfile(file_path))
    # QFileDialog.getSaveFileName returns a (path, selected_filter) tuple
    return_value = (file_path, "Json (*.json)")
    # Patch the dialog so the save goes straight to file_path
    with mock.patch(
        "PyQt5.QtWidgets.QFileDialog.getSaveFileName", return_value=return_value
    ):
        # To trigger the slot
        widget.b_save.clicked.emit(True)
    # Check that the file now exists => delete it for the next test
    self.assertTrue(isfile(file_path))
    remove(file_path)
    # Check that the GUI has been updated
    self.assertEqual(type(widget.w_step), SMachineType)
    self.assertEqual(widget.w_step.le_name.text(), file_name)
|
b339297726b01d858501.worker.js
|
!function(e){var t=this.webpackHotUpdate;this.webpackHotUpdate=function(e,r){!function(e,t){if(!x[e]||!b[e])return;for(var r in b[e]=!1,t)Object.prototype.hasOwnProperty.call(t,r)&&(d[r]=t[r]);0==--g&&0===v&&T()}(e,r),t&&t(e,r)};var r,n=!0,o="b339297726b01d858501",i=1e4,a={},s=[],c=[];function u(e){var t=E[e];if(!t)return C;var n=function(n){return t.hot.active?(E[n]?-1===E[n].parents.indexOf(e)&&E[n].parents.push(e):(s=[e],r=n),-1===t.children.indexOf(n)&&t.children.push(n)):(console.warn("[HMR] unexpected require("+n+") from disposed module "+e),s=[]),C(n)},o=function(e){return{configurable:!0,enumerable:!0,get:function(){return C[e]},set:function(t){C[e]=t}}};for(var i in C)Object.prototype.hasOwnProperty.call(C,i)&&"e"!==i&&"t"!==i&&Object.defineProperty(n,i,o(i));return n.e=function(e){return"ready"===l&&h("prepare"),v++,C.e(e).then(t,function(e){throw t(),e});function t(){v--,"prepare"===l&&(y[e]||k(e),0===v&&0===g&&T())}},n.t=function(e,t){return 1&t&&(e=n(e)),C.t(e,-2&t)},n}var f=[],l="idle";function h(e){l=e;for(var t=0;t<f.length;t++)f[t].call(null,e)}var p,d,m,g=0,v=0,y={},b={},x={};function S(e){return+e+""===e?+e:e}function O(e){if("idle"!==l)throw new Error("check() is only allowed in idle status");return n=e,h("check"),(t=i,t=t||1e4,new Promise(function(e,r){if("undefined"==typeof XMLHttpRequest)return r(new Error("No browser support"));try{var n=new XMLHttpRequest,i=C.p+""+o+".hot-update.json";n.open("GET",i,!0),n.timeout=t,n.send(null)}catch(e){return r(e)}n.onreadystatechange=function(){if(4===n.readyState)if(0===n.status)r(new Error("Manifest request to "+i+" timed out."));else if(404===n.status)e();else if(200!==n.status&&304!==n.status)r(new Error("Manifest request to "+i+" failed."));else{try{var t=JSON.parse(n.responseText)}catch(e){return void r(e)}e(t)}}})).then(function(e){if(!e)return h("idle"),null;b={},y={},x=e.c,m=e.h,h("prepare");var t=new Promise(function(e,t){p={resolve:e,reject:t}});d={};return 
k(0),"prepare"===l&&0===v&&0===g&&T(),t});var t}function k(e){x[e]?(b[e]=!0,g++,function(e){importScripts(C.p+""+e+"."+o+".hot-update.js")}(e)):y[e]=!0}function T(){h("ready");var e=p;if(p=null,e)if(n)Promise.resolve().then(function(){return A(n)}).then(function(t){e.resolve(t)},function(t){e.reject(t)});else{var t=[];for(var r in d)Object.prototype.hasOwnProperty.call(d,r)&&t.push(S(r));e.resolve(t)}}function A(t){if("ready"!==l)throw new Error("apply() is only allowed in ready status");var r,n,i,c,u;function f(e){for(var t=[e],r={},n=t.slice().map(function(e){return{chain:[e],id:e}});n.length>0;){var o=n.pop(),i=o.id,a=o.chain;if((c=E[i])&&!c.hot._selfAccepted){if(c.hot._selfDeclined)return{type:"self-declined",chain:a,moduleId:i};if(c.hot._main)return{type:"unaccepted",chain:a,moduleId:i};for(var s=0;s<c.parents.length;s++){var u=c.parents[s],f=E[u];if(f){if(f.hot._declinedDependencies[i])return{type:"declined",chain:a.concat([u]),moduleId:i,parentId:u};-1===t.indexOf(u)&&(f.hot._acceptedDependencies[i]?(r[u]||(r[u]=[]),p(r[u],[i])):(delete r[u],t.push(u),n.push({chain:a.concat([u]),id:u})))}}}}return{type:"accepted",moduleId:e,outdatedModules:t,outdatedDependencies:r}}function p(e,t){for(var r=0;r<t.length;r++){var n=t[r];-1===e.indexOf(n)&&e.push(n)}}t=t||{};var g={},v=[],y={},b=function(){console.warn("[HMR] unexpected require("+k.moduleId+") to disposed module")};for(var O in d)if(Object.prototype.hasOwnProperty.call(d,O)){var k;u=S(O);var T=!1,A=!1,w=!1,_="";switch((k=d[O]?f(u):{type:"disposed",moduleId:O}).chain&&(_="\nUpdate propagation: "+k.chain.join(" -> ")),k.type){case"self-declined":t.onDeclined&&t.onDeclined(k),t.ignoreDeclined||(T=new Error("Aborted because of self decline: "+k.moduleId+_));break;case"declined":t.onDeclined&&t.onDeclined(k),t.ignoreDeclined||(T=new Error("Aborted because of declined dependency: "+k.moduleId+" in "+k.parentId+_));break;case"unaccepted":t.onUnaccepted&&t.onUnaccepted(k),t.ignoreUnaccepted||(T=new Error("Aborted 
because "+u+" is not accepted"+_));break;case"accepted":t.onAccepted&&t.onAccepted(k),A=!0;break;case"disposed":t.onDisposed&&t.onDisposed(k),w=!0;break;default:throw new Error("Unexception type "+k.type)}if(T)return h("abort"),Promise.reject(T);if(A)for(u in y[u]=d[u],p(v,k.outdatedModules),k.outdatedDependencies)Object.prototype.hasOwnProperty.call(k.outdatedDependencies,u)&&(g[u]||(g[u]=[]),p(g[u],k.outdatedDependencies[u]));w&&(p(v,[k.moduleId]),y[u]=b)}var I,P=[];for(n=0;n<v.length;n++)u=v[n],E[u]&&E[u].hot._selfAccepted&&P.push({module:u,errorHandler:E[u].hot._selfAccepted});h("dispose"),Object.keys(x).forEach(function(e){!1===x[e]&&function(e){delete installedChunks[e]}(e)});for(var j,V,M=v.slice();M.length>0;)if(u=M.pop(),c=E[u]){var N={},F=c.hot._disposeHandlers;for(i=0;i<F.length;i++)(r=F[i])(N);for(a[u]=N,c.hot.active=!1,delete E[u],delete g[u],i=0;i<c.children.length;i++){var D=E[c.children[i]];D&&((I=D.parents.indexOf(u))>=0&&D.parents.splice(I,1))}}for(u in g)if(Object.prototype.hasOwnProperty.call(g,u)&&(c=E[u]))for(V=g[u],i=0;i<V.length;i++)j=V[i],(I=c.children.indexOf(j))>=0&&c.children.splice(I,1);for(u in h("apply"),o=m,y)Object.prototype.hasOwnProperty.call(y,u)&&(e[u]=y[u]);var W=null;for(u in g)if(Object.prototype.hasOwnProperty.call(g,u)&&(c=E[u])){V=g[u];var R=[];for(n=0;n<V.length;n++)if(j=V[n],r=c.hot._acceptedDependencies[j]){if(-1!==R.indexOf(r))continue;R.push(r)}for(n=0;n<R.length;n++){r=R[n];try{r(V)}catch(e){t.onErrored&&t.onErrored({type:"accept-errored",moduleId:u,dependencyId:V[n],error:e}),t.ignoreErrored||W||(W=e)}}}for(n=0;n<P.length;n++){var $=P[n];u=$.module,s=[u];try{C(u)}catch(e){if("function"==typeof $.errorHandler)try{$.errorHandler(e)}catch(r){t.onErrored&&t.onErrored({type:"self-accept-error-handler-errored",moduleId:u,error:r,originalError:e}),t.ignoreErrored||W||(W=r),W||(W=e)}else t.onErrored&&t.onErrored({type:"self-accept-errored",moduleId:u,error:e}),t.ignoreErrored||W||(W=e)}}return 
W?(h("fail"),Promise.reject(W)):(h("idle"),new Promise(function(e){e(v)}))}var E={};function C(t){if(E[t])return E[t].exports;var n=E[t]={i:t,l:!1,exports:{},hot:function(e){var t={_acceptedDependencies:{},_declinedDependencies:{},_selfAccepted:!1,_selfDeclined:!1,_disposeHandlers:[],_main:r!==e,active:!0,accept:function(e,r){if(void 0===e)t._selfAccepted=!0;else if("function"==typeof e)t._selfAccepted=e;else if("object"==typeof e)for(var n=0;n<e.length;n++)t._acceptedDependencies[e[n]]=r||function(){};else t._acceptedDependencies[e]=r||function(){}},decline:function(e){if(void 0===e)t._selfDeclined=!0;else if("object"==typeof e)for(var r=0;r<e.length;r++)t._declinedDependencies[e[r]]=!0;else t._declinedDependencies[e]=!0},dispose:function(e){t._disposeHandlers.push(e)},addDisposeHandler:function(e){t._disposeHandlers.push(e)},removeDisposeHandler:function(e){var r=t._disposeHandlers.indexOf(e);r>=0&&t._disposeHandlers.splice(r,1)},check:O,apply:A,status:function(e){if(!e)return l;f.push(e)},addStatusHandler:function(e){f.push(e)},removeStatusHandler:function(e){var t=f.indexOf(e);t>=0&&f.splice(t,1)},data:a[e]};return r=void 0,t}(t),parents:(c=s,s=[],c),children:[]};return e[t].call(n.exports,n,n.exports,u(t)),n.l=!0,n.exports}C.m=e,C.c=E,C.d=function(e,t,r){C.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},C.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},C.t=function(e,t){if(1&t&&(e=C(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(C.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)C.d(r,n,function(t){return e[t]}.bind(null,n));return r},C.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return C.d(t,"a",t),t},C.o=function(e,t){return 
Object.prototype.hasOwnProperty.call(e,t)},C.p="",C.h=function(){return o},u(3)(C.s=3)}([function(e,t,r){"use strict";(function(e){function r(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()}function n(e){return encodeURIComponent(e).replace(/[!'()*]/g,r)}function o(e){return e.replace(/[#?]/,r)}var i,a=function(){function e(){this._scheme=e._empty,this._authority=e._empty,this._path=e._empty,this._query=e._empty,this._fragment=e._empty,this._formatted=null,this._fsPath=null}return e.isUri=function(t){return t instanceof e||!!t&&("string"==typeof t.authority&&"string"==typeof t.fragment&&"string"==typeof t.path&&"string"==typeof t.query&&"string"==typeof t.scheme)},Object.defineProperty(e.prototype,"scheme",{get:function(){return this._scheme},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"authority",{get:function(){return this._authority},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"path",{get:function(){return this._path},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"query",{get:function(){return this._query},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"fragment",{get:function(){return this._fragment},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"fsPath",{get:function(){var t;this._fsPath||(t=this._authority&&this._path&&"file"===this.scheme?"//"+this._authority+this._path:e._driveLetterPath.test(this._path)?this._path[1].toLowerCase()+this._path.substr(2):this._path,i&&(t=t.replace(/\//g,"\\")),this._fsPath=t);return this._fsPath},enumerable:!0,configurable:!0}),e.prototype.with=function(t){if(!t)return this;var r=t.scheme,n=t.authority,o=t.path,i=t.query,a=t.fragment;if(void 0===r?r=this.scheme:null===r&&(r=""),void 0===n?n=this.authority:null===n&&(n=""),void 0===o?o=this.path:null===o&&(o=""),void 0===i?i=this.query:null===i&&(i=""),void 
0===a?a=this.fragment:null===a&&(a=""),r===this.scheme&&n===this.authority&&o===this.path&&i===this.query&&a===this.fragment)return this;var s=new e;return s._scheme=r,s._authority=n,s._path=o,s._query=i,s._fragment=a,e._validate(s),s},e.parse=function(t){var r=new e,n=e._parseComponents(t);return r._scheme=n.scheme,r._authority=decodeURIComponent(n.authority),r._path=decodeURIComponent(n.path),r._query=decodeURIComponent(n.query),r._fragment=decodeURIComponent(n.fragment),e._validate(r),r},e.file=function(t){var r=new e;if(r._scheme="file",i&&(t=t.replace(/\\/g,e._slash)),t[0]===e._slash&&t[0]===t[1]){var n=t.indexOf(e._slash,2);-1===n?r._authority=t.substring(2):(r._authority=t.substring(2,n),r._path=t.substring(n))}else r._path=t;return r._path[0]!==e._slash&&(r._path=e._slash+r._path),e._validate(r),r},e._parseComponents=function(t){var r={scheme:e._empty,authority:e._empty,path:e._empty,query:e._empty,fragment:e._empty},n=e._regexp.exec(t);return n&&(r.scheme=n[2]||r.scheme,r.authority=n[4]||r.authority,r.path=n[5]||r.path,r.query=n[7]||r.query,r.fragment=n[9]||r.fragment),r},e.from=function(t){return(new e).with(t)},e._validate=function(t){if(t.scheme&&!e._schemePattern.test(t.scheme))throw new Error("[UriError]: Scheme contains illegal characters.");if(t.path)if(t.authority){if(!e._singleSlashStart.test(t.path))throw new Error('[UriError]: If a URI contains an authority component, then the path component must either be empty or begin with a slash ("/") character')}else if(e._doubleSlashStart.test(t.path))throw new Error('[UriError]: If a URI does not contain an authority component, then the path cannot begin with two slash characters ("//")')},e.prototype.toString=function(t){return void 0===t&&(t=!1),t?e._asFormatted(this,!0):(this._formatted||(this._formatted=e._asFormatted(this,!1)),this._formatted)},e._asFormatted=function(t,r){var 
i=r?o:n,a=[],s=t.scheme,c=t.authority,u=t.path,f=t.query,l=t.fragment;(s&&a.push(s,":"),(c||"file"===s)&&a.push("//"),c)&&(-1===(d=(c=c.toLowerCase()).indexOf(":"))?a.push(i(c)):a.push(i(c.substr(0,d)),c.substr(d)));if(u){var h=e._upperCaseDrive.exec(u);h&&(u=h[1]?"/"+h[2].toLowerCase()+u.substr(3):h[2].toLowerCase()+u.substr(2));for(var p=0;;){var d;if(-1===(d=u.indexOf(e._slash,p))){a.push(i(u.substring(p)));break}a.push(i(u.substring(p,d)),e._slash),p=d+1}}return f&&a.push("?",i(f)),l&&a.push("#",i(l)),a.join(e._empty)},e.prototype.toJSON=function(){var e={fsPath:this.fsPath,external:this.toString(),$mid:1};return this.path&&(e.path=this.path),this.scheme&&(e.scheme=this.scheme),this.authority&&(e.authority=this.authority),this.query&&(e.query=this.query),this.fragment&&(e.fragment=this.fragment),e},e.revive=function(t){var r=new e;return r._scheme=t.scheme||e._empty,r._authority=t.authority||e._empty,r._path=t.path||e._empty,r._query=t.query||e._empty,r._fragment=t.fragment||e._empty,r._fsPath=t.fsPath,r._formatted=t.external,e._validate(r),r},e}();if(t.a=a,a._empty="",a._slash="/",a._regexp=/^(([^:/?#]+?):)?(\/\/([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?/,a._driveLetterPath=/^\/[a-zA-z]:/,a._upperCaseDrive=/^(\/)?([A-Z]:)/,a._schemePattern=/^\w[\w\d+.-]*$/,a._singleSlashStart=/^\//,a._doubleSlashStart=/^\/\//,"object"==typeof e)i="win32"===e.platform;else if("object"==typeof navigator){var s=navigator.userAgent;i=s.indexOf("Windows")>=0}}).call(this,r(2))},function(e,t,r){e.exports=function(){return new Worker(r.p+"51c25ac0adacae5e02ff.worker.js")}},function(e,t){var r,n,o=e.exports={};function i(){throw new Error("setTimeout has not been defined")}function a(){throw new Error("clearTimeout has not been defined")}function s(e){if(r===setTimeout)return setTimeout(e,0);if((r===i||!r)&&setTimeout)return r=setTimeout,setTimeout(e,0);try{return r(e,0)}catch(t){try{return r.call(null,e,0)}catch(t){return r.call(this,e,0)}}}!function(){try{r="function"==typeof 
setTimeout?setTimeout:i}catch(e){r=i}try{n="function"==typeof clearTimeout?clearTimeout:a}catch(e){n=a}}();var c,u=[],f=!1,l=-1;function h(){f&&c&&(f=!1,c.length?u=c.concat(u):l=-1,u.length&&p())}function p(){if(!f){var e=s(h);f=!0;for(var t=u.length;t;){for(c=u,u=[];++l<t;)c&&c[l].run();l=-1,t=u.length}c=null,f=!1,function(e){if(n===clearTimeout)return clearTimeout(e);if((n===a||!n)&&clearTimeout)return n=clearTimeout,clearTimeout(e);try{n(e)}catch(t){try{return n.call(null,e)}catch(t){return n.call(this,e)}}}(e)}}function d(e,t){this.fun=e,this.array=t}function m(){}o.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var r=1;r<arguments.length;r++)t[r-1]=arguments[r];u.push(new d(e,t)),1!==u.length||f||s(p)},d.prototype.run=function(){this.fun.apply(null,this.array)},o.title="browser",o.browser=!0,o.env={},o.argv=[],o.version="",o.versions={},o.on=m,o.addListener=m,o.once=m,o.off=m,o.removeListener=m,o.removeAllListeners=m,o.emit=m,o.prependListener=m,o.prependOnceListener=m,o.listeners=function(e){return[]},o.binding=function(e){throw new Error("process.binding is not supported")},o.cwd=function(){return"/"},o.chdir=function(e){throw new Error("process.chdir is not supported")},o.umask=function(){return 0}},function(e,t,r){"use strict";r.r(t);var n,o,i,a,s,c,u,f,l=r(1);!function(e){e.create=function(e,t){return{line:e,character:t}},e.is=function(e){var t=e;return M.defined(t)&&M.number(t.line)&&M.number(t.character)}}(n||(n={})),function(e){e.create=function(e,t,r,o){if(M.number(e)&&M.number(t)&&M.number(r)&&M.number(o))return{start:n.create(e,t),end:n.create(r,o)};if(n.is(e)&&n.is(t))return{start:e,end:t};throw new Error("Range#create called with invalid arguments["+e+", "+t+", "+r+", "+o+"]")},e.is=function(e){var t=e;return M.defined(t)&&n.is(t.start)&&n.is(t.end)}}(o||(o={})),function(e){e.create=function(e,t){return{uri:e,range:t}},e.is=function(e){var t=e;return 
M.defined(t)&&o.is(t.range)&&(M.string(t.uri)||M.undefined(t.uri))}}(i||(i={})),function(e){e.Error=1,e.Warning=2,e.Information=3,e.Hint=4}(a||(a={})),function(e){e.create=function(e,t,r,n,o){var i={range:e,message:t};return M.defined(r)&&(i.severity=r),M.defined(n)&&(i.code=n),M.defined(o)&&(i.source=o),i},e.is=function(e){var t=e;return M.defined(t)&&o.is(t.range)&&M.string(t.message)&&(M.number(t.severity)||M.undefined(t.severity))&&(M.number(t.code)||M.string(t.code)||M.undefined(t.code))&&(M.string(t.source)||M.undefined(t.source))}}(s||(s={})),function(e){e.create=function(e,t){for(var r=[],n=2;n<arguments.length;n++)r[n-2]=arguments[n];var o={title:e,command:t};return M.defined(r)&&r.length>0&&(o.arguments=r),o},e.is=function(e){var t=e;return M.defined(t)&&M.string(t.title)&&M.string(t.title)}}(c||(c={})),function(e){e.replace=function(e,t){return{range:e,newText:t}},e.insert=function(e,t){return{range:{start:e,end:e},newText:t}},e.del=function(e){return{range:e,newText:""}}}(u||(u={})),function(e){e.create=function(e,t){return{textDocument:e,edits:t}},e.is=function(e){var t=e;return M.defined(t)&&p.is(t.textDocument)&&Array.isArray(t.edits)}}(f||(f={}));var h,p,d,m,g,v,y,b,x,S,O,k,T,A,E,C,w,_,I=function(){function e(e){this.edits=e}return e.prototype.insert=function(e,t){this.edits.push(u.insert(e,t))},e.prototype.replace=function(e,t){this.edits.push(u.replace(e,t))},e.prototype.delete=function(e){this.edits.push(u.del(e))},e.prototype.add=function(e){this.edits.push(e)},e.prototype.all=function(){return this.edits},e.prototype.clear=function(){this.edits.splice(0,this.edits.length)},e}();!function(){function e(e){var t=this;this._textEditChanges=Object.create(null),e&&(this._workspaceEdit=e,e.documentChanges?e.documentChanges.forEach(function(e){var r=new I(e.edits);t._textEditChanges[e.textDocument.uri]=r}):e.changes&&Object.keys(e.changes).forEach(function(r){var n=new 
I(e.changes[r]);t._textEditChanges[r]=n}))}Object.defineProperty(e.prototype,"edit",{get:function(){return this._workspaceEdit},enumerable:!0,configurable:!0}),e.prototype.getTextEditChange=function(e){if(p.is(e)){if(this._workspaceEdit||(this._workspaceEdit={documentChanges:[]}),!this._workspaceEdit.documentChanges)throw new Error("Workspace edit is not configured for versioned document changes.");var t=e;if(!(n=this._textEditChanges[t.uri])){var r={textDocument:t,edits:o=[]};this._workspaceEdit.documentChanges.push(r),n=new I(o),this._textEditChanges[t.uri]=n}return n}if(this._workspaceEdit||(this._workspaceEdit={changes:Object.create(null)}),!this._workspaceEdit.changes)throw new Error("Workspace edit is not configured for normal text edit changes.");var n;if(!(n=this._textEditChanges[e])){var o=[];this._workspaceEdit.changes[e]=o,n=new I(o),this._textEditChanges[e]=n}return n}}();!function(e){e.create=function(e){return{uri:e}},e.is=function(e){var t=e;return M.defined(t)&&M.string(t.uri)}}(h||(h={})),function(e){e.create=function(e,t){return{uri:e,version:t}},e.is=function(e){var t=e;return M.defined(t)&&M.string(t.uri)&&M.number(t.version)}}(p||(p={})),function(e){e.create=function(e,t,r,n){return{uri:e,languageId:t,version:r,text:n}},e.is=function(e){var t=e;return 
M.defined(t)&&M.string(t.uri)&&M.string(t.languageId)&&M.number(t.version)&&M.string(t.text)}}(d||(d={})),function(e){e.PlainText="plaintext",e.Markdown="markdown"}(m||(m={})),function(e){e.Text=1,e.Method=2,e.Function=3,e.Constructor=4,e.Field=5,e.Variable=6,e.Class=7,e.Interface=8,e.Module=9,e.Property=10,e.Unit=11,e.Value=12,e.Enum=13,e.Keyword=14,e.Snippet=15,e.Color=16,e.File=17,e.Reference=18,e.Folder=19,e.EnumMember=20,e.Constant=21,e.Struct=22,e.Event=23,e.Operator=24,e.TypeParameter=25}(g||(g={})),function(e){e.PlainText=1,e.Snippet=2}(v||(v={})),function(e){e.create=function(e){return{label:e}}}(y||(y={})),function(e){e.create=function(e,t){return{items:e||[],isIncomplete:!!t}}}(b||(b={})),function(e){e.fromPlainText=function(e){return e.replace(/[\\`*_{}[\]()#+\-.!]/g,"\\$&")}}(x||(x={})),function(e){e.create=function(e,t){return t?{label:e,documentation:t}:{label:e}}}(S||(S={})),function(e){e.create=function(e,t){for(var r=[],n=2;n<arguments.length;n++)r[n-2]=arguments[n];var o={label:e};return M.defined(t)&&(o.documentation=t),M.defined(r)?o.parameters=r:o.parameters=[],o}}(O||(O={})),function(e){e.Text=1,e.Read=2,e.Write=3}(k||(k={})),function(e){e.create=function(e,t){var r={range:e};return M.number(t)&&(r.kind=t),r}}(T||(T={})),function(e){e.File=1,e.Module=2,e.Namespace=3,e.Package=4,e.Class=5,e.Method=6,e.Property=7,e.Field=8,e.Constructor=9,e.Enum=10,e.Interface=11,e.Function=12,e.Variable=13,e.Constant=14,e.String=15,e.Number=16,e.Boolean=17,e.Array=18,e.Object=19,e.Key=20,e.Null=21,e.EnumMember=22,e.Struct=23,e.Event=24,e.Operator=25,e.TypeParameter=26}(A||(A={})),function(e){e.create=function(e,t,r,n,o){var i={name:e,kind:t,location:{uri:n,range:r}};return o&&(i.containerName=o),i}}(E||(E={})),function(e){e.create=function(e){return{diagnostics:e}},e.is=function(e){var t=e;return M.defined(t)&&M.typedArray(t.diagnostics,s.is)}}(C||(C={})),function(e){e.create=function(e,t){var r={range:e};return M.defined(t)&&(r.data=t),r},e.is=function(e){var 
t=e;return M.defined(t)&&o.is(t.range)&&(M.undefined(t.command)||c.is(t.command))}}(w||(w={})),function(e){e.create=function(e,t){return{tabSize:e,insertSpaces:t}},e.is=function(e){var t=e;return M.defined(t)&&M.number(t.tabSize)&&M.boolean(t.insertSpaces)}}(_||(_={}));var P=function(){return function(){}}();!function(e){e.create=function(e,t){return{range:e,target:t}},e.is=function(e){var t=e;return M.defined(t)&&o.is(t.range)&&(M.undefined(t.target)||M.string(t.target))}}(P||(P={}));var j,V;!function(e){e.create=function(e,t,r,n){return new N(e,t,r,n)},e.is=function(e){var t=e;return!!(M.defined(t)&&M.string(t.uri)&&(M.undefined(t.languageId)||M.string(t.languageId))&&M.number(t.lineCount)&&M.func(t.getText)&&M.func(t.positionAt)&&M.func(t.offsetAt))},e.applyEdits=function(e,t){for(var r=e.getText(),n=function e(t,r){if(t.length<=1)return t;var n=t.length/2|0,o=t.slice(0,n),i=t.slice(n);e(o,r),e(i,r);for(var a=0,s=0,c=0;a<o.length&&s<i.length;){var u=r(o[a],i[s]);t[c++]=u<=0?o[a++]:i[s++]}for(;a<o.length;)t[c++]=o[a++];for(;s<i.length;)t[c++]=i[s++];return t}(t,function(e,t){return 0==e.range.start.line-t.range.start.line?e.range.start.character-t.range.start.character:0}),o=r.length,i=n.length-1;i>=0;i--){var a=n[i],s=e.offsetAt(a.range.start),c=e.offsetAt(a.range.end);if(!(c<=o))throw new Error("Ovelapping edit");r=r.substring(0,s)+a.newText+r.substring(c,r.length),o=s}return r}}(j||(j={})),function(e){e.Manual=1,e.AfterDelay=2,e.FocusOut=3}(V||(V={}));var M,N=function(){function e(e,t,r,n){this._uri=e,this._languageId=t,this._version=r,this._content=n,this._lineOffsets=null}return Object.defineProperty(e.prototype,"uri",{get:function(){return this._uri},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"languageId",{get:function(){return this._languageId},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"version",{get:function(){return this._version},enumerable:!0,configurable:!0}),e.prototype.getText=function(e){if(e){var 
t=this.offsetAt(e.start),r=this.offsetAt(e.end);return this._content.substring(t,r)}return this._content},e.prototype.update=function(e,t){this._content=e.text,this._version=t,this._lineOffsets=null},e.prototype.getLineOffsets=function(){if(null===this._lineOffsets){for(var e=[],t=this._content,r=!0,n=0;n<t.length;n++){r&&(e.push(n),r=!1);var o=t.charAt(n);r="\r"===o||"\n"===o,"\r"===o&&n+1<t.length&&"\n"===t.charAt(n+1)&&n++}r&&t.length>0&&e.push(t.length),this._lineOffsets=e}return this._lineOffsets},e.prototype.positionAt=function(e){e=Math.max(Math.min(e,this._content.length),0);var t=this.getLineOffsets(),r=0,o=t.length;if(0===o)return n.create(0,e);for(;r<o;){var i=Math.floor((r+o)/2);t[i]>e?o=i:r=i+1}var a=r-1;return n.create(a,e-t[a])},e.prototype.offsetAt=function(e){var t=this.getLineOffsets();if(e.line>=t.length)return this._content.length;if(e.line<0)return 0;var r=t[e.line],n=e.line+1<t.length?t[e.line+1]:this._content.length;return Math.max(Math.min(r+e.character,n),r)},Object.defineProperty(e.prototype,"lineCount",{get:function(){return this.getLineOffsets().length},enumerable:!0,configurable:!0}),e}();function F(e,t){void 0===t&&(t=!1);var r=0,n=e.length,o="",i=0,a=16,s=0;function c(t,n){for(var o=0,i=0;o<t||!n;){var a=e.charCodeAt(r);if(a>=48&&a<=57)i=16*i+a-48;else if(a>=65&&a<=70)i=16*i+a-65+10;else{if(!(a>=97&&a<=102))break;i=16*i+a-97+10}r++,o++}return o<t&&(i=-1),i}function u(){if(o="",s=0,i=r,r>=n)return i=n,a=17;var t=e.charCodeAt(r);if(D(t)){do{r++,o+=String.fromCharCode(t),t=e.charCodeAt(r)}while(D(t));return a=15}if(W(t))return r++,o+=String.fromCharCode(t),13===t&&10===e.charCodeAt(r)&&(r++,o+="\n"),a=14;switch(t){case 123:return r++,a=1;case 125:return r++,a=2;case 91:return r++,a=3;case 93:return r++,a=4;case 58:return r++,a=6;case 44:return r++,a=5;case 34:return r++,o=function(){for(var t="",o=r;;){if(r>=n){t+=e.substring(o,r),s=2;break}var 
i=e.charCodeAt(r);if(34===i){t+=e.substring(o,r),r++;break}if(92!==i){if(i>=0&&i<=31){if(W(i)){t+=e.substring(o,r),s=2;break}s=6}r++}else{if(t+=e.substring(o,r),++r>=n){s=2;break}switch(i=e.charCodeAt(r++)){case 34:t+='"';break;case 92:t+="\\";break;case 47:t+="/";break;case 98:t+="\b";break;case 102:t+="\f";break;case 110:t+="\n";break;case 114:t+="\r";break;case 116:t+="\t";break;case 117:var a=c(4,!0);a>=0?t+=String.fromCharCode(a):s=4;break;default:s=5}o=r}}return t}(),a=10;case 47:var u=r-1;if(47===e.charCodeAt(r+1)){for(r+=2;r<n&&!W(e.charCodeAt(r));)r++;return o=e.substring(u,r),a=12}if(42===e.charCodeAt(r+1)){r+=2;for(var l=!1;r<n;){if(42===e.charCodeAt(r)&&r+1<n&&47===e.charCodeAt(r+1)){r+=2,l=!0;break}r++}return l||(r++,s=1),o=e.substring(u,r),a=13}return o+=String.fromCharCode(t),r++,a=16;case 45:if(o+=String.fromCharCode(t),++r===n||!R(e.charCodeAt(r)))return a=16;case 48:case 49:case 50:case 51:case 52:case 53:case 54:case 55:case 56:case 57:return o+=function(){var t=r;if(48===e.charCodeAt(r))r++;else for(r++;r<e.length&&R(e.charCodeAt(r));)r++;if(r<e.length&&46===e.charCodeAt(r)){if(!(++r<e.length&&R(e.charCodeAt(r))))return s=3,e.substring(t,r);for(r++;r<e.length&&R(e.charCodeAt(r));)r++}var n=r;if(r<e.length&&(69===e.charCodeAt(r)||101===e.charCodeAt(r)))if((++r<e.length&&43===e.charCodeAt(r)||45===e.charCodeAt(r))&&r++,r<e.length&&R(e.charCodeAt(r))){for(r++;r<e.length&&R(e.charCodeAt(r));)r++;n=r}else s=3;return e.substring(t,n)}(),a=11;default:for(;r<n&&f(t);)r++,t=e.charCodeAt(r);if(i!==r){switch(o=e.substring(i,r)){case"true":return a=8;case"false":return a=9;case"null":return a=7}return a=16}return o+=String.fromCharCode(t),r++,a=16}}function f(e){if(D(e)||W(e))return!1;switch(e){case 125:case 93:case 123:case 91:case 34:case 58:case 44:return!1}return!0}return{setPosition:function(e){r=e,o="",i=0,a=16,s=0},getPosition:function(){return r},scan:t?function(){var e;do{e=u()}while(e>=12&&e<=15);return e}:u,getToken:function(){return 
a},getTokenValue:function(){return o},getTokenOffset:function(){return i},getTokenLength:function(){return r-i},getTokenError:function(){return s}}}function D(e){return 32===e||9===e||11===e||12===e||160===e||5760===e||e>=8192&&e<=8203||8239===e||8287===e||12288===e||65279===e}function W(e){return 10===e||13===e||8232===e||8233===e}function R(e){return e>=48&&e<=57}function $(e,t,r){var n,o,i,a,s;if(t){for(a=t.offset,s=a+t.length,i=a;i>0&&!L(e,i-1);)i--;for(var c=s;c<e.length&&!L(e,c);)c++;o=e.substring(i,c),n=function(e,t,r){var n=0,o=0,i=r.tabSize||4;for(;n<e.length;){var a=e.charAt(n);if(" "===a)o++;else{if("\t"!==a)break;o+=i}n++}return Math.floor(o/i)}(o,0,r)}else o=e,n=0,i=0,a=0,s=e.length;var u,f=function(e,t){for(var r=0;r<t.length;r++){var n=t.charAt(r);if("\r"===n)return r+1<t.length&&"\n"===t.charAt(r+1)?"\r\n":"\r";if("\n"===n)return"\n"}return e&&e.eol||"\n"}(r,e),l=!1,h=0;u=r.insertSpaces?U(" ",r.tabSize||4):"\t";var p=F(o,!1),d=!1;function m(){return f+U(u,n+h)}function g(){var e=p.scan();for(l=!1;15===e||14===e;)l=l||14===e,e=p.scan();return d=16===e||0!==p.getTokenError(),e}var v=[];function y(t,r,n){!d&&r<s&&n>a&&e.substring(r,n)!==t&&v.push({offset:r,length:n-r,content:t})}var b=g();if(17!==b){var x=p.getTokenOffset()+i;y(U(u,n),i,x)}for(;17!==b;){for(var S=p.getTokenOffset()+p.getTokenLength()+i,O=g(),k="";!l&&(12===O||13===O);){y(" ",S,p.getTokenOffset()+i),S=p.getTokenOffset()+p.getTokenLength()+i,k=12===O?m():"",O=g()}if(2===O)1!==b&&(h--,k=m());else if(4===O)3!==b&&(h--,k=m());else{switch(b){case 3:case 1:h++,k=m();break;case 5:case 12:k=m();break;case 13:k=l?m():" ";break;case 6:k=" ";break;case 10:if(6===O){k="";break}case 7:case 8:case 9:case 11:case 2:case 4:12===O||13===O?k=" ":5!==O&&17!==O&&(d=!0);break;case 16:d=!0}!l||12!==O&&13!==O||(k=m())}y(k,S,p.getTokenOffset()+i),b=O}return v}function U(e,t){for(var r="",n=0;n<t;n++)r+=e;return r}function L(e,t){return-1!=="\r\n".indexOf(e.charAt(t))}function q(e,t,r){var n=F(e,!1);function 
o(e){return e?function(){return e(n.getTokenOffset(),n.getTokenLength())}:function(){return!0}}function i(e){return e?function(t){return e(t,n.getTokenOffset(),n.getTokenLength())}:function(){return!0}}var a=o(t.onObjectBegin),s=i(t.onObjectProperty),c=o(t.onObjectEnd),u=o(t.onArrayBegin),f=o(t.onArrayEnd),l=i(t.onLiteralValue),h=i(t.onSeparator),p=o(t.onComment),d=i(t.onError),m=r&&r.disallowComments,g=r&&r.allowTrailingComma;function v(){for(;;){var e=n.scan();switch(n.getTokenError()){case 4:y(14);break;case 5:y(15);break;case 3:y(13);break;case 1:m||y(11);break;case 2:y(12);break;case 6:y(16)}switch(e){case 12:case 13:m?y(10):p();break;case 16:y(1);break;case 15:case 14:break;default:return e}}}function y(e,t,r){if(void 0===t&&(t=[]),void 0===r&&(r=[]),d(e),t.length+r.length>0)for(var o=n.getToken();17!==o;){if(-1!==t.indexOf(o)){v();break}if(-1!==r.indexOf(o))break;o=v()}}function b(e){var t=n.getTokenValue();return e?l(t):s(t),v(),!0}function x(){switch(n.getToken()){case 3:return function(){u(),v();for(var e=!1;4!==n.getToken()&&17!==n.getToken();){if(5===n.getToken()){if(e||y(4,[],[]),h(","),v(),4===n.getToken()&&g)break}else e&&y(6,[],[]);x()||y(4,[],[4,5]),e=!0}return f(),4!==n.getToken()?y(8,[4],[]):v(),!0}();case 1:return function(){a(),v();for(var e=!1;2!==n.getToken()&&17!==n.getToken();){if(5===n.getToken()){if(e||y(4,[],[]),h(","),v(),2===n.getToken()&&g)break}else e&&y(6,[],[]);(10!==n.getToken()?(y(3,[],[2,5]),0):(b(!1),6===n.getToken()?(h(":"),v(),x()||y(4,[],[2,5])):y(5,[],[2,5]),1))||y(4,[],[2,5]),e=!0}return c(),2!==n.getToken()?y(7,[2],[]):v(),!0}();case 10:return b(!0);default:return function(){switch(n.getToken()){case 11:var e=0;try{"number"!=typeof(e=JSON.parse(n.getTokenValue()))&&(y(2),e=0)}catch(e){y(2)}l(e);break;case 7:l(null);break;case 8:l(!0);break;case 9:l(!1);break;default:return!1}return v(),!0}()}}return v(),17===n.getToken()||(x()?(17!==n.getToken()&&y(9,[],[]),!0):(y(4,[],[]),!1))}!function(e){var 
t=Object.prototype.toString;e.defined=function(e){return void 0!==e},e.undefined=function(e){return void 0===e},e.boolean=function(e){return!0===e||!1===e},e.string=function(e){return"[object String]"===t.call(e)},e.number=function(e){return"[object Number]"===t.call(e)},e.func=function(e){return"[object Function]"===t.call(e)},e.typedArray=function(e,t){return Array.isArray(e)&&e.every(t)}}(M||(M={}));var H=F,B=function(e,t,r){void 0===t&&(t=[]);var n=null,o=[],i=[];function a(e){Array.isArray(o)?o.push(e):n&&(o[n]=e)}return q(e,{onObjectBegin:function(){var e={};a(e),i.push(o),o=e,n=null},onObjectProperty:function(e){n=e},onObjectEnd:function(){o=i.pop()},onArrayBegin:function(){var e=[];a(e),i.push(o),o=e,n=null},onArrayEnd:function(){o=i.pop()},onLiteralValue:a,onError:function(e,r,n){t.push({error:e,offset:r,length:n})}},r),o[0]};function J(e,t){if(e===t)return!0;if(null===e||void 0===e||null===t||void 0===t)return!1;if(typeof e!=typeof t)return!1;if("object"!=typeof e)return!1;if(Array.isArray(e)!==Array.isArray(t))return!1;var r,n;if(Array.isArray(e)){if(e.length!==t.length)return!1;for(r=0;r<e.length;r++)if(!J(e[r],t[r]))return!1}else{var o=[];for(n in e)o.push(n);o.sort();var i=[];for(n in t)i.push(n);if(i.sort(),!J(o,i))return!1;for(r=0;r<o.length;r++)if(!J(e[o[r]],t[o[r]]))return!1}return!0}function z(e,t){for(var r=[],n=2;n<arguments.length;n++)r[n-2]=arguments[n];return function(e,t){return 0===t.length?e:e.replace(/\{(\d+)\}/g,function(e,r){var n=r[0];return void 0!==t[n]?t[n]:e})}(t,r)}function K(e){return z}var G,Z,X=r(0),Q=(G=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var r in t)t.hasOwnProperty(r)&&(e[r]=t[r])},function(e,t){function r(){this.constructor=e}G(e,t),e.prototype=null===t?Object.create(t):(r.prototype=t.prototype,new 
r)}),Y=K();!function(e){e[e.Undefined=0]="Undefined",e[e.EnumValueMismatch=1]="EnumValueMismatch",e[e.UnexpectedEndOfComment=257]="UnexpectedEndOfComment",e[e.UnexpectedEndOfString=258]="UnexpectedEndOfString",e[e.UnexpectedEndOfNumber=259]="UnexpectedEndOfNumber",e[e.InvalidUnicode=260]="InvalidUnicode",e[e.InvalidEscapeCharacter=261]="InvalidEscapeCharacter",e[e.InvalidCharacter=262]="InvalidCharacter",e[e.PropertyExpected=513]="PropertyExpected",e[e.CommaExpected=514]="CommaExpected",e[e.ColonExpected=515]="ColonExpected",e[e.ValueExpected=516]="ValueExpected",e[e.CommaOrCloseBacketExpected=517]="CommaOrCloseBacketExpected",e[e.CommaOrCloseBraceExpected=518]="CommaOrCloseBraceExpected",e[e.TrailingComma=519]="TrailingComma"}(Z||(Z={}));var ee,te=/^#([0-9A-Fa-f]{3,4}|([0-9A-Fa-f]{2}){3,4})$/,re=/^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/;!function(e){e.Ignore="ignore",e.Error="error",e.Warning="warning"}(ee||(ee={}));var ne,oe=function(){function e(e,t,r){this.offset=t,this.length=r,this.parent=e}return Object.defineProperty(e.prototype,"children",{get:function(){return[]},enumerable:!0,configurable:!0}),e.prototype.toString=function(){return"type: "+this.type+" ("+this.offset+"/"+this.length+")"+(this.parent?" 
parent: {"+this.parent.toString()+"}":"")},e}(),ie=function(e){function t(t,r){var n=e.call(this,t,r)||this;return n.type="null",n}return Q(t,e),t}(oe),ae=function(e){function t(t,r,n){var o=e.call(this,t,n)||this;return o.type="boolean",o.value=r,o}return Q(t,e),t}(oe),se=function(e){function t(t,r){var n=e.call(this,t,r)||this;return n.type="array",n.items=[],n}return Q(t,e),Object.defineProperty(t.prototype,"children",{get:function(){return this.items},enumerable:!0,configurable:!0}),t}(oe),ce=function(e){function t(t,r){var n=e.call(this,t,r)||this;return n.type="number",n.isInteger=!0,n.value=Number.NaN,n}return Q(t,e),t}(oe),ue=function(e){function t(t,r,n){var o=e.call(this,t,r,n)||this;return o.type="string",o.value="",o}return Q(t,e),t}(oe),fe=function(e){function t(t,r){var n=e.call(this,t,r)||this;return n.type="property",n.colonOffset=-1,n}return Q(t,e),Object.defineProperty(t.prototype,"children",{get:function(){return this.valueNode?[this.keyNode,this.valueNode]:[this.keyNode]},enumerable:!0,configurable:!0}),t}(oe),le=function(e){function t(t,r){var n=e.call(this,t,r)||this;return n.type="object",n.properties=[],n}return Q(t,e),Object.defineProperty(t.prototype,"children",{get:function(){return this.properties},enumerable:!0,configurable:!0}),t}(oe);function he(e){return"boolean"==typeof e?e?{}:{not:{}}:e}!function(e){e[e.Key=0]="Key",e[e.Enum=1]="Enum"}(ne||(ne={}));var pe=function(){function e(e,t){void 0===e&&(e=-1),void 0===t&&(t=null),this.focusOffset=e,this.exclude=t,this.schemas=[]}return e.prototype.add=function(e){this.schemas.push(e)},e.prototype.merge=function(e){var t;(t=this.schemas).push.apply(t,e.schemas)},e.prototype.include=function(e){return(-1===this.focusOffset||ye(e,this.focusOffset))&&e!==this.exclude},e.prototype.newSub=function(){return new e(-1,this.exclude)},e}(),de=function(){function e(){}return 
Object.defineProperty(e.prototype,"schemas",{get:function(){return[]},enumerable:!0,configurable:!0}),e.prototype.add=function(e){},e.prototype.merge=function(e){},e.prototype.include=function(e){return!0},e.prototype.newSub=function(){return this},e.instance=new e,e}(),me=function(){function e(){this.problems=[],this.propertiesMatches=0,this.propertiesValueMatches=0,this.primaryValueMatches=0,this.enumValueMatch=!1,this.enumValues=null}return e.prototype.hasProblems=function(){return!!this.problems.length},e.prototype.mergeAll=function(e){var t=this;e.forEach(function(e){t.merge(e)})},e.prototype.merge=function(e){this.problems=this.problems.concat(e.problems)},e.prototype.mergeEnumValues=function(e){if(!this.enumValueMatch&&!e.enumValueMatch&&this.enumValues&&e.enumValues){this.enumValues=this.enumValues.concat(e.enumValues);for(var t=0,r=this.problems;t<r.length;t++){var n=r[t];n.code===Z.EnumValueMismatch&&(n.message=Y("enumWarning","Value is not accepted. Valid values: {0}.",this.enumValues.map(function(e){return JSON.stringify(e)}).join(", ")))}}},e.prototype.mergePropertyMatch=function(e){this.merge(e),this.propertiesMatches++,(e.enumValueMatch||!e.hasProblems()&&e.propertiesMatches)&&this.propertiesValueMatches++,e.enumValueMatch&&e.enumValues&&1===e.enumValues.length&&this.primaryValueMatches++},e.prototype.compare=function(e){var t=this.hasProblems();return t!==e.hasProblems()?t?-1:1:this.enumValueMatch!==e.enumValueMatch?e.enumValueMatch?-1:1:this.primaryValueMatches!==e.primaryValueMatches?this.primaryValueMatches-e.primaryValueMatches:this.propertiesValueMatches!==e.propertiesValueMatches?this.propertiesValueMatches-e.propertiesValueMatches:this.propertiesMatches-e.propertiesMatches},e}();function ge(e){switch(e.type){case"array":return e.items.map(ge);case"object":for(var t=Object.create(null),r=0,n=e.properties;r<n.length;r++){var o=n[r];t[o.keyNode.value]=ge(o.valueNode)}return t;case"string":case"number":case"boolean":return e.value}return 
null}function ve(e){if(!e.parent)return[];var t=ve(e.parent);if("property"===e.parent.type){var r=e.parent.keyNode.value;t.push(r)}else if("array"===e.parent.type){var n=e.parent.items.indexOf(e);-1!==n&&t.push(n)}return t}function ye(e,t,r){return void 0===r&&(r=!1),t>=e.offset&&t<e.offset+e.length||r&&t===e.offset+e.length}var be=function(){function e(e,t,r,n){void 0===t&&(t=[]),void 0===r&&(r=[]),void 0===n&&(n=[]),this.root=e,this.syntaxErrors=t,this.comments=r,this.externalDiagnostic=n}return e.prototype.getNodeFromOffset=function(e){var t=function(r){if(e>=r.offset&&e<r.offset+r.length){for(var n=r.children,o=0;o<n.length&&n[o].offset<=e;o++){var i=t(n[o]);if(i)return i}return r}return null};return this.root&&t(this.root)},e.prototype.getNodeFromOffsetEndInclusive=function(e){var t=function(r){if(e>=r.offset&&e<=r.offset+r.length){for(var n=r.children,o=0;o<n.length&&n[o].offset<=e;o++){var i=t(n[o]);if(i)return i}return r}return null};return this.root&&t(this.root)},e.prototype.visit=function(e){if(this.root){var t=function(r){for(var n=e(r),o=r.children,i=0;i<o.length&&n;i++)n=t(o[i]);return n};t(this.root)}},e.prototype.validate=function(e){if(this.root&&e){var t=new me;return xe(this.root,e,t,de.instance),t.problems}return null},e.prototype.getMatchingSchemas=function(e,t,r){void 0===t&&(t=-1),void 0===r&&(r=null);var n=new pe(t,r);return this.root&&e&&xe(this.root,e,new me,n),n.schemas},e}();function xe(e,t,r,n){if(e&&n.include(e)){switch(e.type){case"object":!function(e,t,r,n){var o=Object.create(null),i=[];e.properties.forEach(function(e){var t=e.keyNode.value;o[t]=e.valueNode,i.push(t)}),Array.isArray(t.required)&&t.required.forEach(function(t){if(!o[t]){var n=e.parent&&"property"===e.parent.type&&e.parent.keyNode,i=n?{offset:n.offset,length:n.length}:{offset:e.offset,length:1};r.problems.push({location:i,severity:ee.Warning,message:Y("MissingRequiredPropWarning",'Missing property "{0}".',t)})}});var a=function(e){for(var 
t=i.indexOf(e);t>=0;)i.splice(t,1),t=i.indexOf(e)};t.properties&&Object.keys(t.properties).forEach(function(e){a(e);var i=t.properties[e],s=o[e];if(s)if("boolean"==typeof i)if(i)r.propertiesMatches++,r.propertiesValueMatches++;else{var c=s.parent;r.problems.push({location:{offset:c.keyNode.offset,length:c.keyNode.length},severity:ee.Warning,message:t.errorMessage||Y("DisallowedExtraPropWarning","Property {0} is not allowed.",e)})}else{var u=new me;xe(s,i,u,n),r.mergePropertyMatch(u)}});t.patternProperties&&Object.keys(t.patternProperties).forEach(function(e){var s=new RegExp(e);i.slice(0).forEach(function(i){if(s.test(i)){a(i);var c=o[i];if(c){var u=t.patternProperties[e];if("boolean"==typeof u)if(u)r.propertiesMatches++,r.propertiesValueMatches++;else{var f=c.parent;r.problems.push({location:{offset:f.keyNode.offset,length:f.keyNode.length},severity:ee.Warning,message:t.errorMessage||Y("DisallowedExtraPropWarning","Property {0} is not allowed.",i)})}else{var l=new me;xe(c,u,l,n),r.mergePropertyMatch(l)}}}})});"object"==typeof t.additionalProperties?i.forEach(function(e){var i=o[e];if(i){var a=new me;xe(i,t.additionalProperties,a,n),r.mergePropertyMatch(a)}}):!1===t.additionalProperties&&i.length>0&&i.forEach(function(e){var n=o[e];if(n){var i=n.parent;r.problems.push({location:{offset:i.keyNode.offset,length:i.keyNode.length},severity:ee.Warning,message:t.errorMessage||Y("DisallowedExtraPropWarning","Property {0} is not allowed.",e)})}});t.maxProperties&&e.properties.length>t.maxProperties&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("MaxPropWarning","Object has more properties than limit of {0}.",t.maxProperties)});t.minProperties&&e.properties.length<t.minProperties&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("MinPropWarning","Object has fewer properties than the required number of 
{0}",t.minProperties)});t.dependencies&&Object.keys(t.dependencies).forEach(function(i){var a=o[i];if(a){var s=t.dependencies[i];if(Array.isArray(s))s.forEach(function(t){o[t]?r.propertiesValueMatches++:r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("RequiredDependentPropWarning","Object is missing property {0} required by property {1}.",t,i)})});else{var c=he(s);if(c){var u=new me;xe(e,c,u,n),r.mergePropertyMatch(u)}}}});var s=he(t.propertyNames);s&&e.properties.forEach(function(e){var t=e.keyNode;t&&xe(t,s,r,de.instance)})}(e,t,r,n);break;case"array":!function(e,t,r,n){if(Array.isArray(t.items)){var o=t.items;if(o.forEach(function(t,i){var a=he(t),s=new me,c=e.items[i];c?(xe(c,a,s,n),r.mergePropertyMatch(s)):e.items.length>=o.length&&r.propertiesValueMatches++}),e.items.length>o.length)if("object"==typeof t.additionalItems)for(var i=o.length;i<e.items.length;i++){var a=new me;xe(e.items[i],t.additionalItems,a,n),r.mergePropertyMatch(a)}else!1===t.additionalItems&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("additionalItemsWarning","Array has too many items according to schema. Expected {0} or fewer.",o.length)})}else{var s=he(t.items);s&&e.items.forEach(function(e){var t=new me;xe(e,s,t,n),r.mergePropertyMatch(t)})}var c=he(t.contains);if(c){var u=e.items.some(function(e){var t=new me;return xe(e,c,t,de.instance),!t.hasProblems()});u||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.errorMessage||Y("requiredItemMissingWarning","Array does not contain required item.",t.minItems)})}t.minItems&&e.items.length<t.minItems&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("minItemsWarning","Array has too few items. 
Expected {0} or more.",t.minItems)});t.maxItems&&e.items.length>t.maxItems&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("maxItemsWarning","Array has too many items. Expected {0} or fewer.",t.minItems)});if(!0===t.uniqueItems){var f=ge(e),l=f.some(function(e,t){return t!==f.lastIndexOf(e)});l&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("uniqueItemsWarning","Array has duplicate items.")})}}(e,t,r,n);break;case"string":!function(e,t,r,n){t.minLength&&e.value.length<t.minLength&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("minLengthWarning","String is shorter than the minimum length of {0}.",t.minLength)});t.maxLength&&e.value.length>t.maxLength&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("maxLengthWarning","String is longer than the maximum length of {0}.",t.maxLength)});if(t.pattern){var o=new RegExp(t.pattern);o.test(e.value)||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.patternErrorMessage||t.errorMessage||Y("patternWarning",'String does not match the pattern of "{0}".',t.pattern)})}if(t.format)switch(t.format){case"uri":case"uri-reference":var i=void 0;if(e.value)try{var a=X.a.parse(e.value);a.scheme||"uri"!==t.format||(i=Y("uriSchemeMissing","URI with a scheme is expected."))}catch(e){i=e.message}else i=Y("uriEmpty","URI expected.");i&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.patternErrorMessage||t.errorMessage||Y("uriFormatWarning","String is not a URI: {0}",i)});break;case"email":e.value.match(re)||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.patternErrorMessage||t.errorMessage||Y("emailFormatWarning","String is not an e-mail 
address.")});break;case"color-hex":e.value.match(te)||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.patternErrorMessage||t.errorMessage||Y("colorHexFormatWarning","Invalid color format. Use #RGB, #RGBA, #RRGGBB or #RRGGBBAA.")})}}(e,t,r);break;case"number":!function(e,t,r,n){var o=e.value;"number"==typeof t.multipleOf&&o%t.multipleOf!=0&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("multipleOfWarning","Value is not divisible by {0}.",t.multipleOf)});function i(e,t){return"number"==typeof t?t:"boolean"==typeof t&&t?e:void 0}function a(e,t){if("boolean"!=typeof t||!t)return e}var s=i(t.minimum,t.exclusiveMinimum);"number"==typeof s&&o<=s&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("exclusiveMinimumWarning","Value is below the exclusive minimum of {0}.",s)});var c=i(t.maximum,t.exclusiveMaximum);"number"==typeof c&&o>=c&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("exclusiveMaximumWarning","Value is above the exclusive maximum of {0}.",c)});var u=a(t.minimum,t.exclusiveMinimum);"number"==typeof u&&o<u&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("minimumWarning","Value is below the minimum of {0}.",u)});var f=a(t.maximum,t.exclusiveMaximum);"number"==typeof f&&o>f&&r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("maximumWarning","Value is above the maximum of {0}.",f)})}(e,t,r);break;case"property":return xe(e.valueNode,t,r,n)}!function(){function o(t){return e.type===t||"integer"===t&&"number"===e.type&&e.isInteger}Array.isArray(t.type)?t.type.some(o)||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.errorMessage||Y("typeArrayMismatchWarning","Incorrect type. 
Expected one of {0}.",t.type.join(", "))}):t.type&&(o(t.type)||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:t.errorMessage||Y("typeMismatchWarning",'Incorrect type. Expected "{0}".',t.type)}));Array.isArray(t.allOf)&&t.allOf.forEach(function(t){xe(e,he(t),r,n)});var i=he(t.not);if(i){var a=new me,s=n.newSub();xe(e,i,a,s),a.hasProblems()||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,message:Y("notSchemaWarning","Matches a schema that is not allowed.")}),s.schemas.forEach(function(e){e.inverted=!e.inverted,n.add(e)})}var c=function(t,o){var i=[],a=null;return t.forEach(function(t){var r=he(t),s=new me,c=n.newSub();if(xe(e,r,s,c),s.hasProblems()||i.push(r),a)if(o||s.hasProblems()||a.validationResult.hasProblems()){var u=s.compare(a.validationResult);u>0?a={schema:r,validationResult:s,matchingSchemas:c}:0===u&&(a.matchingSchemas.merge(c),a.validationResult.mergeEnumValues(s))}else a.matchingSchemas.merge(c),a.validationResult.propertiesMatches+=s.propertiesMatches,a.validationResult.propertiesValueMatches+=s.propertiesValueMatches;else a={schema:r,validationResult:s,matchingSchemas:c}}),i.length>1&&o&&r.problems.push({location:{offset:e.offset,length:1},severity:ee.Warning,message:Y("oneOfWarning","Matches multiple schemas when only one must validate.")}),null!==a&&(r.merge(a.validationResult),r.propertiesMatches+=a.validationResult.propertiesMatches,r.propertiesValueMatches+=a.validationResult.propertiesValueMatches,n.merge(a.matchingSchemas)),i.length};Array.isArray(t.anyOf)&&c(t.anyOf,!1);Array.isArray(t.oneOf)&&c(t.oneOf,!0);if(Array.isArray(t.enum)){for(var u=ge(e),f=!1,l=0,h=t.enum;l<h.length;l++){var p=h[l];if(J(u,p)){f=!0;break}}r.enumValues=t.enum,r.enumValueMatch=f,f||r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,code:Z.EnumValueMismatch,message:t.errorMessage||Y("enumWarning","Value is not accepted. 
Valid values: {0}.",t.enum.map(function(e){return JSON.stringify(e)}).join(", "))})}if(t.const){var u=ge(e);J(u,t.const)?r.enumValueMatch=!0:(r.problems.push({location:{offset:e.offset,length:e.length},severity:ee.Warning,code:Z.EnumValueMismatch,message:t.errorMessage||Y("constWarning","Value must be {0}.",JSON.stringify(t.const))}),r.enumValueMatch=!1),r.enumValues=[t.const]}t.deprecationMessage&&e.parent&&r.problems.push({location:{offset:e.parent.offset,length:e.parent.length},severity:ee.Warning,message:t.deprecationMessage})}(),n.add({node:e,schema:t})}}function Se(e,t){var r=[],n=e.getText(),o=H(n,!1),i=t&&t.collectComments?[]:void 0;function a(){for(;;){var e=o.scan();switch(u(),e){case 12:case 13:Array.isArray(i)&&i.push({offset:o.getTokenOffset(),length:o.getTokenLength()});break;case 15:case 14:break;default:return e}}}function s(e,t,n){0!==r.length&&r[r.length-1].location.offset===n.offset||r.push({message:e,location:n,code:t,severity:ee.Error})}function c(e,t,r,i,c){void 0===r&&(r=null),void 0===i&&(i=[]),void 0===c&&(c=[]);var u=o.getTokenOffset(),l=o.getTokenOffset()+o.getTokenLength();if(u===l&&u>0){for(u--;u>0&&/\s/.test(n.charAt(u));)u--;l=u+1}if(s(e,t,{offset:u,length:l-u}),r&&f(r,!1),i.length+c.length>0)for(var h=o.getToken();17!==h;){if(-1!==i.indexOf(h)){a();break}if(-1!==c.indexOf(h))break;h=a()}return r}function u(){switch(o.getTokenError()){case 4:return c(Y("InvalidUnicode","Invalid unicode sequence in string."),Z.InvalidUnicode),!0;case 5:return c(Y("InvalidEscapeCharacter","Invalid escape character in string."),Z.InvalidEscapeCharacter),!0;case 3:return c(Y("UnexpectedEndOfNumber","Unexpected end of number."),Z.UnexpectedEndOfNumber),!0;case 1:return c(Y("UnexpectedEndOfComment","Unexpected end of comment."),Z.UnexpectedEndOfComment),!0;case 2:return c(Y("UnexpectedEndOfString","Unexpected end of string."),Z.UnexpectedEndOfString),!0;case 6:return c(Y("InvalidCharacter","Invalid characters in string. 
Control characters must be escaped."),Z.InvalidCharacter),!0}return!1}function f(e,t){return e.length=o.getTokenOffset()+o.getTokenLength()-e.offset,t&&a(),e}function l(t,n){var i=new fe(t,o.getTokenOffset()),s=h(i);if(!s){if(16!==o.getToken())return null;c(Y("DoubleQuotesExpected","Property keys must be doublequoted"),Z.Undefined);var u=new ue(i,o.getTokenOffset(),o.getTokenLength());u.value=o.getTokenValue(),s=u,a()}i.keyNode=s;var f=n[s.value];if(f?(r.push({location:{offset:i.keyNode.offset,length:i.keyNode.length},message:Y("DuplicateKeyWarning","Duplicate object key"),code:Z.Undefined,severity:ee.Warning}),"object"==typeof f&&r.push({location:{offset:f.keyNode.offset,length:f.keyNode.length},message:Y("DuplicateKeyWarning","Duplicate object key"),code:Z.Undefined,severity:ee.Warning}),n[s.value]=!0):n[s.value]=i,6===o.getToken())i.colonOffset=o.getTokenOffset(),a();else if(c(Y("ColonExpected","Colon expected"),Z.ColonExpected),10===o.getToken()&&e.positionAt(s.offset+s.length).line<e.positionAt(o.getTokenOffset()).line)return i.length=s.length,i;var l=p(i,s.value);return l?(i.valueNode=l,i.length=l.offset+l.length-i.offset,i):c(Y("ValueExpected","Value expected"),Z.ValueExpected,i,[],[2,5])}function h(e){if(10!==o.getToken())return null;var t=new ue(e,o.getTokenOffset());return t.value=o.getTokenValue(),f(t,!0)}function p(e,t){return function(e){if(3!==o.getToken())return null;var t=new se(e,o.getTokenOffset());a();for(var r=0,n=!1;4!==o.getToken()&&17!==o.getToken();){if(5===o.getToken()){n||c(Y("ValueExpected","Value expected"),Z.ValueExpected);var i=o.getTokenOffset();if(a(),4===o.getToken()){n&&s(Y("TrailingComma","Trailing comma"),Z.TrailingComma,{offset:i,length:1});continue}}else n&&c(Y("ExpectedComma","Expected comma"),Z.CommaExpected);var u=p(t,r++);u?t.items.push(u):c(Y("PropertyExpected","Value expected"),Z.ValueExpected,null,[],[4,5]),n=!0}return 4!==o.getToken()?c(Y("ExpectedCloseBracket","Expected comma or closing 
bracket"),Z.CommaOrCloseBacketExpected,t):f(t,!0)}(e)||function(e){if(1!==o.getToken())return null;var t=new le(e,o.getTokenOffset()),r=Object.create(null);a();for(var n=!1;2!==o.getToken()&&17!==o.getToken();){if(5===o.getToken()){n||c(Y("PropertyExpected","Property expected"),Z.PropertyExpected);var i=o.getTokenOffset();if(a(),2===o.getToken()){n&&s(Y("TrailingComma","Trailing comma"),Z.TrailingComma,{offset:i,length:1});continue}}else n&&c(Y("ExpectedComma","Expected comma"),Z.CommaExpected);var u=l(t,r);u?t.properties.push(u):c(Y("PropertyExpected","Property expected"),Z.PropertyExpected,null,[],[2,5]),n=!0}return 2!==o.getToken()?c(Y("ExpectedCloseBrace","Expected comma or closing brace"),Z.CommaOrCloseBraceExpected,t):f(t,!0)}(e)||h(e)||function(e){if(11!==o.getToken())return null;var t=new ce(e,o.getTokenOffset());if(0===o.getTokenError()){var r=o.getTokenValue();try{var n=JSON.parse(r);if("number"!=typeof n)return c(Y("InvalidNumberFormat","Invalid number format."),Z.Undefined,t);t.value=n}catch(e){return c(Y("InvalidNumberFormat","Invalid number format."),Z.Undefined,t)}t.isInteger=-1===r.indexOf(".")}return f(t,!0)}(e)||function(e){switch(o.getToken()){case 7:return f(new ie(e,o.getTokenOffset()),!0);case 8:return f(new ae(e,!0,o.getTokenOffset()),!0);case 9:return f(new ae(e,!1,o.getTokenOffset()),!0);default:return null}}(e)}var d=null;return 17!==a()&&((d=p(null))?17!==o.getToken()&&c(Y("End of file expected","End of file expected."),Z.Undefined):c(Y("Invalid symbol","Expected a JSON object, array or literal."),Z.Undefined)),new be(d,r,i)}function Oe(e,t){var r=e.length-t.length;return r>0?e.lastIndexOf(t)===r:0===r&&e===t}var ke=K(),Te=function(){function e(e,t,r){void 0===t&&(t=[]),this.templateVarIdCounter=0,this.schemaService=e,this.contributions=t,this.promise=r||Promise}return e.prototype.doResolve=function(e){for(var t=this.contributions.length-1;t>=0;t--)if(this.contributions[t].resolveCompletion){var 
r=this.contributions[t].resolveCompletion(e);if(r)return r}return this.promise.resolve(e)},e.prototype.doComplete=function(e,t,r){var n=this,i={items:[],isIncomplete:!1},a=e.offsetAt(t),s=r.getNodeFromOffsetEndInclusive(a);if(this.isInComment(e,s?s.offset:0,a))return Promise.resolve(i);var c=this.getCurrentWord(e,a),f=null;if(!s||"string"!==s.type&&"number"!==s.type&&"boolean"!==s.type&&"null"!==s.type){var l=a-c.length;l>0&&'"'===e.getText()[l-1]&&l--,f=o.create(e.positionAt(l),t)}else f=o.create(e.positionAt(s.offset),e.positionAt(s.offset+s.length));var h={},p={add:function(e){var t=h[e.label];t?t.documentation||(t.documentation=e.documentation):(h[e.label]=e,f&&(e.textEdit=u.replace(f,e.insertText)),i.items.push(e))},setAsIncomplete:function(){i.isIncomplete=!0},error:function(e){console.error(e)},log:function(e){console.log(e)},getNumberOfProposals:function(){return i.items.length}};return this.schemaService.getSchemaForResource(e.uri,r).then(function(t){var o=[],u=!0,l="",d=null;if(s&&"string"===s.type){var m=s.parent;m&&"property"===m.type&&m.keyNode===s&&(u=!m.valueNode,d=m,l=e.getText().substr(s.offset+1,s.length-2),m&&(s=m.parent))}if(s&&"object"===s.type){if(s.offset===a)return i;s.properties.forEach(function(e){d&&d===e||(h[e.keyNode.value]=y.create("__"))});var b="";u&&(b=n.evaluateSeparatorAfter(e,e.offsetAt(f.end))),t?n.getPropertyCompletions(t,r,s,u,b,p):n.getSchemaLessPropertyCompletions(r,s,l,p);var x=ve(s);n.contributions.forEach(function(t){var r=t.collectPropertyCompletions(e.uri,x,c,u,""===b,p);r&&o.push(r)}),!t&&c.length>0&&'"'!==e.getText().charAt(a-c.length-1)&&p.add({kind:g.Property,label:n.getLabelForValue(c),insertText:n.getInsertTextForProperty(c,null,!1,b),insertTextFormat:v.Snippet,documentation:""})}var S={};return 
t?n.getValueCompletions(t,r,s,a,e,p,S):n.getSchemaLessValueCompletions(r,s,a,e,p),n.contributions.length>0&&n.getContributedValueCompletions(r,s,a,e,p,o),n.promise.all(o).then(function(){if(0===p.getNumberOfProposals()){var t=a;!s||"string"!==s.type&&"number"!==s.type&&"boolean"!==s.type&&"null"!==s.type||(t=s.offset+s.length);var r=n.evaluateSeparatorAfter(e,t);n.addFillerValueCompletions(S,r,p)}return i})})},e.prototype.getPropertyCompletions=function(e,t,r,n,o,i){var a=this;t.getMatchingSchemas(e.schema,r.offset).forEach(function(e){if(e.node===r&&!e.inverted){var t=e.schema.properties;t&&Object.keys(t).forEach(function(e){var r=t[e];if("object"==typeof r&&!r.deprecationMessage&&!r.doNotSuggest){var s={kind:g.Property,label:e,insertText:a.getInsertTextForProperty(e,r,n,o),insertTextFormat:v.Snippet,filterText:a.getFilterTextForValue(e),documentation:r.description||""};Oe(s.insertText,"$1"+o)&&(s.command={title:"Suggest",command:"editor.action.triggerSuggest"}),i.add(s)}})}})},e.prototype.getSchemaLessPropertyCompletions=function(e,t,r,n){var o=this,i=function(e){e.properties.forEach(function(e){var t=e.keyNode.value;n.add({kind:g.Property,label:t,insertText:o.getInsertTextForValue(t,""),insertTextFormat:v.Snippet,filterText:o.getFilterTextForValue(t),documentation:""})})};if(t.parent)if("property"===t.parent.type){var a=t.parent.keyNode.value;e.visit(function(e){return"property"===e.type&&e!==t.parent&&e.keyNode.value===a&&e.valueNode&&"object"===e.valueNode.type&&i(e.valueNode),!0})}else"array"===t.parent.type&&t.parent.items.forEach(function(e){"object"===e.type&&e!==t&&i(e)});else"object"===t.type&&n.add({kind:g.Property,label:"$schema",insertText:this.getInsertTextForProperty("$schema",null,!0,""),insertTextFormat:v.Snippet,documentation:"",filterText:this.getFilterTextForValue("$schema")})},e.prototype.getSchemaLessValueCompletions=function(e,t,r,n,o){var 
i=this,a=r;if(!t||"string"!==t.type&&"number"!==t.type&&"boolean"!==t.type&&"null"!==t.type||(a=t.offset+t.length,t=t.parent),!t)return o.add({kind:this.getSuggestionKind("object"),label:"Empty object",insertText:this.getInsertTextForValue({},""),insertTextFormat:v.Snippet,documentation:""}),void o.add({kind:this.getSuggestionKind("array"),label:"Empty array",insertText:this.getInsertTextForValue([],""),insertTextFormat:v.Snippet,documentation:""});var s=this.evaluateSeparatorAfter(n,a),c=function(e){ye(e.parent,r,!0)||o.add({kind:i.getSuggestionKind(e.type),label:i.getLabelTextForMatchingNode(e,n),insertText:i.getInsertTextForMatchingNode(e,n,s),insertTextFormat:v.Snippet,documentation:""}),"boolean"===e.type&&i.addBooleanValueCompletion(!e.value,s,o)};if("property"===t.type&&r>t.colonOffset){var u=t.valueNode;if(u&&(r>u.offset+u.length||"object"===u.type||"array"===u.type))return;var f=t.keyNode.value;e.visit(function(e){return"property"===e.type&&e.keyNode.value===f&&e.valueNode&&c(e.valueNode),!0}),"$schema"===f&&t.parent&&!t.parent.parent&&this.addDollarSchemaCompletions(s,o)}if("array"===t.type)if(t.parent&&"property"===t.parent.type){var l=t.parent.keyNode.value;e.visit(function(e){var t=e;return"property"===e.type&&t.keyNode.value===l&&t.valueNode&&"array"===t.valueNode.type&&t.valueNode.items.forEach(c),!0})}else t.items.forEach(c)},e.prototype.getValueCompletions=function(e,t,r,n,o,i,a){var s=this,c=n,u=null,f=null;if(!r||"string"!==r.type&&"number"!==r.type&&"boolean"!==r.type&&"null"!==r.type||(c=r.offset+r.length,f=r,r=r.parent),r){if("property"===r.type&&n>r.colonOffset){var l=r.valueNode;if(l&&n>l.offset+l.length)return;u=r.keyNode.value,r=r.parent}if(r&&(null!==u||"array"===r.type)){var h=this.evaluateSeparatorAfter(o,c);t.getMatchingSchemas(e.schema,r.offset,f).forEach(function(e){if(e.node===r&&!e.inverted&&e.schema){if("array"===r.type&&e.schema.items)if(Array.isArray(e.schema.items)){var 
t=s.findItemAtOffset(r,o,n);t<e.schema.items.length&&s.addSchemaValueCompletions(e.schema.items[t],h,i,a)}else s.addSchemaValueCompletions(e.schema.items,h,i,a);if(e.schema.properties){var c=e.schema.properties[u];c&&s.addSchemaValueCompletions(c,h,i,a)}}}),"$schema"!==u||r.parent||this.addDollarSchemaCompletions(h,i),a.boolean&&(this.addBooleanValueCompletion(!0,h,i),this.addBooleanValueCompletion(!1,h,i)),a.null&&this.addNullValueCompletion(h,i)}}else this.addSchemaValueCompletions(e.schema,"",i,a)},e.prototype.getContributedValueCompletions=function(e,t,r,n,o,i){if(t){if("string"!==t.type&&"number"!==t.type&&"boolean"!==t.type&&"null"!==t.type||(t=t.parent),"property"===t.type&&r>t.colonOffset){var a=t.keyNode.value,s=t.valueNode;if(!s||r<=s.offset+s.length){var c=ve(t.parent);this.contributions.forEach(function(e){var t=e.collectValueCompletions(n.uri,c,a,o);t&&i.push(t)})}}}else this.contributions.forEach(function(e){var t=e.collectDefaultCompletions(n.uri,o);t&&i.push(t)})},e.prototype.addSchemaValueCompletions=function(e,t,r,n){var o=this;"object"==typeof e&&(this.addEnumValueCompletions(e,t,r),this.addDefaultValueCompletions(e,t,r),this.collectTypes(e,n),Array.isArray(e.allOf)&&e.allOf.forEach(function(e){return o.addSchemaValueCompletions(e,t,r,n)}),Array.isArray(e.anyOf)&&e.anyOf.forEach(function(e){return o.addSchemaValueCompletions(e,t,r,n)}),Array.isArray(e.oneOf)&&e.oneOf.forEach(function(e){return o.addSchemaValueCompletions(e,t,r,n)}))},e.prototype.addDefaultValueCompletions=function(e,t,r,n){var o=this;void 0===n&&(n=0);var i=!1;if(e.default){for(var a=e.type,s=e.default,c=n;c>0;c--)s=[s],a="array";r.add({kind:this.getSuggestionKind(a),label:this.getLabelForValue(s),insertText:this.getInsertTextForValue(s,t),insertTextFormat:v.Snippet,detail:ke("json.suggest.default","Default value")}),i=!0}Array.isArray(e.defaultSnippets)&&e.defaultSnippets.forEach(function(a){var s,c,u=e.type,f=a.body,l=a.label;if(void 0!==f){e.type;for(var 
h=n;h>0;h--)f=[f],"array";s=o.getInsertTextForSnippetValue(f,t),c=o.getFilterTextForSnippetValue(f),l=l||o.getLabelForSnippetValue(f)}else if("string"==typeof a.bodyText){var p="",d="",m="";for(h=n;h>0;h--)p=p+m+"[\n",d=d+"\n"+m+"]",m+="\t",u="array";s=p+m+a.bodyText.split("\n").join("\n"+m)+d+t,l=l||s,c=s.replace(/[\n]/g,"")}r.add({kind:o.getSuggestionKind(u),label:l,documentation:a.description,insertText:s,insertTextFormat:v.Snippet,filterText:c}),i=!0}),i||"object"!=typeof e.items||Array.isArray(e.items)||this.addDefaultValueCompletions(e.items,t,r,n+1)},e.prototype.addEnumValueCompletions=function(e,t,r){if(Array.isArray(e.enum))for(var n=0,o=e.enum.length;n<o;n++){var i=e.enum[n],a=e.description;e.enumDescriptions&&n<e.enumDescriptions.length&&(a=e.enumDescriptions[n]),r.add({kind:this.getSuggestionKind(e.type),label:this.getLabelForValue(i),insertText:this.getInsertTextForValue(i,t),insertTextFormat:v.Snippet,documentation:a})}},e.prototype.collectTypes=function(e,t){if(!Array.isArray(e.enum)){var r=e.type;Array.isArray(r)?r.forEach(function(e){return t[e]=!0}):t[r]=!0}},e.prototype.addFillerValueCompletions=function(e,t,r){e.object&&r.add({kind:this.getSuggestionKind("object"),label:"{}",insertText:this.getInsertTextForGuessedValue({},t),insertTextFormat:v.Snippet,detail:ke("defaults.object","New object"),documentation:""}),e.array&&r.add({kind:this.getSuggestionKind("array"),label:"[]",insertText:this.getInsertTextForGuessedValue([],t),insertTextFormat:v.Snippet,detail:ke("defaults.array","New array"),documentation:""})},e.prototype.addBooleanValueCompletion=function(e,t,r){r.add({kind:this.getSuggestionKind("boolean"),label:e?"true":"false",insertText:this.getInsertTextForValue(e,t),insertTextFormat:v.Snippet,documentation:""})},e.prototype.addNullValueCompletion=function(e,t){t.add({kind:this.getSuggestionKind("null"),label:"null",insertText:"null"+e,insertTextFormat:v.Snippet,documentation:""})},e.prototype.addDollarSchemaCompletions=function(e,t){var 
r=this;this.schemaService.getRegisteredSchemaIds(function(e){return"http"===e||"https"===e}).forEach(function(n){return t.add({kind:g.Module,label:r.getLabelForValue(n),filterText:r.getFilterTextForValue(n),insertText:r.getInsertTextForValue(n,e),insertTextFormat:v.Snippet,documentation:""})})},e.prototype.getLabelForValue=function(e){var t=JSON.stringify(e);return t.length>57?t.substr(0,57).trim()+"...":t},e.prototype.getFilterTextForValue=function(e){return JSON.stringify(e)},e.prototype.getFilterTextForSnippetValue=function(e){return JSON.stringify(e).replace(/\$\{\d+:([^}]+)\}|\$\d+/g,"$1")},e.prototype.getLabelForSnippetValue=function(e){var t=JSON.stringify(e);return(t=t.replace(/\$\{\d+:([^}]+)\}|\$\d+/g,"$1")).length>57?t.substr(0,57).trim()+"...":t},e.prototype.getInsertTextForPlainText=function(e){return e.replace(/[\\\$\}]/g,"\\$&")},e.prototype.getInsertTextForValue=function(e,t){var r=JSON.stringify(e,null,"\t");return"{}"===r?"{\n\t$1\n}"+t:"[]"===r?"[\n\t$1\n]"+t:this.getInsertTextForPlainText(r+t)},e.prototype.getInsertTextForSnippetValue=function(e,t){return function e(t,r,n){if(null!==t&&"object"==typeof t){var o=r+"\t";if(Array.isArray(t)){if(0===t.length)return"[]";for(var i="[\n",a=0;a<t.length;a++)i+=o+e(t[a],o,n),a<t.length-1&&(i+=","),i+="\n";return i+=r+"]"}var s=Object.keys(t);if(0===s.length)return"{}";for(i="{\n",a=0;a<s.length;a++){var c=s[a];i+=o+JSON.stringify(c)+": "+e(t[c],o,n),a<s.length-1&&(i+=","),i+="\n"}return i+=r+"}"}return n(t)}(e,"",function(e){return"string"==typeof e&&"^"===e[0]?e.substr(1):JSON.stringify(e)})+t},e.prototype.getInsertTextForGuessedValue=function(e,t){switch(typeof e){case"object":return null===e?"${1:null}"+t:this.getInsertTextForValue(e,t);case"string":var r=JSON.stringify(e);return r=r.substr(1,r.length-2),'"${1:'+(r=this.getInsertTextForPlainText(r))+'}"'+t;case"number":case"boolean":return"${1:"+JSON.stringify(e)+"}"+t}return 
this.getInsertTextForValue(e,t)},e.prototype.getSuggestionKind=function(e){if(Array.isArray(e)){var t=e;e=t.length>0?t[0]:null}if(!e)return g.Value;switch(e){case"string":return g.Value;case"object":return g.Module;case"property":return g.Property;default:return g.Value}},e.prototype.getLabelTextForMatchingNode=function(e,t){switch(e.type){case"array":return"[]";case"object":return"{}";default:return t.getText().substr(e.offset,e.length)}},e.prototype.getInsertTextForMatchingNode=function(e,t,r){switch(e.type){case"array":return this.getInsertTextForValue([],r);case"object":return this.getInsertTextForValue({},r);default:var n=t.getText().substr(e.offset,e.length)+r;return this.getInsertTextForPlainText(n)}},e.prototype.getInsertTextForProperty=function(e,t,r,n){var o=this.getInsertTextForValue(e,"");if(!r)return o;var i,a=o+": ",s=0;if(t){if(Array.isArray(t.defaultSnippets)){if(1===t.defaultSnippets.length){var c=t.defaultSnippets[0].body;void 0!==c&&(i=this.getInsertTextForSnippetValue(c,""))}s+=t.defaultSnippets.length}if(t.enum&&(i||1!==t.enum.length||(i=this.getInsertTextForGuessedValue(t.enum[0],"")),s+=t.enum.length),void 0!==t.default&&(i||(i=this.getInsertTextForGuessedValue(t.default,"")),s++),0===s){var u=Array.isArray(t.type)?t.type[0]:t.type;switch(u||(t.properties?u="object":t.items&&(u="array")),u){case"boolean":i="$1";break;case"string":i='"$1"';break;case"object":i="{\n\t$1\n}";break;case"array":i="[\n\t$1\n]";break;case"number":case"integer":i="${1:0}";break;case"null":i="${1:null}";break;default:return o}}}return(!i||s>1)&&(i="$1"),a+i+n},e.prototype.getCurrentWord=function(e,t){for(var r=t-1,n=e.getText();r>=0&&-1===' \t\n\r\v":{[,]}'.indexOf(n.charAt(r));)r--;return n.substring(r+1,t)},e.prototype.evaluateSeparatorAfter=function(e,t){var r=H(e.getText(),!0);switch(r.setPosition(t),r.scan()){case 5:case 2:case 4:case 17:return"";default:return","}},e.prototype.findItemAtOffset=function(e,t,r){for(var 
n=H(t.getText(),!0),o=e.items,i=o.length-1;i>=0;i--){var a=o[i];if(r>a.offset+a.length)return n.setPosition(a.offset+a.length),5===n.scan()&&r>=n.getTokenOffset()+n.getTokenLength()?i+1:i;if(r>=a.offset)return i}return 0},e.prototype.isInComment=function(e,t,r){var n=H(e.getText(),!1);n.setPosition(t);for(var o=n.scan();17!==o&&n.getTokenOffset()+n.getTokenLength()<r;)o=n.scan();return(12===o||13===o)&&n.getTokenOffset()<=r},e}(),Ae=function(){function e(e,t,r){void 0===t&&(t=[]),this.schemaService=e,this.contributions=t,this.promise=r||Promise}return e.prototype.doHover=function(e,t,r){var n=e.offsetAt(t),i=r.getNodeFromOffset(n);if(!i||("object"===i.type||"array"===i.type)&&n>i.offset+1&&n<i.offset+i.length-1)return this.promise.resolve(null);var a=i;if("string"===i.type){var s=i.parent;if("property"===s.type&&s.keyNode===i&&!(i=s.valueNode))return this.promise.resolve(null)}for(var c=o.create(e.positionAt(a.offset),e.positionAt(a.offset+a.length)),u=function(e){return{contents:e,range:c}},f=ve(i),l=this.contributions.length-1;l>=0;l--){var h=this.contributions[l].getInfoContribution(e.uri,f);if(h)return h.then(function(e){return u(e)})}return this.schemaService.getSchemaForResource(e.uri,r).then(function(e){if(e){var t=null,n=null,o=null,a=null;r.getMatchingSchemas(e.schema,i.offset).every(function(e){if(e.node===i&&!e.inverted&&e.schema&&(t=t||e.schema.title,n=n||e.schema.markdownDescription||Ee(e.schema.description),e.schema.enum)){var r=e.schema.enum.indexOf(ge(i));e.schema.markdownEnumDescriptions?o=e.schema.markdownEnumDescriptions[r]:e.schema.enumDescriptions&&(o=Ee(e.schema.enumDescriptions[r])),o&&"string"!=typeof(a=e.schema.enum[r])&&(a=JSON.stringify(a))}return!0});var s="";return t&&(s=Ee(t)),n&&(s.length>0&&(s+="\n\n"),s+=n),o&&(s.length>0&&(s+="\n\n"),s+="`"+Ee(a)+"`: "+o),u([s])}return null})},e}();function Ee(e){if(e)return e.replace(/([^\n\r])(\r?\n)([^\n\r])/gm,"$1\n\n$3").replace(/[\\`*_{}[\]()#+\-.!]/g,"\\$&")}var Ce=K(),we=function(){function 
e(e,t){this.jsonSchemaService=e,this.promise=t,this.validationEnabled=!0}return e.prototype.configure=function(e){e&&(this.validationEnabled=e.validate,this.commentSeverity=e.allowComments?ee.Ignore:ee.Error)},e.prototype.doValidation=function(e,t,r){var n=this;if(!this.validationEnabled)return this.promise.resolve([]);var o=[],i={},s=function(t){if(t.severity!==ee.Ignore){var r=t.location.offset+" "+t.location.length+" "+t.message;if(!i[r]){i[r]=!0;var n={start:e.positionAt(t.location.offset),end:e.positionAt(t.location.offset+t.location.length)},s=t.severity===ee.Error?a.Error:a.Warning;o.push({severity:s,range:n,message:t.message})}}};return this.jsonSchemaService.getSchemaForResource(e.uri,t).then(function(e){var i=r?r.trailingCommas:ee.Error,a=r?r.comments:n.commentSeverity;if(e){if(e.errors.length&&t.root){var c=t.root,u="object"===c.type?c.properties[0]:null;if(u&&"$schema"===u.keyNode.value){var f=u.valueNode||u;s({location:{offset:f.offset,length:f.length},message:e.errors[0],severity:ee.Warning})}else s({location:{offset:c.offset,length:1},message:e.errors[0],severity:ee.Warning})}else{var l=t.validate(e.schema);l&&l.forEach(s)}_e(e.schema)&&(i=a=ee.Ignore)}if(t.syntaxErrors.forEach(function(e){e.code===Z.TrailingComma&&(e.severity=i),s(e)}),o.push.apply(o,t.externalDiagnostic),a!==ee.Ignore){var h=Ce("InvalidCommentToken","Comments are not permitted in JSON.");t.comments.forEach(function(e){s({location:e,severity:a,message:h})})}return o})},e}();function _e(e){if(e&&"object"==typeof e){if(e.allowComments)return!0;if(e.allOf)return e.allOf.some(_e)}return!1}var Ie=48,Pe=57,je=65,Ve=97,Me=102;function Ne(e){return e<Ie?0:e<=Pe?e-Ie:(e<Ve&&(e+=Ve-je),e>=Ve&&e<=Me?e-Ve+10:0)}function Fe(e){if("#"!==e[0])return null;switch(e.length){case 4:return{red:17*Ne(e.charCodeAt(1))/255,green:17*Ne(e.charCodeAt(2))/255,blue:17*Ne(e.charCodeAt(3))/255,alpha:1};case 
5:return{red:17*Ne(e.charCodeAt(1))/255,green:17*Ne(e.charCodeAt(2))/255,blue:17*Ne(e.charCodeAt(3))/255,alpha:17*Ne(e.charCodeAt(4))/255};case 7:return{red:(16*Ne(e.charCodeAt(1))+Ne(e.charCodeAt(2)))/255,green:(16*Ne(e.charCodeAt(3))+Ne(e.charCodeAt(4)))/255,blue:(16*Ne(e.charCodeAt(5))+Ne(e.charCodeAt(6)))/255,alpha:1};case 9:return{red:(16*Ne(e.charCodeAt(1))+Ne(e.charCodeAt(2)))/255,green:(16*Ne(e.charCodeAt(3))+Ne(e.charCodeAt(4)))/255,blue:(16*Ne(e.charCodeAt(5))+Ne(e.charCodeAt(6)))/255,alpha:(16*Ne(e.charCodeAt(7))+Ne(e.charCodeAt(8)))/255}}return null}var De=function(){function e(e){this.schemaService=e}return e.prototype.findDocumentSymbols=function(e,t){var r=this,n=t.root;if(!n)return null;var a=e.uri;if(("vscode://defaultsettings/keybindings.json"===a||Oe(a.toLowerCase(),"/user/keybindings.json"))&&"array"===n.type){var s=[];return n.items.forEach(function(t){if("object"===t.type)for(var r=0,n=t.properties;r<n.length;r++){var a=n[r];if("key"===a.keyNode.value){if(a.valueNode){var c=i.create(e.uri,o.create(e.positionAt(t.offset),e.positionAt(t.offset+t.length)));s.push({name:ge(a.valueNode),kind:A.Function,location:c})}return}}}),s}var c=function(t,n,a){return"array"===n.type?n.items.forEach(function(e){return c(t,e,a)}):"object"===n.type&&n.properties.forEach(function(n){var s=i.create(e.uri,o.create(e.positionAt(n.offset),e.positionAt(n.offset+n.length))),u=n.valueNode;if(u){var f=a?a+"."+n.keyNode.value:n.keyNode.value;t.push({name:n.keyNode.value,kind:r.getSymbolKind(u.type),location:s,containerName:a}),c(t,u,f)}}),t};return c([],n,void 0)},e.prototype.getSymbolKind=function(e){switch(e){case"object":return A.Module;case"string":return A.String;case"number":return A.Number;case"array":return A.Array;case"boolean":return A.Boolean;default:return A.Variable}},e.prototype.findDocumentColors=function(e,t){return this.schemaService.getSchemaForResource(e.uri,t).then(function(r){var n=[];if(r)for(var 
i={},a=0,s=t.getMatchingSchemas(r.schema);a<s.length;a++){var c=s[a];if(!c.inverted&&c.schema&&("color"===c.schema.format||"color-hex"===c.schema.format)&&c.node&&"string"===c.node.type){var u=String(c.node.offset);if(!i[u]){var f=Fe(ge(c.node));if(f){var l=o.create(e.positionAt(c.node.offset),e.positionAt(c.node.offset+c.node.length));n.push({color:f,range:l})}i[u]=!0}}}return n})},e.prototype.getColorPresentations=function(e,t,r,n){var o,i=[],a=Math.round(255*r.red),s=Math.round(255*r.green),c=Math.round(255*r.blue);function f(e){var t=e.toString(16);return 2!==t.length?"0"+t:t}return o=1===r.alpha?"#"+f(a)+f(s)+f(c):"#"+f(a)+f(s)+f(c)+f(Math.round(255*r.alpha)),i.push({label:o,textEdit:u.replace(n,JSON.stringify(o))}),i},e}(),We=K(),Re={schemaAssociations:{},schemas:{"http://json-schema.org/draft-04/schema#":{title:We("schema.json","Describes a JSON file using a schema. See json-schema.org for more info."),$schema:"http://json-schema.org/draft-04/schema#",definitions:{schemaArray:{type:"array",minItems:1,items:{$ref:"#"}},positiveInteger:{type:"integer",minimum:0},positiveIntegerDefault0:{allOf:[{$ref:"#/definitions/positiveInteger"},{default:0}]},simpleTypes:{type:"string",enum:["array","boolean","integer","null","number","object","string"]},stringArray:{type:"array",items:{type:"string"},minItems:1,uniqueItems:!0}},type:"object",properties:{id:{type:"string",format:"uri",description:We("schema.json.id","A unique identifier for the schema.")},$schema:{type:"string",format:"uri",description:We("schema.json.$schema","The schema to verify this document against ")},title:{type:"string",description:We("schema.json.title","A descriptive title of the element")},description:{type:"string",description:We("schema.json.description","A long description of the element. Used in hover menus and suggestions.")},default:{description:We("schema.json.default","A default value. 
Used by suggestions.")},multipleOf:{type:"number",minimum:0,exclusiveMinimum:!0,description:We("schema.json.multipleOf","A number that should cleanly divide the current value (i.e. have no remainder)")},maximum:{type:"number",description:We("schema.json.maximum","The maximum numerical value, inclusive by default.")},exclusiveMaximum:{type:"boolean",default:!1,description:We("schema.json.exclusiveMaximum","Makes the maximum property exclusive.")},minimum:{type:"number",description:We("schema.json.minimum","The minimum numerical value, inclusive by default.")},exclusiveMinimum:{type:"boolean",default:!1,description:We("schema.json.exclusiveMininum","Makes the minimum property exclusive.")},maxLength:{allOf:[{$ref:"#/definitions/positiveInteger"}],description:We("schema.json.maxLength","The maximum length of a string.")},minLength:{allOf:[{$ref:"#/definitions/positiveIntegerDefault0"}],description:We("schema.json.minLength","The minimum length of a string.")},pattern:{type:"string",format:"regex",description:We("schema.json.pattern","A regular expression to match the string against. It is not implicitly anchored.")},additionalItems:{anyOf:[{type:"boolean"},{$ref:"#"}],default:{},description:We("schema.json.additionalItems","For arrays, only when items is set as an array. If it is a schema, then this schema validates items after the ones specified by the items array. If it is false, then additional items will cause validation to fail.")},items:{anyOf:[{$ref:"#"},{$ref:"#/definitions/schemaArray"}],default:{},description:We("schema.json.items","For arrays. Can either be a schema to validate every element against or an array of schemas to validate each item against in order (the first schema will validate the first element, the second schema will validate the second element, and so on.")},maxItems:{allOf:[{$ref:"#/definitions/positiveInteger"}],description:We("schema.json.maxItems","The maximum number of items that can be inside an array. 
Inclusive.")},minItems:{allOf:[{$ref:"#/definitions/positiveIntegerDefault0"}],description:We("schema.json.minItems","The minimum number of items that can be inside an array. Inclusive.")},uniqueItems:{type:"boolean",default:!1,description:We("schema.json.uniqueItems","If all of the items in the array must be unique. Defaults to false.")},maxProperties:{allOf:[{$ref:"#/definitions/positiveInteger"}],description:We("schema.json.maxProperties","The maximum number of properties an object can have. Inclusive.")},minProperties:{allOf:[{$ref:"#/definitions/positiveIntegerDefault0"}],description:We("schema.json.minProperties","The minimum number of properties an object can have. Inclusive.")},required:{allOf:[{$ref:"#/definitions/stringArray"}],description:We("schema.json.required","An array of strings that lists the names of all properties required on this object.")},additionalProperties:{anyOf:[{type:"boolean"},{$ref:"#"}],default:{},description:We("schema.json.additionalProperties","Either a schema or a boolean. If a schema, then used to validate all properties not matched by 'properties' or 'patternProperties'. If false, then any properties not matched by either will cause this schema to fail.")},definitions:{type:"object",additionalProperties:{$ref:"#"},default:{},description:We("schema.json.definitions","Not used for validation. 
Place subschemas here that you wish to reference inline with $ref")},properties:{type:"object",additionalProperties:{$ref:"#"},default:{},description:We("schema.json.properties","A map of property names to schemas for each property.")},patternProperties:{type:"object",additionalProperties:{$ref:"#"},default:{},description:We("schema.json.patternProperties","A map of regular expressions on property names to schemas for matching properties.")},dependencies:{type:"object",additionalProperties:{anyOf:[{$ref:"#"},{$ref:"#/definitions/stringArray"}]},description:We("schema.json.dependencies","A map of property names to either an array of property names or a schema. An array of property names means the property named in the key depends on the properties in the array being present in the object in order to be valid. If the value is a schema, then the schema is only applied to the object if the property in the key exists on the object.")},enum:{type:"array",minItems:1,uniqueItems:!0,description:We("schema.json.enum","The set of literal values that are valid")},type:{anyOf:[{$ref:"#/definitions/simpleTypes"},{type:"array",items:{$ref:"#/definitions/simpleTypes"},minItems:1,uniqueItems:!0}],description:We("schema.json.type","Either a string of one of the basic schema types (number, integer, null, array, object, boolean, string) or an array of strings specifying a subset of those types.")},format:{anyOf:[{type:"string",description:We("schema.json.format","Describes the format expected for the value."),enum:["date-time","uri","email","hostname","ipv4","ipv6","regex"]},{type:"string"}]},allOf:{allOf:[{$ref:"#/definitions/schemaArray"}],description:We("schema.json.allOf","An array of schemas, all of which must match.")},anyOf:{allOf:[{$ref:"#/definitions/schemaArray"}],description:We("schema.json.anyOf","An array of schemas, where at least one must match.")},oneOf:{allOf:[{$ref:"#/definitions/schemaArray"}],description:We("schema.json.oneOf","An array of schemas, exactly one of 
which must match.")},not:{allOf:[{$ref:"#"}],description:We("schema.json.not","A schema which must not match.")}},dependencies:{exclusiveMaximum:["maximum"],exclusiveMinimum:["minimum"]},default:{}}}},$e=K(),Ue=function(){function e(e){try{this.patternRegExp=new RegExp(function(e){return e.replace(/[\-\\\{\}\+\?\|\^\$\.\,\[\]\(\)\#\s]/g,"\\$&").replace(/[\*]/g,".*")}(e)+"$")}catch(e){this.patternRegExp=null}this.schemas=[]}return e.prototype.addSchema=function(e){this.schemas.push(e)},e.prototype.matchesPattern=function(e){return this.patternRegExp&&this.patternRegExp.test(e)},e.prototype.getSchemas=function(){return this.schemas},e}(),Le=function(){function e(e,t,r){this.service=e,this.url=t,r&&(this.unresolvedSchema=this.service.promise.resolve(new qe(r)))}return e.prototype.getUnresolvedSchema=function(){return this.unresolvedSchema||(this.unresolvedSchema=this.service.loadSchema(this.url)),this.unresolvedSchema},e.prototype.getResolvedSchema=function(){var e=this;return this.resolvedSchema||(this.resolvedSchema=this.getUnresolvedSchema().then(function(t){return e.service.resolveSchemaContent(t,e.url)})),this.resolvedSchema},e.prototype.clearSchema=function(){this.resolvedSchema=null,this.unresolvedSchema=null},e}(),qe=function(){return function(e,t){void 0===t&&(t=[]),this.schema=e,this.errors=t}}(),He=function(){function e(e,t){void 0===t&&(t=[]),this.schema=e,this.errors=t}return e.prototype.getSection=function(e){return he(this.getSectionRecursive(e,this.schema))},e.prototype.getSectionRecursive=function(e,t){var r=this;if(!t||"boolean"==typeof t||0===e.length)return t;var n=e.shift();if(t.properties&&(t.properties[n],1))return this.getSectionRecursive(e,t.properties[n]);if(t.patternProperties)Object.keys(t.patternProperties).forEach(function(o){if(new RegExp(o).test(n))return r.getSectionRecursive(e,t.patternProperties[o])});else{if("object"==typeof t.additionalProperties)return 
this.getSectionRecursive(e,t.additionalProperties);if(n.match("[0-9]+"))if(Array.isArray(t.items)){var o=parseInt(n,10);if(!isNaN(o)&&t.items[o])return this.getSectionRecursive(e,t.items[o])}else if(t.items)return this.getSectionRecursive(e,t.items)}return null},e}(),Be=function(){function e(e,t,r){this.contextService=t,this.requestService=e,this.promiseConstructor=r||Promise,this.callOnDispose=[],this.contributionSchemas={},this.contributionAssociations={},this.schemasById={},this.filePatternAssociations=[],this.filePatternAssociationById={},this.registeredSchemasIds={}}return e.prototype.getRegisteredSchemaIds=function(e){return Object.keys(this.registeredSchemasIds).filter(function(t){var r=X.a.parse(t).scheme;return"schemaservice"!==r&&(!e||e(r))})},Object.defineProperty(e.prototype,"promise",{get:function(){return this.promiseConstructor},enumerable:!0,configurable:!0}),e.prototype.dispose=function(){for(;this.callOnDispose.length>0;)this.callOnDispose.pop()()},e.prototype.onResourceChange=function(e){e=this.normalizeId(e);var t=this.schemasById[e];return!!t&&(t.clearSchema(),!0)},e.prototype.normalizeId=function(e){return X.a.parse(e).toString()},e.prototype.setSchemaContributions=function(e){var t=this;if(e.schemas){var r=e.schemas;for(var n in r){var o=this.normalizeId(n);this.contributionSchemas[o]=this.addSchemaHandle(o,r[n])}}if(e.schemaAssociations){var i=e.schemaAssociations;for(var a in i){var s=i[a];this.contributionAssociations[a]=s;var c=this.getOrAddFilePatternAssociation(a);s.forEach(function(e){var r=t.normalizeId(e);c.addSchema(r)})}}},e.prototype.addSchemaHandle=function(e,t){var r=new Le(this,e,t);return this.schemasById[e]=r,r},e.prototype.getOrAddSchemaHandle=function(e,t){return this.schemasById[e]||this.addSchemaHandle(e,t)},e.prototype.getOrAddFilePatternAssociation=function(e){var t=this.filePatternAssociationById[e];return t||(t=new 
Ue(e),this.filePatternAssociationById[e]=t,this.filePatternAssociations.push(t)),t},e.prototype.registerExternalSchema=function(e,t,r){var n=this;void 0===t&&(t=null);var o=this.normalizeId(e);return this.registeredSchemasIds[o]=!0,t&&t.forEach(function(e){n.getOrAddFilePatternAssociation(e).addSchema(o)}),r?this.addSchemaHandle(o,r):this.getOrAddSchemaHandle(o)},e.prototype.clearExternalSchemas=function(){var e=this;for(var t in this.schemasById={},this.filePatternAssociations=[],this.filePatternAssociationById={},this.registeredSchemasIds={},this.contributionSchemas)this.schemasById[t]=this.contributionSchemas[t],this.registeredSchemasIds[t]=!0;for(var r in this.contributionAssociations){var n=this.getOrAddFilePatternAssociation(r);this.contributionAssociations[r].forEach(function(t){var r=e.normalizeId(t);n.addSchema(r)})}},e.prototype.getResolvedSchema=function(e){var t=this.normalizeId(e),r=this.schemasById[t];return r?r.getResolvedSchema():this.promise.resolve(null)},e.prototype.loadSchema=function(e){if(!this.requestService){var t=$e("json.schema.norequestservice","Unable to load schema from '{0}'. 
No schema request service available",Je(e));return this.promise.resolve(new qe({},[t]))}return this.requestService(e).then(function(t){if(!t){var r=$e("json.schema.nocontent","Unable to load schema from '{0}': No content.",Je(e));return new qe({},[r])}var n,o=[];n=B(t,o);var i=o.length?[$e("json.schema.invalidFormat","Unable to parse content from '{0}': Parse error at offset {1}.",Je(e),o[0].offset)]:[];return new qe(n,i)},function(t){var r=$e("json.schema.unabletoload","Unable to load schema from '{0}': {1}",Je(e),t.toString());return new qe({},[r])})},e.prototype.resolveSchemaContent=function(e,t){var r=this,n=e.errors.slice(0),o=e.schema,i=this.contextService,a=function(e,t,r,o){var i=function(e,t){if(!t)return e;var r=e;return"/"===t[0]&&(t=t.substr(1)),t.split("/").some(function(e){return!(r=r[e])}),r}(t,o);if(i)for(var a in i)i.hasOwnProperty(a)&&!e.hasOwnProperty(a)&&(e[a]=i[a]);else n.push($e("json.schema.invalidref","$ref '{0}' in '{1}' can not be resolved.",o,r))},s=function(e,t,o,s){return i&&!/^\w+:\/\/.*/.test(t)&&(t=i.resolveRelativePath(t,s)),t=r.normalizeId(t),r.getOrAddSchemaHandle(t).getUnresolvedSchema().then(function(r){if(r.errors.length){var i=o?t+"#"+o:t;n.push($e("json.schema.problemloadingref","Problems loading reference '{0}': {1}",i,r.errors[0]))}return a(e,r.schema,t,o),c(e,r.schema,t)})},c=function(e,t,n){if(!e||"object"!=typeof e)return Promise.resolve(null);for(var o=[e],i=[],c=[],u=function(e){for(;e.$ref;){var r=e.$ref.split("#",2);if(delete e.$ref,r[0].length>0)return void c.push(s(e,r[0],r[1],n));a(e,t,n,r[1])}!function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];for(var r=0,n=e;r<n.length;r++){var i=n[r];"object"==typeof i&&o.push(i)}}(e.items,e.additionalProperties,e.not,e.contains,e.propertyNames),function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];for(var r=0,n=e;r<n.length;r++){var i=n[r];if("object"==typeof i)for(var a in i){var s=i[a];"object"==typeof 
s&&o.push(s)}}}(e.definitions,e.properties,e.patternProperties,e.dependencies),function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];for(var r=0,n=e;r<n.length;r++){var i=n[r];if(Array.isArray(i))for(var a=0,s=i;a<s.length;a++){var c=s[a];"object"==typeof c&&o.push(c)}}}(e.anyOf,e.allOf,e.oneOf,e.items)};o.length;){var f=o.pop();i.indexOf(f)>=0||(i.push(f),u(f))}return r.promise.all(c)};return c(o,o,t).then(function(e){return new He(o,n)})},e.prototype.getSchemaForResource=function(e,t){if(t&&t.root&&"object"===t.root.type){var r=t.root.properties.filter(function(e){return"$schema"===e.keyNode.value&&e.valueNode&&"string"===e.valueNode.type});if(r.length>0){var n=ge(r[0].valueNode);if(n&&function(e,t){if(e.length<t.length)return!1;for(var r=0;r<t.length;r++)if(e[r]!==t[r])return!1;return!0}(n,".")&&this.contextService&&(n=this.contextService.resolveRelativePath(n,e)),n){var o=this.normalizeId(n);return this.getOrAddSchemaHandle(o).getResolvedSchema()}}}for(var i=Object.create(null),a=[],s=0,c=this.filePatternAssociations;s<c.length;s++){var u=c[s];if(u.matchesPattern(e))for(var f=0,l=u.getSchemas();f<l.length;f++){var h=l[f];i[h]||(a.push(h),i[h]=!0)}}return a.length>0?this.createCombinedSchema(e,a).getResolvedSchema():this.promise.resolve(null)},e.prototype.createCombinedSchema=function(e,t){if(1===t.length)return this.getOrAddSchemaHandle(t[0]);var r="schemaservice://combinedSchema/"+encodeURIComponent(e),n={allOf:t.map(function(e){return{$ref:e}})};return this.addSchemaHandle(r,n)},e}();function Je(e){try{var t=X.a.parse(e);if("file"===t.scheme)return t.fsPath}catch(e){}return e}function ze(e){var t=e.promiseConstructor||Promise,r=new Be(e.schemaRequestService,e.workspaceContext,t);r.setSchemaContributions(Re);var n=new Te(r,e.contributions,t),i=new Ae(r,e.contributions,t),a=new De(r),s=new 
we(r,t);return{configure:function(e){r.clearExternalSchemas(),e.schemas&&e.schemas.forEach(function(e){r.registerExternalSchema(e.uri,e.fileMatch,e.schema)}),s.configure(e)},resetSchema:function(e){return r.onResourceChange(e)},doValidation:s.doValidation.bind(s),parseJSONDocument:function(e){return Se(e,{collectComments:!0})},newJSONDocument:function(e,t){return function(e,t){return void 0===t&&(t=[]),new be(e,[],[],t)}(e,t)},doResolve:n.doResolve.bind(n),doComplete:n.doComplete.bind(n),findDocumentSymbols:a.findDocumentSymbols.bind(a),findColorSymbols:function(e,t){return a.findDocumentColors(e,t).then(function(e){return e.map(function(e){return e.range})})},findDocumentColors:a.findDocumentColors.bind(a),getColorPresentations:a.getColorPresentations.bind(a),doHover:i.doHover.bind(i),format:function(e,t,r){var n=void 0;if(t){var i=e.offsetAt(t.start);n={offset:i,length:e.offsetAt(t.end)-i}}var a={tabSize:r?r.tabSize:4,insertSpaces:!r||r.insertSpaces,eol:"\n"};return function(e,t,r){return $(e,t,r)}(e.getText(),n,a).map(function(t){return u.replace(o.create(e.positionAt(t.offset),e.positionAt(t.offset+t.length)),t.content)})}}}var Ke=monaco.Promise,Ge=function(){function e(e){this.wrapped=new monaco.Promise(e)}return e.prototype.then=function(e,t){return this.wrapped.then(e,t)},e.prototype.getWrapped=function(){return this.wrapped},e.prototype.cancel=function(){this.wrapped.cancel()},e.resolve=function(e){return monaco.Promise.as(e)},e.reject=function(e){return monaco.Promise.wrapError(e)},e.all=function(e){return monaco.Promise.join(e)},e}(),Ze=function(){function e(e,t){this._ctx=e,this._languageSettings=t.languageSettings,this._languageId=t.languageId,this._languageService=ze({promiseConstructor:Ge}),this._languageService.configure(this._languageSettings)}return e.prototype.doValidation=function(e){var t=this._getTextDocument(e);if(t){var r=this._languageService.parseJSONDocument(t);return this._languageService.doValidation(t,r)}return 
Ke.as([])},e.prototype.doComplete=function(e,t){var r=this._getTextDocument(e),n=this._languageService.parseJSONDocument(r);return this._languageService.doComplete(r,t,n)},e.prototype.doResolve=function(e){return this._languageService.doResolve(e)},e.prototype.doHover=function(e,t){var r=this._getTextDocument(e),n=this._languageService.parseJSONDocument(r);return this._languageService.doHover(r,t,n)},e.prototype.format=function(e,t,r){var n=this._getTextDocument(e),o=this._languageService.format(n,t,r);return Ke.as(o)},e.prototype.resetSchema=function(e){return Ke.as(this._languageService.resetSchema(e))},e.prototype.findDocumentSymbols=function(e){var t=this._getTextDocument(e),r=this._languageService.parseJSONDocument(t),n=this._languageService.findDocumentSymbols(t,r);return Ke.as(n)},e.prototype.findDocumentColors=function(e){var t=this._getTextDocument(e),r=this._languageService.parseJSONDocument(t),n=this._languageService.findDocumentColors(t,r);return Ke.as(n)},e.prototype.getColorPresentations=function(e,t,r){var n=this._getTextDocument(e),o=this._languageService.parseJSONDocument(n),i=this._languageService.getColorPresentations(n,o,t,r);return Ke.as(i)},e.prototype._getTextDocument=function(e){for(var t=0,r=this._ctx.getMirrorModels();t<r.length;t++){var n=r[t];if(n.uri.toString()===e)return j.create(e,this._languageId,n.version,n.getValue())}return null},e}();self.onmessage=function(){l.initialize(function(e,t){return new Ze(e,t)})}}]);
|
||
models.rs
|
mod definitions;
mod requests;
pub mod swagger;
pub use self::definitions::*;
pub use self::requests::*;
use reqwest::Method;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
/// Common interface implemented by every REST request type in this crate.
///
/// Implementors declare the HTTP method, the endpoint path, whether the
/// request must be signed, whether it carries a serialized payload, and the
/// type its response body deserializes into.
pub trait Request: Serialize {
    /// HTTP method used when sending this request.
    const METHOD: Method;
    /// Whether this request must be signed (presumably with API credentials
    /// — confirm against the client code that consumes this flag).
    /// Defaults to `false`.
    const SIGNED: bool = false;
    /// Endpoint path for this request.
    const ENDPOINT: &'static str;
    /// Whether a serialized payload accompanies this request.
    /// Defaults to `true`.
    const HAS_PAYLOAD: bool = true;
    /// Type the HTTP response body deserializes into.
    type Response: DeserializeOwned;
    /// Convenience inverse of [`Self::HAS_PAYLOAD`].
    #[inline]
    fn no_payload(&self) -> bool {
        !Self::HAS_PAYLOAD
    }
}
/// Side of an order or trade.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum Side {
    Buy,
    Sell,
    /// BitMEX sometimes sends an empty string where a side is expected;
    /// this variant absorbs that case instead of failing deserialization.
    #[serde(rename = "")]
    Unknown, // BitMEX sometimes has empty side due to unknown reason
}
/// Candle ("bin") interval for time-bucketed endpoints.
///
/// Serialized as the short interval string the API expects
/// (`"1m"`, `"5m"`, `"1h"`, `"1d"`).
// `Copy` added for consistency: every sibling enum in this module derives it,
// and the type is a plain fieldless enum. Derive order also normalized to
// match the other enums (`Deserialize, Serialize`).
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum BinSize {
    /// One minute.
    #[serde(rename = "1m")]
    M1,
    /// Five minutes.
    #[serde(rename = "5m")]
    M5,
    /// One hour.
    #[serde(rename = "1h")]
    H1,
    /// One day.
    #[serde(rename = "1d")]
    D1,
}
impl Default for BinSize {
fn default() -> Self {
self::BinSize::D1
}
}
/// Type of peg used by pegged orders.
///
/// http://fixwiki.org/fixwiki/PegPriceType
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum PegPriceType {
    LastPeg,
    OpeningPeg,
    MidPricePeg,
    MarketPeg,
    PrimaryPeg,
    PegToVWAP,
    TrailingStopPeg,
    PegToLimitPrice,
    ShortSaleMinPricePeg,
    /// BitMEX sometimes sends an empty string for this field; this variant
    /// absorbs that case instead of failing deserialization.
    #[serde(rename = "")]
    Unknown, // BitMEX sometimes has empty due to unknown reason
}
/// Order type. Variant names follow FIX `OrdType` naming conventions.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum OrdType {
    Market,
    Limit,
    Stop,
    StopLimit,
    MarketIfTouched,
    LimitIfTouched,
    MarketWithLeftOverAsLimit,
    Pegged,
}
/// How long an order remains active before it is executed or expires.
///
/// https://www.onixs.biz/fix-dictionary/5.0.SP2/tagNum_59.html
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum TimeInForce {
    Day,
    GoodTillCancel,
    AtTheOpening,
    ImmediateOrCancel,
    FillOrKill,
    GoodTillCrossing,
    GoodTillDate,
    AtTheClose,
    GoodThroughCrossing,
    AtCrossing,
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum ExecInst {
ParticipateDoNotInitiate,
AllOrNone,
MarkPrice,
IndexPrice,
LastPrice,
Close,
ReduceOnly,
Fixed,
#[serde(rename = "")]
Unknown, // BitMEX sometimes has empty due to unknown reason
|
}
/// Contingency type for linked (contingent) orders.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub enum ContingencyType {
    OneCancelsTheOther,
    OneTriggersTheOther,
    OneUpdatesTheOtherAbsolute,
    OneUpdatesTheOtherProportional,
    /// BitMEX sometimes sends an empty string for this field; this variant
    /// absorbs that case instead of failing deserialization.
    #[serde(rename = "")]
    Unknown, // BitMEX sometimes has empty due to unknown reason
}
| |
v2.ts
|
/**
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* jshint maxlen: false */
const createAPIRequest = require('../../lib/apirequest');
const utils = require('../../lib/utils');
/**
* Google Play Developer API
*
* Lets Android application developers access their Google Play accounts.
*
* @example
* const google = require('googleapis');
* const androidpublisher = google.androidpublisher('v2');
*
* @namespace androidpublisher
* @type {Function}
* @version v2
* @variation v2
* @param {object=} options Options for Androidpublisher
*/
function Androidpublisher(options) { // eslint-disable-line
const self = this;
self._options = options || {};
self.edits = {
/**
* androidpublisher.edits.commit
*
* @desc Commits/applies the changes made in this edit back to the app.
*
* @alias androidpublisher.edits.commit
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
commit: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}:commit',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
|
/**
* androidpublisher.edits.delete
*
* @desc Deletes an edit for an app. Creating a new edit will automatically delete any of your previous edits so this method need only be called if you want to preemptively abandon an edit.
*
* @alias androidpublisher.edits.delete
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.get
*
* @desc Returns information about the edit specified. Calls will fail if the edit is no long active (e.g. has been deleted, superseded or expired).
*
* @alias androidpublisher.edits.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.insert
*
* @desc Creates a new edit for an app, populated with the app's current state.
*
* @alias androidpublisher.edits.insert
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).AppEdit} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
insert: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName'],
pathParams: ['packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.validate
*
* @desc Checks that the edit can be successfully committed. The edit's changes are not applied to the live app.
*
* @alias androidpublisher.edits.validate
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
validate: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}:validate',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
apklistings: {
/**
* androidpublisher.edits.apklistings.delete
*
* @desc Deletes the APK-specific localized listing for a specified APK and language code.
*
* @alias androidpublisher.edits.apklistings.delete
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The APK version code whose APK-specific listings should be read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the APK-specific localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/listings/{language}',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'language'],
pathParams: ['apkVersionCode', 'editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apklistings.deleteall
*
* @desc Deletes all the APK-specific localized listings for a specified APK.
*
* @alias androidpublisher.edits.apklistings.deleteall
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The APK version code whose APK-specific listings should be read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
deleteall: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/listings',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode'],
pathParams: ['apkVersionCode', 'editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apklistings.get
*
* @desc Fetches the APK-specific localized listing for a specified APK and language code.
*
* @alias androidpublisher.edits.apklistings.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The APK version code whose APK-specific listings should be read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the APK-specific localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/listings/{language}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'language'],
pathParams: ['apkVersionCode', 'editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apklistings.list
*
* @desc Lists all the APK-specific localized listings for a specified APK.
*
* @alias androidpublisher.edits.apklistings.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The APK version code whose APK-specific listings should be read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/listings',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode'],
pathParams: ['apkVersionCode', 'editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apklistings.patch
*
* @desc Updates or creates the APK-specific localized listing for a specified APK and language code. This method supports patch semantics.
*
* @alias androidpublisher.edits.apklistings.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The APK version code whose APK-specific listings should be read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the APK-specific localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).ApkListing} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/listings/{language}',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'language'],
pathParams: ['apkVersionCode', 'editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apklistings.update
*
* @desc Updates or creates the APK-specific localized listing for a specified APK and language code.
*
* @alias androidpublisher.edits.apklistings.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The APK version code whose APK-specific listings should be read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the APK-specific localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).ApkListing} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/listings/{language}',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'language'],
pathParams: ['apkVersionCode', 'editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
apks: {
/**
* androidpublisher.edits.apks.addexternallyhosted
*
* @desc Creates a new APK without uploading the APK itself to Google Play, instead hosting the APK at a specified URL. This function is only available to enterprises using Google Play for Work whose application is configured to restrict distribution to the enterprise domain.
*
* @alias androidpublisher.edits.apks.addexternallyhosted
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).ApksAddExternallyHostedRequest} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
addexternallyhosted: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/externallyHosted',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apks.list
*
*
*
* @alias androidpublisher.edits.apks.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.apks.upload
*
*
*
* @alias androidpublisher.edits.apks.upload
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} params.media Media object
* @param {string} params.media.mimeType Media mime-type
* @param {string|object} params.media.body Media body contents
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
upload: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks',
method: 'POST'
}, options),
params: params,
mediaUrl: 'https://www.googleapis.com/upload/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks',
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
deobfuscationfiles: {
/**
* androidpublisher.edits.deobfuscationfiles.upload
*
* @desc Uploads the deobfuscation file of the specified APK. If a deobfuscation file already exists, it will be replaced.
*
* @alias androidpublisher.edits.deobfuscationfiles.upload
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The version code of the APK whose deobfuscation file is being uploaded.
* @param {string} params.deobfuscationFileType
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier of the Android app for which the deobfuscatiuon files are being uploaded; for example, "com.spiffygame".
* @param {object} params.media Media object
* @param {string} params.media.mimeType Media mime-type
* @param {string|object} params.media.body Media body contents
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
upload: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/deobfuscationFiles/{deobfuscationFileType}',
method: 'POST'
}, options),
params: params,
mediaUrl: 'https://www.googleapis.com/upload/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/deobfuscationFiles/{deobfuscationFileType}',
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'deobfuscationFileType'],
pathParams: ['apkVersionCode', 'deobfuscationFileType', 'editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
details: {
/**
* androidpublisher.edits.details.get
*
* @desc Fetches app details for this edit. This includes the default language and developer support contact information.
*
* @alias androidpublisher.edits.details.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/details',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.details.patch
*
* @desc Updates app details for this edit. This method supports patch semantics.
*
* @alias androidpublisher.edits.details.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).AppDetails} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/details',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.details.update
*
* @desc Updates app details for this edit.
*
* @alias androidpublisher.edits.details.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).AppDetails} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/details',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
expansionfiles: {
/**
* androidpublisher.edits.expansionfiles.get
*
* @desc Fetches the Expansion File configuration for the APK specified.
*
* @alias androidpublisher.edits.expansionfiles.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The version code of the APK whose Expansion File configuration is being read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.expansionFileType
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/expansionFiles/{expansionFileType}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'expansionFileType'],
pathParams: ['apkVersionCode', 'editId', 'expansionFileType', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.expansionfiles.patch
*
* @desc Updates the APK's Expansion File configuration to reference another APK's Expansion Files. To add a new Expansion File use the Upload method. This method supports patch semantics.
*
* @alias androidpublisher.edits.expansionfiles.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The version code of the APK whose Expansion File configuration is being read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.expansionFileType
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).ExpansionFile} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/expansionFiles/{expansionFileType}',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'expansionFileType'],
pathParams: ['apkVersionCode', 'editId', 'expansionFileType', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.expansionfiles.update
*
* @desc Updates the APK's Expansion File configuration to reference another APK's Expansion Files. To add a new Expansion File use the Upload method.
*
* @alias androidpublisher.edits.expansionfiles.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The version code of the APK whose Expansion File configuration is being read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.expansionFileType
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).ExpansionFile} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/expansionFiles/{expansionFileType}',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'expansionFileType'],
pathParams: ['apkVersionCode', 'editId', 'expansionFileType', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.expansionfiles.upload
*
* @desc Uploads and attaches a new Expansion File to the APK specified.
*
* @alias androidpublisher.edits.expansionfiles.upload
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer} params.apkVersionCode The version code of the APK whose Expansion File configuration is being read or modified.
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.expansionFileType
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} params.media Media object
* @param {string} params.media.mimeType Media mime-type
* @param {string|object} params.media.body Media body contents
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
upload: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/expansionFiles/{expansionFileType}',
method: 'POST'
}, options),
params: params,
mediaUrl: 'https://www.googleapis.com/upload/androidpublisher/v2/applications/{packageName}/edits/{editId}/apks/{apkVersionCode}/expansionFiles/{expansionFileType}',
requiredParams: ['packageName', 'editId', 'apkVersionCode', 'expansionFileType'],
pathParams: ['apkVersionCode', 'editId', 'expansionFileType', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
images: {
/**
* androidpublisher.edits.images.delete
*
* @desc Deletes the image (specified by id) from the edit.
*
* @alias androidpublisher.edits.images.delete
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.imageId Unique identifier an image within the set of images attached to this edit.
* @param {string} params.imageType
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing whose images are to read or modified. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}/{imageType}/{imageId}',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language', 'imageType', 'imageId'],
pathParams: ['editId', 'imageId', 'imageType', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.images.deleteall
*
* @desc Deletes all images for the specified language and image type.
*
* @alias androidpublisher.edits.images.deleteall
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.imageType
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing whose images are to read or modified. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
deleteall: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}/{imageType}',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language', 'imageType'],
pathParams: ['editId', 'imageType', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.images.list
*
* @desc Lists all images for the specified language and image type.
*
* @alias androidpublisher.edits.images.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.imageType
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing whose images are to read or modified. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}/{imageType}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language', 'imageType'],
pathParams: ['editId', 'imageType', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.images.upload
*
* @desc Uploads a new image and adds it to the list of images for the specified language and image type.
*
* @alias androidpublisher.edits.images.upload
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.imageType
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing whose images are to read or modified. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} params.media Media object
* @param {string} params.media.mimeType Media mime-type
* @param {string|object} params.media.body Media body contents
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
upload: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}/{imageType}',
method: 'POST'
}, options),
params: params,
mediaUrl: 'https://www.googleapis.com/upload/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}/{imageType}',
requiredParams: ['packageName', 'editId', 'language', 'imageType'],
pathParams: ['editId', 'imageType', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
listings: {
/**
* androidpublisher.edits.listings.delete
*
* @desc Deletes the specified localized store listing from an edit.
*
* @alias androidpublisher.edits.listings.delete
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language'],
pathParams: ['editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.listings.deleteall
*
* @desc Deletes all localized listings from an edit.
*
* @alias androidpublisher.edits.listings.deleteall
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
deleteall: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.listings.get
*
* @desc Fetches information about a localized store listing.
*
* @alias androidpublisher.edits.listings.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language'],
pathParams: ['editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.listings.list
*
* @desc Returns all of the localized store listings attached to this edit.
*
* @alias androidpublisher.edits.listings.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.listings.patch
*
* @desc Creates or updates a localized store listing. This method supports patch semantics.
*
* @alias androidpublisher.edits.listings.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).Listing} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language'],
pathParams: ['editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.listings.update
*
* @desc Creates or updates a localized store listing.
*
* @alias androidpublisher.edits.listings.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.language The language code (a BCP-47 language tag) of the localized listing to read or modify. For example, to select Austrian German, pass "de-AT".
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {androidpublisher(v2).Listing} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/listings/{language}',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'language'],
pathParams: ['editId', 'language', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
testers: {
/**
* androidpublisher.edits.testers.get
*
*
*
* @alias androidpublisher.edits.testers.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {string} params.track
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/testers/{track}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'track'],
pathParams: ['editId', 'packageName', 'track'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.testers.patch
*
*
*
* @alias androidpublisher.edits.testers.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {string} params.track
* @param {androidpublisher(v2).Testers} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/testers/{track}',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'track'],
pathParams: ['editId', 'packageName', 'track'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.testers.update
*
*
*
* @alias androidpublisher.edits.testers.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {string} params.track
* @param {androidpublisher(v2).Testers} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/testers/{track}',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'track'],
pathParams: ['editId', 'packageName', 'track'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
tracks: {
/**
* androidpublisher.edits.tracks.get
*
* @desc Fetches the track configuration for the specified track type. Includes the APK version codes that are in this track.
*
* @alias androidpublisher.edits.tracks.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {string} params.track The track type to read or modify.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/tracks/{track}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'track'],
pathParams: ['editId', 'packageName', 'track'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.tracks.list
*
* @desc Lists all the track configurations for this edit.
*
* @alias androidpublisher.edits.tracks.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/tracks',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'editId'],
pathParams: ['editId', 'packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.tracks.patch
*
* @desc Updates the track configuration for the specified track type. When halted, the rollout track cannot be updated without adding new APKs, and adding new APKs will cause it to resume. This method supports patch semantics.
*
* @alias androidpublisher.edits.tracks.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {string} params.track The track type to read or modify.
* @param {androidpublisher(v2).Track} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/tracks/{track}',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'track'],
pathParams: ['editId', 'packageName', 'track'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.edits.tracks.update
*
* @desc Updates the track configuration for the specified track type. When halted, the rollout track cannot be updated without adding new APKs, and adding new APKs will cause it to resume.
*
* @alias androidpublisher.edits.tracks.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.editId Unique identifier for this edit.
* @param {string} params.packageName Unique identifier for the Android app that is being updated; for example, "com.spiffygame".
* @param {string} params.track The track type to read or modify.
* @param {androidpublisher(v2).Track} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/edits/{editId}/tracks/{track}',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'editId', 'track'],
pathParams: ['editId', 'packageName', 'track'],
context: self
};
return createAPIRequest(parameters, callback);
}
}
};
self.entitlements = {
/**
* androidpublisher.entitlements.list
*
* @desc Lists the user's current inapp item or subscription entitlements
*
* @alias androidpublisher.entitlements.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer=} params.maxResults
* @param {string} params.packageName The package name of the application the inapp product was sold in (for example, 'com.some.thing').
* @param {string=} params.productId The product id of the inapp product (for example, 'sku1'). This can be used to restrict the result set.
* @param {integer=} params.startIndex
* @param {string=} params.token
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/entitlements',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName'],
pathParams: ['packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
};
self.inappproducts = {
/**
* androidpublisher.inappproducts.batch
*
*
*
* @alias androidpublisher.inappproducts.batch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {androidpublisher(v2).InappproductsBatchRequest} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
batch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/inappproducts/batch',
method: 'POST'
}, options),
params: params,
requiredParams: [],
pathParams: [],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.inappproducts.delete
*
* @desc Delete an in-app product for an app.
*
* @alias androidpublisher.inappproducts.delete
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName Unique identifier for the Android app with the in-app product; for example, "com.spiffygame".
* @param {string} params.sku Unique identifier for the in-app product.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/inappproducts/{sku}',
method: 'DELETE'
}, options),
params: params,
requiredParams: ['packageName', 'sku'],
pathParams: ['packageName', 'sku'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.inappproducts.get
*
* @desc Returns information about the in-app product specified.
*
* @alias androidpublisher.inappproducts.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName
* @param {string} params.sku Unique identifier for the in-app product.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/inappproducts/{sku}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'sku'],
pathParams: ['packageName', 'sku'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.inappproducts.insert
*
* @desc Creates a new in-app product for an app.
*
* @alias androidpublisher.inappproducts.insert
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {boolean=} params.autoConvertMissingPrices If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false.
* @param {string} params.packageName Unique identifier for the Android app; for example, "com.spiffygame".
* @param {androidpublisher(v2).InAppProduct} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
insert: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/inappproducts',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName'],
pathParams: ['packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.inappproducts.list
*
* @desc List all the in-app products for an Android app, both subscriptions and managed in-app products..
*
* @alias androidpublisher.inappproducts.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer=} params.maxResults
* @param {string} params.packageName Unique identifier for the Android app with in-app products; for example, "com.spiffygame".
* @param {integer=} params.startIndex
* @param {string=} params.token
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/inappproducts',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName'],
pathParams: ['packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.inappproducts.patch
*
* @desc Updates the details of an in-app product. This method supports patch semantics.
*
* @alias androidpublisher.inappproducts.patch
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {boolean=} params.autoConvertMissingPrices If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false.
* @param {string} params.packageName Unique identifier for the Android app with the in-app product; for example, "com.spiffygame".
* @param {string} params.sku Unique identifier for the in-app product.
* @param {androidpublisher(v2).InAppProduct} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/inappproducts/{sku}',
method: 'PATCH'
}, options),
params: params,
requiredParams: ['packageName', 'sku'],
pathParams: ['packageName', 'sku'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.inappproducts.update
*
* @desc Updates the details of an in-app product.
*
* @alias androidpublisher.inappproducts.update
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {boolean=} params.autoConvertMissingPrices If true the prices for all regions targeted by the parent app that don't have a price specified for this in-app product will be auto converted to the target currency based on the default price. Defaults to false.
* @param {string} params.packageName Unique identifier for the Android app with the in-app product; for example, "com.spiffygame".
* @param {string} params.sku Unique identifier for the in-app product.
* @param {androidpublisher(v2).InAppProduct} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
update: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/inappproducts/{sku}',
method: 'PUT'
}, options),
params: params,
requiredParams: ['packageName', 'sku'],
pathParams: ['packageName', 'sku'],
context: self
};
return createAPIRequest(parameters, callback);
}
};
self.purchases = {
products: {
/**
* androidpublisher.purchases.products.get
*
* @desc Checks the purchase and consumption status of an inapp item.
*
* @alias androidpublisher.purchases.products.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName The package name of the application the inapp product was sold in (for example, 'com.some.thing').
* @param {string} params.productId The inapp product SKU (for example, 'com.some.thing.inapp1').
* @param {string} params.token The token provided to the user's device when the inapp product was purchased.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/products/{productId}/tokens/{token}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'productId', 'token'],
pathParams: ['packageName', 'productId', 'token'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
subscriptions: {
/**
* androidpublisher.purchases.subscriptions.cancel
*
* @desc Cancels a user's subscription purchase. The subscription remains valid until its expiration time.
*
* @alias androidpublisher.purchases.subscriptions.cancel
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName The package name of the application for which this subscription was purchased (for example, 'com.some.thing').
* @param {string} params.subscriptionId The purchased subscription ID (for example, 'monthly001').
* @param {string} params.token The token provided to the user's device when the subscription was purchased.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
cancel: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:cancel',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'subscriptionId', 'token'],
pathParams: ['packageName', 'subscriptionId', 'token'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.purchases.subscriptions.defer
*
* @desc Defers a user's subscription purchase until a specified future expiration time.
*
* @alias androidpublisher.purchases.subscriptions.defer
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName The package name of the application for which this subscription was purchased (for example, 'com.some.thing').
* @param {string} params.subscriptionId The purchased subscription ID (for example, 'monthly001').
* @param {string} params.token The token provided to the user's device when the subscription was purchased.
* @param {androidpublisher(v2).SubscriptionPurchasesDeferRequest} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
defer: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:defer',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'subscriptionId', 'token'],
pathParams: ['packageName', 'subscriptionId', 'token'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.purchases.subscriptions.get
*
* @desc Checks whether a user's subscription purchase is valid and returns its expiry time.
*
* @alias androidpublisher.purchases.subscriptions.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName The package name of the application for which this subscription was purchased (for example, 'com.some.thing').
* @param {string} params.subscriptionId The purchased subscription ID (for example, 'monthly001').
* @param {string} params.token The token provided to the user's device when the subscription was purchased.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'subscriptionId', 'token'],
pathParams: ['packageName', 'subscriptionId', 'token'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.purchases.subscriptions.refund
*
* @desc Refunds a user's subscription purchase, but the subscription remains valid until its expiration time and it will continue to recur.
*
* @alias androidpublisher.purchases.subscriptions.refund
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName The package name of the application for which this subscription was purchased (for example, 'com.some.thing').
* @param {string} params.subscriptionId The purchased subscription ID (for example, 'monthly001').
* @param {string} params.token The token provided to the user's device when the subscription was purchased.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
refund: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:refund',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'subscriptionId', 'token'],
pathParams: ['packageName', 'subscriptionId', 'token'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.purchases.subscriptions.revoke
*
* @desc Refunds and immediately revokes a user's subscription purchase. Access to the subscription will be terminated immediately and it will stop recurring.
*
* @alias androidpublisher.purchases.subscriptions.revoke
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName The package name of the application for which this subscription was purchased (for example, 'com.some.thing').
* @param {string} params.subscriptionId The purchased subscription ID (for example, 'monthly001').
* @param {string} params.token The token provided to the user's device when the subscription was purchased.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
revoke: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/subscriptions/{subscriptionId}/tokens/{token}:revoke',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'subscriptionId', 'token'],
pathParams: ['packageName', 'subscriptionId', 'token'],
context: self
};
return createAPIRequest(parameters, callback);
}
},
voidedpurchases: {
/**
* androidpublisher.purchases.voidedpurchases.list
*
* @desc Lists the purchases that were cancelled, refunded or charged-back.
*
* @alias androidpublisher.purchases.voidedpurchases.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string=} params.endTime The time, in milliseconds since the Epoch, of the newest voided in-app product purchase that you want to see in the response. The value of this parameter cannot be greater than the current time and is ignored if a pagination token is set. Default value is current time.
* @param {integer=} params.maxResults
* @param {string} params.packageName The package name of the application for which voided purchases need to be returned (for example, 'com.some.thing').
* @param {integer=} params.startIndex
* @param {string=} params.startTime The time, in milliseconds since the Epoch, of the oldest voided in-app product purchase that you want to see in the response. The value of this parameter cannot be older than 30 days and is ignored if a pagination token is set. Default value is current time minus 30 days.
* @param {string=} params.token
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/purchases/voidedpurchases',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName'],
pathParams: ['packageName'],
context: self
};
return createAPIRequest(parameters, callback);
}
}
};
self.reviews = {
/**
* androidpublisher.reviews.get
*
* @desc Returns a single review.
*
* @alias androidpublisher.reviews.get
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName Unique identifier for the Android app for which we want reviews; for example, "com.spiffygame".
* @param {string} params.reviewId
* @param {string=} params.translationLanguage
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/reviews/{reviewId}',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName', 'reviewId'],
pathParams: ['packageName', 'reviewId'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.reviews.list
*
* @desc Returns a list of reviews. Only reviews from last week will be returned.
*
* @alias androidpublisher.reviews.list
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {integer=} params.maxResults
* @param {string} params.packageName Unique identifier for the Android app for which we want reviews; for example, "com.spiffygame".
* @param {integer=} params.startIndex
* @param {string=} params.token
* @param {string=} params.translationLanguage
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/reviews',
method: 'GET'
}, options),
params: params,
requiredParams: ['packageName'],
pathParams: ['packageName'],
context: self
};
return createAPIRequest(parameters, callback);
},
/**
* androidpublisher.reviews.reply
*
* @desc Reply to a single review, or update an existing reply.
*
* @alias androidpublisher.reviews.reply
* @memberOf! androidpublisher(v2)
*
* @param {object} params Parameters for request
* @param {string} params.packageName Unique identifier for the Android app for which we want reviews; for example, "com.spiffygame".
* @param {string} params.reviewId
* @param {androidpublisher(v2).ReviewsReplyRequest} params.resource Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
reply: function (params, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
options || (options = {});
const parameters = {
options: utils.extend({
url: 'https://www.googleapis.com/androidpublisher/v2/applications/{packageName}/reviews/{reviewId}:reply',
method: 'POST'
}, options),
params: params,
requiredParams: ['packageName', 'reviewId'],
pathParams: ['packageName', 'reviewId'],
context: self
};
return createAPIRequest(parameters, callback);
}
};
}
/**
* @typedef Apk
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).ApkBinary} binary Information about the binary payload of this APK.
* @property {integer} versionCode The version code of the APK, as specified in the APK's manifest file.
*/
/**
* @typedef ApkBinary
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} sha1 A sha1 hash of the APK payload, encoded as a hex string and matching the output of the sha1sum command.
*/
/**
* @typedef ApkListing
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} language The language code, in BCP 47 format (eg "en-US").
* @property {string} recentChanges Describe what's new in your APK.
*/
/**
* @typedef ApkListingsListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} kind Identifies what kind of resource this is. Value: the fixed string "androidpublisher#apkListingsListResponse".
* @property {androidpublisher(v2).ApkListing[]} listings
*/
/**
* @typedef ApksAddExternallyHostedRequest
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).ExternallyHostedApk} externallyHostedApk The definition of the externally-hosted APK and where it is located.
*/
/**
* @typedef ApksAddExternallyHostedResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).ExternallyHostedApk} externallyHostedApk The definition of the externally-hosted APK and where it is located.
*/
/**
* @typedef ApksListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Apk[]} apks
* @property {string} kind Identifies what kind of resource this is. Value: the fixed string "androidpublisher#apksListResponse".
*/
/**
* @typedef AppDetails
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} contactEmail The user-visible support email for this app.
* @property {string} contactPhone The user-visible support telephone number for this app.
* @property {string} contactWebsite The user-visible website for this app.
* @property {string} defaultLanguage Default language code, in BCP 47 format (eg "en-US").
*/
/**
* @typedef AppEdit
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} expiryTimeSeconds The time at which the edit will expire and will be no longer valid for use in any subsequent API calls (encoded as seconds since the Epoch).
* @property {string} id The ID of the edit that can be used in subsequent API calls.
*/
/**
* @typedef Comment
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).DeveloperComment} developerComment A comment from a developer.
* @property {androidpublisher(v2).UserComment} userComment A comment from a user.
*/
/**
* @typedef DeobfuscationFile
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} symbolType The type of the deobfuscation file.
*/
/**
* @typedef DeobfuscationFilesUploadResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).DeobfuscationFile} deobfuscationFile
*/
/**
* @typedef DeveloperComment
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Timestamp} lastModified The last time at which this comment was updated.
* @property {string} text The content of the comment, i.e. reply body.
*/
/**
* @typedef DeviceMetadata
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} cpuMake Device CPU make e.g. "Qualcomm"
* @property {string} cpuModel Device CPU model e.g. "MSM8974"
* @property {string} deviceClass Device class (e.g. tablet)
* @property {integer} glEsVersion OpenGL version
* @property {string} manufacturer Device manufacturer (e.g. Motorola)
* @property {string} nativePlatform Comma separated list of native platforms (e.g. "arm", "arm7")
* @property {string} productName Device model name (e.g. Droid)
* @property {integer} ramMb Device RAM in Megabytes e.g. "2048"
* @property {integer} screenDensityDpi Screen density in DPI
* @property {integer} screenHeightPx Screen height in pixels
* @property {integer} screenWidthPx Screen width in pixels
*/
/**
* @typedef Entitlement
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} kind This kind represents an entitlement object in the androidpublisher service.
* @property {string} productId The SKU of the product.
* @property {string} productType The type of the inapp product. Possible values are:
- In-app item: "inapp"
- Subscription: "subs"
* @property {string} token The token which can be verified using the subscriptions or products API.
*/
/**
* @typedef EntitlementsListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).PageInfo} pageInfo
* @property {androidpublisher(v2).Entitlement[]} resources
* @property {androidpublisher(v2).TokenPagination} tokenPagination
*/
/**
* @typedef ExpansionFile
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} fileSize If set this field indicates that this APK has an Expansion File uploaded to it: this APK does not reference another APK's Expansion File. The field's value is the size of the uploaded Expansion File in bytes.
* @property {integer} referencesVersion If set this APK's Expansion File references another APK's Expansion File. The file_size field will not be set.
*/
/**
* @typedef ExpansionFilesUploadResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).ExpansionFile} expansionFile
*/
/**
* @typedef ExternallyHostedApk
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} applicationLabel The application label.
* @property {string[]} certificateBase64s A certificate (or array of certificates if a certificate-chain is used) used to signed this APK, represented as a base64 encoded byte array.
* @property {string} externallyHostedUrl The URL at which the APK is hosted. This must be an https URL.
* @property {string} fileSha1Base64 The SHA1 checksum of this APK, represented as a base64 encoded byte array.
* @property {string} fileSha256Base64 The SHA256 checksum of this APK, represented as a base64 encoded byte array.
* @property {string} fileSize The file size in bytes of this APK.
* @property {string} iconBase64 The icon image from the APK, as a base64 encoded byte array.
* @property {integer} maximumSdk The maximum SDK supported by this APK (optional).
* @property {integer} minimumSdk The minimum SDK targeted by this APK.
* @property {string[]} nativeCodes The native code environments supported by this APK (optional).
* @property {string} packageName The package name.
* @property {string[]} usesFeatures The features required by this APK (optional).
* @property {androidpublisher(v2).ExternallyHostedApkUsesPermission[]} usesPermissions The permissions requested by this APK.
* @property {integer} versionCode The version code of this APK.
* @property {string} versionName The version name of this APK.
*/
/**
* @typedef ExternallyHostedApkUsesPermission
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} maxSdkVersion Optionally, the maximum SDK version for which the permission is required.
* @property {string} name The name of the permission requested.
*/
/**
* @typedef Image
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} id A unique id representing this image.
* @property {string} sha1 A sha1 hash of the image that was uploaded.
* @property {string} url A URL that will serve a preview of the image.
*/
/**
* @typedef ImagesDeleteAllResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Image[]} deleted
*/
/**
* @typedef ImagesListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Image[]} images
*/
/**
* @typedef ImagesUploadResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Image} image
*/
/**
* @typedef InAppProduct
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} defaultLanguage The default language of the localized data, as defined by BCP 47. e.g. "en-US", "en-GB".
* @property {androidpublisher(v2).Price} defaultPrice Default price cannot be zero. In-app products can never be free. Default price is always in the developer's Checkout merchant currency.
* @property {object} listings List of localized title and description data.
* @property {string} packageName The package name of the parent app.
* @property {object} prices Prices per buyer region. None of these prices should be zero. In-app products can never be free.
* @property {string} purchaseType Purchase type enum value. Unmodifiable after creation.
* @property {androidpublisher(v2).Season} season Definition of a season for a seasonal subscription. Can be defined only for yearly subscriptions.
* @property {string} sku The stock-keeping-unit (SKU) of the product, unique within an app.
* @property {string} status
* @property {string} subscriptionPeriod Subscription period, specified in ISO 8601 format. Acceptable values are "P1W" (one week), "P1M" (one month), "P3M" (three months), "P6M" (six months), and "P1Y" (one year).
* @property {string} trialPeriod Trial period, specified in ISO 8601 format. Acceptable values are anything between "P7D" (seven days) and "P999D" (999 days). Seasonal subscriptions cannot have a trial period.
*/
/**
* @typedef InAppProductListing
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} description
* @property {string} title
*/
/**
* @typedef InappproductsBatchRequest
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InappproductsBatchRequestEntry[]} entrys
*/
/**
* @typedef InappproductsBatchRequestEntry
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} batchId
* @property {androidpublisher(v2).InappproductsInsertRequest} inappproductsinsertrequest
* @property {androidpublisher(v2).InappproductsUpdateRequest} inappproductsupdaterequest
* @property {string} methodName
*/
/**
* @typedef InappproductsBatchResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InappproductsBatchResponseEntry[]} entrys
* @property {string} kind Identifies what kind of resource this is. Value: the fixed string "androidpublisher#inappproductsBatchResponse".
*/
/**
* @typedef InappproductsBatchResponseEntry
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} batchId
* @property {androidpublisher(v2).InappproductsInsertResponse} inappproductsinsertresponse
* @property {androidpublisher(v2).InappproductsUpdateResponse} inappproductsupdateresponse
*/
/**
* @typedef InappproductsInsertRequest
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InAppProduct} inappproduct
*/
/**
* @typedef InappproductsInsertResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InAppProduct} inappproduct
*/
/**
* @typedef InappproductsListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InAppProduct[]} inappproduct
* @property {string} kind Identifies what kind of resource this is. Value: the fixed string "androidpublisher#inappproductsListResponse".
* @property {androidpublisher(v2).PageInfo} pageInfo
* @property {androidpublisher(v2).TokenPagination} tokenPagination
*/
/**
* @typedef InappproductsUpdateRequest
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InAppProduct} inappproduct
*/
/**
* @typedef InappproductsUpdateResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).InAppProduct} inappproduct
*/
/**
* @typedef Listing
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} fullDescription Full description of the app; this may be up to 4000 characters in length.
* @property {string} language Language localization code (for example, "de-AT" for Austrian German).
* @property {string} shortDescription Short description of the app (previously known as promo text); this may be up to 80 characters in length.
* @property {string} title App's localized title.
* @property {string} video URL of a promotional YouTube video for the app.
*/
/**
* @typedef ListingsListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} kind Identifies what kind of resource this is. Value: the fixed string "androidpublisher#listingsListResponse".
* @property {androidpublisher(v2).Listing[]} listings
*/
/**
* @typedef MonthDay
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} day Day of a month, value in [1, 31] range. Valid range depends on the specified month.
* @property {integer} month Month of a year. e.g. 1 = JAN, 2 = FEB etc.
*/
/**
* @typedef PageInfo
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} resultPerPage
* @property {integer} startIndex
* @property {integer} totalResults
*/
/**
* @typedef Price
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} currency 3 letter Currency code, as defined by ISO 4217.
* @property {string} priceMicros The price in millionths of the currency base unit represented as a string.
*/
/**
* @typedef ProductPurchase
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} consumptionState The consumption state of the inapp product. Possible values are:
- Yet to be consumed
- Consumed
* @property {string} developerPayload A developer-specified string that contains supplemental information about an order.
* @property {string} kind This kind represents an inappPurchase object in the androidpublisher service.
* @property {integer} purchaseState The purchase state of the order. Possible values are:
- Purchased
- Cancelled
* @property {string} purchaseTimeMillis The time the product was purchased, in milliseconds since the epoch (Jan 1, 1970).
*/
/**
* @typedef Prorate
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Price} defaultPrice Default price cannot be zero and must be less than the full subscription price. Default price is always in the developer's Checkout merchant currency. Targeted countries have their prices set automatically based on the default_price.
* @property {androidpublisher(v2).MonthDay} start Defines the first day on which the price takes effect.
*/
/**
* @typedef Review
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} authorName The name of the user who wrote the review.
* @property {androidpublisher(v2).Comment[]} comments A repeated field containing comments for the review.
* @property {string} reviewId Unique identifier for this review.
*/
/**
* @typedef ReviewReplyResult
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).Timestamp} lastEdited The time at which the reply took effect.
* @property {string} replyText The reply text that was applied.
*/
/**
* @typedef ReviewsListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).PageInfo} pageInfo
* @property {androidpublisher(v2).Review[]} reviews
* @property {androidpublisher(v2).TokenPagination} tokenPagination
*/
/**
* @typedef ReviewsReplyRequest
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} replyText The text to set as the reply. Replies of more than approximately 350 characters will be rejected. HTML tags will be stripped.
*/
/**
* @typedef ReviewsReplyResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).ReviewReplyResult} result
*/
/**
* @typedef Season
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).MonthDay} end Inclusive end date of the recurrence period.
* @property {androidpublisher(v2).Prorate[]} prorations Optionally present list of prorations for the season. Each proration is a one-off discounted entry into a subscription. Each proration contains the first date on which the discount is available and the new pricing information.
* @property {androidpublisher(v2).MonthDay} start Inclusive start date of the recurrence period.
*/
/**
* @typedef SubscriptionDeferralInfo
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} desiredExpiryTimeMillis The desired next expiry time to assign to the subscription, in milliseconds since the Epoch. The given time must be later/greater than the current expiry time for the subscription.
* @property {string} expectedExpiryTimeMillis The expected expiry time for the subscription. If the current expiry time for the subscription is not the value specified here, the deferral will not occur.
*/
/**
* @typedef SubscriptionPurchase
* @memberOf! androidpublisher(v2)
* @type object
* @property {boolean} autoRenewing Whether the subscription will automatically be renewed when it reaches its current expiry time.
* @property {integer} cancelReason The reason why a subscription was cancelled or is not auto-renewing. Possible values are:
- User cancelled the subscription
- Subscription was cancelled by the system, for example because of a billing problem
* @property {string} countryCode ISO 3166-1 alpha-2 billing country/region code of the user at the time the subscription was granted.
* @property {string} developerPayload A developer-specified string that contains supplemental information about an order.
* @property {string} expiryTimeMillis Time at which the subscription will expire, in milliseconds since the Epoch.
* @property {string} kind This kind represents a subscriptionPurchase object in the androidpublisher service.
* @property {integer} paymentState The payment state of the subscription. Possible values are:
- Payment pending
- Payment received
* @property {string} priceAmountMicros Price of the subscription, not including tax. Price is expressed in micro-units, where 1,000,000 micro-units represents one unit of the currency. For example, if the subscription price is €1.99, price_amount_micros is 1990000.
* @property {string} priceCurrencyCode ISO 4217 currency code for the subscription price. For example, if the price is specified in British pounds sterling, price_currency_code is "GBP".
* @property {string} startTimeMillis Time at which the subscription was granted, in milliseconds since the Epoch.
* @property {string} userCancellationTimeMillis The time at which the subscription was canceled by the user, in milliseconds since the epoch. Only present if cancelReason is 0.
*/
/**
* @typedef SubscriptionPurchasesDeferRequest
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).SubscriptionDeferralInfo} deferralInfo The information about the new desired expiry time for the subscription.
*/
/**
* @typedef SubscriptionPurchasesDeferResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} newExpiryTimeMillis The new expiry time for the subscription in milliseconds since the Epoch.
*/
/**
* @typedef Testers
* @memberOf! androidpublisher(v2)
* @type object
* @property {string[]} googleGroups
* @property {string[]} googlePlusCommunities
*/
/**
* @typedef Timestamp
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} nanos
* @property {string} seconds
*/
/**
* @typedef TokenPagination
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} nextPageToken
* @property {string} previousPageToken
*/
/**
* @typedef Track
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} track
* @property {number} userFraction
* @property {integer[]} versionCodes
*/
/**
* @typedef TracksListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} kind Identifies what kind of resource this is. Value: the fixed string "androidpublisher#tracksListResponse".
* @property {androidpublisher(v2).Track[]} tracks
*/
/**
* @typedef UserComment
* @memberOf! androidpublisher(v2)
* @type object
* @property {integer} androidOsVersion Integer Android SDK version of the user's device at the time the review was written, e.g. 23 is Marshmallow. May be absent.
* @property {integer} appVersionCode Integer version code of the app as installed at the time the review was written. May be absent.
* @property {string} appVersionName String version name of the app as installed at the time the review was written. May be absent.
* @property {string} device Codename for the reviewer's device, e.g. klte, flounder. May be absent.
* @property {androidpublisher(v2).DeviceMetadata} deviceMetadata Some information about the characteristics of the user's device
* @property {androidpublisher(v2).Timestamp} lastModified The last time at which this comment was updated.
* @property {string} originalText Untranslated text of the review, in the case where the review has been translated. If the review has not been translated this is left blank.
* @property {string} reviewerLanguage Language code for the reviewer. This is taken from the device settings so is not guaranteed to match the language the review is written in. May be absent.
* @property {integer} starRating The star rating associated with the review, from 1 to 5.
* @property {string} text The content of the comment, i.e. review body. In some cases users have been able to write a review with separate title and body; in those cases the title and body are concatenated and separated by a tab character.
* @property {integer} thumbsDownCount Number of users who have given this review a thumbs down
* @property {integer} thumbsUpCount Number of users who have given this review a thumbs up
*/
/**
* @typedef VoidedPurchase
* @memberOf! androidpublisher(v2)
* @type object
* @property {string} kind This kind represents a voided purchase object in the androidpublisher service.
* @property {string} purchaseTimeMillis The time at which the purchase was made, in milliseconds since the epoch (Jan 1, 1970).
* @property {string} purchaseToken The token that was generated when a purchase was made. This uniquely identifies a purchase.
* @property {string} voidedTimeMillis The time at which the purchase was cancelled/refunded/charged-back, in milliseconds since the epoch (Jan 1, 1970).
*/
/**
* @typedef VoidedPurchasesListResponse
* @memberOf! androidpublisher(v2)
* @type object
* @property {androidpublisher(v2).PageInfo} pageInfo
* @property {androidpublisher(v2).TokenPagination} tokenPagination
* @property {androidpublisher(v2).VoidedPurchase[]} voidedPurchases
*/
export = Androidpublisher;
|
},
|
TestGenMaxwellPlaneStrain.py
|
#!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file tests/pytests/materials/TestGenMaxwellPlaneStrain.py
## @brief Unit testing of GenMaxwellPlaneStrain object.
import unittest
from pylith.materials.GenMaxwellPlaneStrain import GenMaxwellPlaneStrain
# ----------------------------------------------------------------------
class TestGenMaxwellPlaneStrain(unittest.TestCase):
"""
Unit testing of GenMaxwellPlaneStrain object.
"""
def setUp(self):
    """Create the GenMaxwellPlaneStrain test subject before each test."""
    self.material = GenMaxwellPlaneStrain()
def test_constructor(self):
    """Verify a freshly constructed material reports a 2-D dimension."""
    dim = self.material.dimension()
    self.assertEqual(2, dim)
def test_useElasticBehavior(self):
    """Exercise useElasticBehavior(); only checks the call does not raise."""
    self.material.useElasticBehavior(False)
def testHasStateVars(self):
|
def testTensorSize(self):
    """
    Test tensorSize(); a 2-D (plane-strain) material carries 3 components.
    """
    self.assertEqual(3, self.material.tensorSize())
    return
def testNeedNewJacobian(self):
    """
    Test needNewJacobian().

    Uses assertFalse/assertTrue instead of the long-deprecated
    failIf/failUnless aliases, which were removed in Python 3.12.
    """
    # Default should be False.
    self.assertFalse(self.material.needNewJacobian())
    # Changing time step should require new Jacobian.
    self.material.timeStep(1.0)
    self.material.timeStep(2.0)
    self.assertTrue(self.material.needNewJacobian())
    return
def test_factory(self):
    """Check that the module-level factory function instantiates a material."""
    from pylith.materials.GenMaxwellPlaneStrain import material
    material()
# End of file
|
self.failUnless(self.material.hasStateVars())
return
|
parse.js
|
import babel from 'npmjs.com/babel-core'
import babylonParser from 'npmjs.com/babel-core/node_modules/babylon/lib/parser'
import 'jo/ast/macro'
import fs from 'asyncfs'
// ParseResult is returned from a succeful call to Parse
type ParseResult = {
ast:ASTProgram;
macros:MacroDefs;
diagnostics:Diagnostic[];
};
// Diagnostic describes a non-critical event or piece of potentially helpful information
// found during parsing.
type Diagnostic = {
type:string; // 'warn'|'info'
message:string; // Human-readable description
pos:int; // start offset in source. -1 if not available.
endPos:int; // end offset in source. -1 if not available.
loc?:SrcLoc; // start and end line-and-column in source
};
type SrcLoc = {
start:{line:int,column:int};
end:{line:int,column:int};
};
// Mode modifies the behaviour of parsing
const ImportsOnly:Mode = 1 << 0 // Stop parsing after import declarations
, ParseComments:Mode = 2 << 1 // Parse and include comments in the AST
, NoJSX:Mode = 3 << 2 // Disable parsing of JSX
, NoFlowTypes:Mode = 4 << 3 // Disable parsing of Flow types
, NoMacros:Mode = 5 << 4 // Disable parsing and expansion of macros
// Parses source code into AST
interface Parser {
mode:Mode;
parse(fset:FileSet, filename:string, src:string):File;
}
// Creates a new parser configured by the optional `mode` bit flags.
// Returns the result of configureFileParser (defined in file.js).
function CreateParser(mode?:Mode) { //:Parser
  var opts = {
    allowImportExportEverywhere: false,
    allowReturnOutsideFunction: false,
    strictMode: false,
    sourceType: 'module',
    allowReserved: true,
    plugins: { jsx: 1, flow: 1 }, // TODO: set from babylonParser in some way
    features: {},
  };
  // Enable every registered babel transformer as a parser "feature".
  for (let k in babel.pipeline.transformers) {
    opts.features[k] = true;
  }
  // Mode flags strip optional syntaxes before the parser is constructed.
  if (mode & NoJSX) { delete opts.plugins.jsx; }
  if (mode & NoFlowTypes) { delete opts.plugins.flow; }
  let p = new babylonParser(opts, '');
  if (!(mode & NoMacros)) {
    // Install macro parsing/expansion unless explicitly disabled.
    macro.Plugin(p, {
      includeComments: (mode & ParseComments),
      includeDefinitions: true, // Generate MacroDefinition AST nodes instead of Noop nodes
    });
  }
  return configureFileParser(p, mode); // defined in file.js
}
// reusable parsers
var parsers; // Map<Mode,[Parser,ParseState]>
function
|
(mode?:Mode) { //:Parser
let p;
if (!parsers || !(p = parsers.get(mode))) {
p = CreateParser(mode)
if (!parsers) { parsers = new Map }
parsers.set(mode, [p, Object.freeze(p.state.clone())])
} else {
p[0].state = p[1].clone();
p = p[0];
}
return p;
}
// Parses a file loaded from and located at `filename`.
// If `src` is a string or Buffer, `filename` is only used to record positions in fset.
// NOTE(review): an empty-string `src` is falsy and falls through to reading the
// file from disk — confirm that is intended.
async function ParseFile(fset:FileSet, filename:string, src?:string, mode?:Mode):ast.File {
  if (src) {
    switch (typeof src) {
      case 'string': break;
      // Buffers (and other objects exposing toString) are decoded as UTF-8.
      case 'object': src = src.toString('utf8'); break;
      default: throw new TypeError('src is not a string');
    }
  } else {
    src = await fs.readFile(filename, {encoding:'utf8'});
  }
  return getParser(mode).parse(fset, filename, src);
}
// Decide is a file is to be included in parsing of a directory.
// The st object contains a non-standard property: name:string; -- file's basename.
type FileFilter = (st:fs.Stat)=>bool;
// Default pattern for matching filenames considered source files
// Examples that match: "foo.js", "bar-lol cat.js"
// Examples that don't match: ".foo.js", "lol.foo"
var FilenamePattern = /^[^\.].*\.js$/;
// ParseDir calls ParseFile for all files with names ending in ".js" in the directory specified
// by path and returns a Pkg.
//
// If filterfn is provided, only the files with fs.Stat entries passing through the filter
// are considered. The mode bits are passed to ParseFile unchanged. Position information is
// recorded in fset.
//
async function ParseDir(fset:FileSet, dirname:string, filterfn?:FileFilter, mode?:Mode) { //:Pkg
  let files = [];
  // Files are stat'ed and parsed concurrently (see readdir below).
  await readdir(dirname, filterfn, async f => {
    files.push(await ParseFile(fset, dirname + '/' + f.name, null, mode))
  })
  // NOTE(review): this comparator orders files by name in *descending* order —
  // confirm that is the intended ordering for a Pkg.
  files.sort((L, R) => R.name < L.name ? -1 : 1)
  return { // __proto__: Pkg.prototype
    dirname: dirname,
    files: files,
  };
}
// Lists `dirname`, stats every entry concurrently, and invokes `cb(stat)` for
// each entry accepted by `filterfn` (or, when no filter is given, by
// FilenamePattern). The stat object is augmented with a non-standard `name`
// property holding the entry's basename.
async function readdir(dirname, filterfn, cb) {
  let entries = await fs.readdir(dirname);
  let basepath = dirname + '/';
  await Promise.all(map(entries, async (filename, i) => {
    let f = await fs.stat(basepath + filename);
    if (f) {
      f.name = filename;
      if (filterfn) {
        // An explicit filter takes precedence over the default name pattern.
        if (!filterfn(f)) {
          return;
        }
      } else if (!FilenamePattern.test(filename)) {
        return;
      }
      await cb(f);
    }
  }))
}
// Lazily yields only the values of `it` for which fn(value, index) is truthy.
// (Previously this yielded fn's return value for every element, which made it
// an exact duplicate of `map` and never filtered anything.)
function* filter(it, fn) {
  let i = 0;
  for (let v of it) {
    if (fn(v, i++)) {
      yield v;
    }
  }
}
// Lazily applies fn(value, index) to each element of an iterable,
// yielding the results one at a time.
function* map(it, fn) {
  let index = 0;
  for (const value of it) {
    yield fn(value, index);
    index++;
  }
}
// let sources = await Promise.all(map(fset, f =>
// fs.readFile(f, {encoding:'utf8'}) ))
// Convenience function to parse a source-code string
function ParseExpr(src:string, mode?:Mode) {
return getParser(mode).parse(null, '<anon>', src)
}
|
getParser
|
main.go
|
// Copyright 2016-2021, Pulumi Corporation. All rights reserved.
package main
import (
"fmt"
"strings"
"github.com/pulumi/pulumi-azure-native/sdk/go/azure/cdn"
"github.com/pulumi/pulumi-azure-native/sdk/go/azure/resources"
"github.com/pulumi/pulumi-azure-native/sdk/go/azure/storage"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
resourceGroup, err := resources.NewResourceGroup(ctx, "website-rg", nil)
if err != nil {
return err
}
profile, err := cdn.NewProfile(ctx, "profile", &cdn.ProfileArgs{
ResourceGroupName: resourceGroup.Name,
Sku: &cdn.SkuArgs{
Name: pulumi.String(cdn.SkuName_Standard_Microsoft),
},
})
if err != nil {
return err
}
storageAccount, err := storage.NewStorageAccount(ctx, "sa", &storage.StorageAccountArgs{
ResourceGroupName: resourceGroup.Name,
Kind: pulumi.String(storage.KindStorageV2),
Sku: &storage.SkuArgs{
Name: pulumi.String(storage.SkuName_Standard_LRS),
},
})
if err != nil {
return err
}
endpointOrigin := storageAccount.PrimaryEndpoints.Web().ApplyT(func(endpoint string) string {
endpoint = strings.ReplaceAll(endpoint, "https://", "")
endpoint = strings.ReplaceAll(endpoint, "/", "")
return endpoint
}).(pulumi.StringOutput)
queryStringCachingBehaviorNotSet := cdn.QueryStringCachingBehaviorNotSet
endpoint, err := cdn.NewEndpoint(ctx, "endpoint", &cdn.EndpointArgs{
IsHttpAllowed: pulumi.Bool(false),
IsHttpsAllowed: pulumi.Bool(true),
OriginHostHeader: endpointOrigin,
Origins: cdn.DeepCreatedOriginArray{
&cdn.DeepCreatedOriginArgs{
HostName: endpointOrigin,
HttpsPort: pulumi.Int(443),
Name: pulumi.String("origin-storage-account"),
},
},
ProfileName: profile.Name,
QueryStringCachingBehavior: &queryStringCachingBehaviorNotSet,
ResourceGroupName: resourceGroup.Name,
})
if err != nil {
return err
}
// Enable static website support
staticWebsite, err := storage.NewStorageAccountStaticWebsite(ctx, "staticWebsite", &storage.StorageAccountStaticWebsiteArgs{
AccountName: storageAccount.Name,
ResourceGroupName: resourceGroup.Name,
IndexDocument: pulumi.String("index.html"),
Error404Document: pulumi.String("404.html"),
})
if err != nil {
return err
}
// Upload the files
_, err = storage.NewBlob(ctx, "index.html", &storage.BlobArgs{
ResourceGroupName: resourceGroup.Name,
AccountName: storageAccount.Name,
ContainerName: staticWebsite.ContainerName,
Source: pulumi.NewFileAsset("./wwwroot/index.html"),
ContentType: pulumi.String("text/html"),
})
if err != nil {
return err
}
_, err = storage.NewBlob(ctx, "404.html", &storage.BlobArgs{
ResourceGroupName: resourceGroup.Name,
AccountName: storageAccount.Name,
ContainerName: staticWebsite.ContainerName,
|
if err != nil {
return err
}
// Web endpoint to the website
ctx.Export("staticEndpoint", storageAccount.PrimaryEndpoints.Web())
// CDN endpoint to the website.
// Allow it some time after the deployment to get ready.
ctx.Export("cdnEndpoint", endpoint.HostName.ApplyT(func(hostName string) string {
return fmt.Sprintf("%v%v", "https://", hostName)
}))
return nil
})
}
|
Source: pulumi.NewFileAsset("./wwwroot/404.html"),
ContentType: pulumi.String("text/html"),
})
|
player.rs
|
use rltk::{VirtualKeyCode, Rltk, Point};
use specs::prelude::*;
use std::cmp::{max, min};
use super::{Position, Player, Viewshed, State, Map, RunState, Attributes, WantsToMelee, Item,
gamelog::GameLog, WantsToPickupItem, TileType, HungerClock, HungerState,
EntityMoved, Door, BlocksTile, BlocksVisibility, Renderable, Pools, Faction,
raws::Reaction, Vendor, VendorMode};
/// Attempts to move the player by `(delta_x, delta_y)`.
///
/// Depending on what occupies the destination tile this may open a vendor
/// menu, queue a melee attack against a hostile, swap places with a friendly
/// bystander, open a door, or simply walk. Returns the `RunState` the game
/// should enter next (`AwaitingInput` when nothing happened).
pub fn try_move_player(delta_x: i32, delta_y: i32, ecs: &mut World) -> RunState {
    let mut positions = ecs.write_storage::<Position>();
    let players = ecs.read_storage::<Player>();
    let mut viewsheds = ecs.write_storage::<Viewshed>();
    let entities = ecs.entities();
    let combat_stats = ecs.read_storage::<Attributes>();
    let map = ecs.fetch::<Map>();
    let mut wants_to_melee = ecs.write_storage::<WantsToMelee>();
    let mut entity_moved = ecs.write_storage::<EntityMoved>();
    let mut doors = ecs.write_storage::<Door>();
    let mut blocks_visibility = ecs.write_storage::<BlocksVisibility>();
    let mut blocks_movement = ecs.write_storage::<BlocksTile>();
    let mut renderables = ecs.write_storage::<Renderable>();
    let factions = ecs.read_storage::<Faction>();
    let vendors = ecs.read_storage::<Vendor>();
    let mut result = RunState::AwaitingInput;

    // Bystander position swaps are deferred to avoid borrowing `positions`
    // twice inside the join.
    let mut swap_entities : Vec<(Entity, i32, i32)> = Vec::new();

    for (entity, _player, pos, viewshed) in (&entities, &players, &mut positions, &mut viewsheds).join() {
        // Refuse to step onto (or past) the map border.
        if pos.x + delta_x < 1 || pos.x + delta_x > map.width-1 || pos.y + delta_y < 1 || pos.y + delta_y > map.height-1 { return RunState::AwaitingInput; }
        let destination_idx = map.xy_idx(pos.x + delta_x, pos.y + delta_y);

        // Inspect everything already standing on the destination tile.
        crate::spatial::for_each_tile_content(destination_idx, |potential_target| {
            // Bumping into a vendor opens the sell menu instead of moving.
            if let Some(_vendor) = vendors.get(potential_target) {
                result = RunState::ShowVendor{ vendor: potential_target, mode : VendorMode::Sell }
            }

            // An entity with combat stats is hostile unless its faction's
            // reaction toward "Player" says otherwise.
            let mut hostile = true;
            if combat_stats.get(potential_target).is_some() {
                if let Some(faction) = factions.get(potential_target) {
                    let reaction = crate::raws::faction_reaction(
                        &faction.name,
                        "Player",
                        &crate::raws::RAWS.lock().unwrap()
                    );
                    if reaction != Reaction::Attack { hostile = false; }
                }
            }
            if !hostile {
                // Note that we want to move the bystander
                swap_entities.push((potential_target, pos.x, pos.y));

                // Move the player
                pos.x = min(map.width-1 , max(0, pos.x + delta_x));
                pos.y = min(map.height-1, max(0, pos.y + delta_y));
                entity_moved.insert(entity, EntityMoved{}).expect("Unable to insert marker");
                viewshed.dirty = true;
                let mut ppos = ecs.write_resource::<Point>();
                ppos.x = pos.x;
                ppos.y = pos.y;
            } else {
                // Hostile and has stats: queue a melee attack instead of moving.
                let target = combat_stats.get(potential_target);
                if let Some(_target) = target {
                    wants_to_melee.insert(entity, WantsToMelee{ target: potential_target }).expect("Add target failed");
                    result = RunState::Ticking;
                }
            }

            // Bumping a door opens it, making the tile passable and transparent.
            let door = doors.get_mut(potential_target);
            if let Some(door) = door {
                door.open = true;
                blocks_visibility.remove(potential_target);
                blocks_movement.remove(potential_target);
                let glyph = renderables.get_mut(potential_target).unwrap();
                glyph.glyph = rltk::to_cp437('/');
                viewshed.dirty = true;
                result = RunState::Ticking;
            }
        });

        // BUGFIX: walking must happen when the destination is NOT blocked.
        // The previous condition was inverted (`if is_blocked`), which both
        // prevented normal movement onto open floor and double-moved the
        // player after a bystander swap (a swapped tile is blocked).
        if !crate::spatial::is_blocked(destination_idx) {
            pos.x = min(map.width-1 , max(0, pos.x + delta_x));
            pos.y = min(map.height-1, max(0, pos.y + delta_y));
            entity_moved.insert(entity, EntityMoved{}).expect("Unable to insert marker");
            viewshed.dirty = true;
            let mut ppos = ecs.write_resource::<Point>();
            ppos.x = pos.x;
            ppos.y = pos.y;
            result = RunState::Ticking;
            // Standing on stairs triggers a level change.
            match map.tiles[destination_idx] {
                TileType::DownStairs => result = RunState::NextLevel,
                TileType::UpStairs => result = RunState::PreviousLevel,
                _ => {}
            }
        }
    }

    // Apply the deferred bystander swaps now that the join borrow is over.
    for m in swap_entities.iter() {
        let their_pos = positions.get_mut(m.0);
        if let Some(their_pos) = their_pos {
            their_pos.x = m.1;
            their_pos.y = m.2;
        }
    }
    result
}
/// Returns `true` when the player is standing on a down-staircase.
/// Otherwise logs an explanatory message and returns `false`.
pub fn try_next_level(ecs: &mut World) -> bool {
    let player_pos = ecs.fetch::<Point>();
    let map = ecs.fetch::<Map>();
    let player_idx = map.xy_idx(player_pos.x, player_pos.y);
    if map.tiles[player_idx] != TileType::DownStairs {
        ecs.fetch_mut::<GameLog>()
            .entries
            .push("There is no way down from here.".to_string());
        return false;
    }
    true
}
pub fn
|
(ecs: &mut World) -> bool {
let player_pos = ecs.fetch::<Point>();
let map = ecs.fetch::<Map>();
let player_idx = map.xy_idx(player_pos.x, player_pos.y);
if map.tiles[player_idx] == TileType::UpStairs {
true
} else {
let mut gamelog = ecs.fetch_mut::<GameLog>();
gamelog.entries.push("There is no way up from here.".to_string());
false
}
}
/// Tries to pick up an item on the player's tile. When one is found, a
/// `WantsToPickupItem` intent is attached to the player; otherwise a message
/// is logged.
fn get_item(ecs: &mut World) {
    let player_pos = ecs.fetch::<Point>();
    let player_entity = ecs.fetch::<Entity>();
    let entities = ecs.entities();
    let items = ecs.read_storage::<Item>();
    let positions = ecs.read_storage::<Position>();
    let mut gamelog = ecs.fetch_mut::<GameLog>();

    // Mirrors the original scan: when several items share the tile,
    // the last one in join order wins.
    let target_item = (&entities, &items, &positions)
        .join()
        .filter(|(_, _, position)| position.x == player_pos.x && position.y == player_pos.y)
        .map(|(item_entity, _, _)| item_entity)
        .last();

    match target_item {
        None => gamelog.entries.push("There is nothing here to pick up.".to_string()),
        Some(item) => {
            let mut pickup = ecs.write_storage::<WantsToPickupItem>();
            pickup
                .insert(*player_entity, WantsToPickupItem{ collected_by: *player_entity, item })
                .expect("Unable to insert want to pickup");
        }
    }
}
/// Passes the player's turn. If no visible entity would attack the player and
/// the player is neither Hungry nor Starving, heals 1 hit point (capped at
/// max). Always returns `RunState::Ticking`.
fn skip_turn(ecs: &mut World) -> RunState {
    let player_entity = ecs.fetch::<Entity>();
    let viewshed_components = ecs.read_storage::<Viewshed>();
    let factions = ecs.read_storage::<Faction>();
    let worldmap_resource = ecs.fetch::<Map>();

    // Scan every visible tile for entities whose faction reaction to
    // "Player" is Attack; any such entity prevents resting-heal.
    let mut can_heal = true;
    let viewshed = viewshed_components.get(*player_entity).unwrap();
    for tile in viewshed.visible_tiles.iter() {
        let idx = worldmap_resource.xy_idx(tile.x, tile.y);
        crate::spatial::for_each_tile_content(idx, |entity_id| {
            let faction = factions.get(entity_id);
            match faction {
                None => {}
                Some(faction) => {
                    let reaction = crate::raws::faction_reaction(
                        &faction.name,
                        "Player",
                        &crate::raws::RAWS.lock().unwrap()
                    );
                    if reaction == Reaction::Attack {
                        can_heal = false;
                    }
                }
            }
        });
    }

    // Hunger also blocks the free heal.
    let hunger_clocks = ecs.read_storage::<HungerClock>();
    let hc = hunger_clocks.get(*player_entity);
    if let Some(hc) = hc {
        match hc.state {
            HungerState::Hungry => can_heal = false,
            HungerState::Starving => can_heal = false,
            _ => {}
        }
    }

    if can_heal {
        let mut health_components = ecs.write_storage::<Pools>();
        let pools = health_components.get_mut(*player_entity).unwrap();
        pools.hit_points.current = i32::min(pools.hit_points.current + 1, pools.hit_points.max);
    }

    RunState::Ticking
}
/// Activates the `key`-th (0-based) consumable in the player's backpack.
/// Ranged consumables switch to targeting mode; everything else gets an
/// immediate `WantsToUseItem` intent. Always resumes with `Ticking`.
fn use_consumable_hotkey(gs: &mut State, key: i32) -> RunState {
    use super::{Consumable, InBackpack, WantsToUseItem};

    let consumables = gs.ecs.read_storage::<Consumable>();
    let backpack = gs.ecs.read_storage::<InBackpack>();
    let player_entity = gs.ecs.fetch::<Entity>();
    let entities = gs.ecs.entities();

    // Consumables the player carries, in join order (matches hotkey order).
    let carried_consumables: Vec<Entity> = (&entities, &backpack, &consumables)
        .join()
        .filter(|(_, carried_by, _)| carried_by.owner == *player_entity)
        .map(|(entity, _, _)| entity)
        .collect();

    // `get` handles both out-of-range and (via the usize cast) negative keys.
    if let Some(&item) = carried_consumables.get(key as usize) {
        use crate::components::Ranged;
        if let Some(ranged) = gs.ecs.read_storage::<Ranged>().get(item) {
            return RunState::ShowTargeting{ range: ranged.range, item };
        }
        gs.ecs.write_storage::<WantsToUseItem>()
            .insert(*player_entity, WantsToUseItem{ item, target: None })
            .expect("Unable to insert intent");
        return RunState::Ticking;
    }
    RunState::Ticking
}
/// Top-level keyboard dispatcher called while the game awaits player input.
/// Translates the current key state into movement, item, level-change, save,
/// and cheat actions, returning the next `RunState`.
pub fn player_input(gs: &mut State, ctx: &mut Rltk) -> RunState {
    // Hotkeys
    // Shift+1..9 consumes the matching carried consumable.
    if ctx.shift && ctx.key.is_some() {
        let key : Option<i32> =
            match ctx.key.unwrap() {
                VirtualKeyCode::Key1 => Some(1),
                VirtualKeyCode::Key2 => Some(2),
                VirtualKeyCode::Key3 => Some(3),
                VirtualKeyCode::Key4 => Some(4),
                VirtualKeyCode::Key5 => Some(5),
                VirtualKeyCode::Key6 => Some(6),
                VirtualKeyCode::Key7 => Some(7),
                VirtualKeyCode::Key8 => Some(8),
                VirtualKeyCode::Key9 => Some(9),
                _ => None
            };
        if let Some(key) = key {
            // On-screen hotkeys are 1-based; the consumable list is 0-based.
            return use_consumable_hotkey(gs, key-1);
        }
    }

    // Player movement
    match ctx.key {
        None => { return RunState::AwaitingInput } // Nothing happened
        Some(key) => match key {
            // Cardinal moves: arrows, numpad, and vi-style HJKL.
            VirtualKeyCode::Left |
            VirtualKeyCode::Numpad4 |
            VirtualKeyCode::H => return try_move_player(-1, 0, &mut gs.ecs),

            VirtualKeyCode::Right |
            VirtualKeyCode::Numpad6 |
            VirtualKeyCode::L => return try_move_player(1, 0, &mut gs.ecs),

            VirtualKeyCode::Up |
            VirtualKeyCode::Numpad8 |
            VirtualKeyCode::K => return try_move_player(0, -1, &mut gs.ecs),

            VirtualKeyCode::Down |
            VirtualKeyCode::Numpad2 |
            VirtualKeyCode::J => return try_move_player(0, 1, &mut gs.ecs),

            // Diagonals
            VirtualKeyCode::Numpad9 |
            VirtualKeyCode::U => return try_move_player(1, -1, &mut gs.ecs),

            VirtualKeyCode::Numpad7 |
            VirtualKeyCode::Y => return try_move_player(-1, -1, &mut gs.ecs),

            VirtualKeyCode::Numpad3 |
            VirtualKeyCode::N => return try_move_player(1, 1, &mut gs.ecs),

            VirtualKeyCode::Numpad1 |
            VirtualKeyCode::B => return try_move_player(-1, 1, &mut gs.ecs),

            // Skip Turn
            VirtualKeyCode::Numpad5 |
            VirtualKeyCode::Space => return skip_turn(&mut gs.ecs),

            // Level changes
            VirtualKeyCode::Period => {
                if try_next_level(&mut gs.ecs) {
                    return RunState::NextLevel;
                }
            }
            VirtualKeyCode::Comma => {
                if try_previous_level(&mut gs.ecs) {
                    return RunState::PreviousLevel;
                }
            }

            // Picking up items
            VirtualKeyCode::G => get_item(&mut gs.ecs),
            VirtualKeyCode::I => return RunState::ShowInventory,
            VirtualKeyCode::D => return RunState::ShowDropItem,
            VirtualKeyCode::R => return RunState::ShowRemoveItem,

            // Save and Quit
            VirtualKeyCode::Escape => return RunState::SaveGame,

            // Cheating!
            VirtualKeyCode::Backslash => return RunState::ShowCheatMenu,

            _ => { return RunState::AwaitingInput }
        },
    }
    RunState::Ticking
}
|
try_previous_level
|
handler.go
|
// Copyright (c) 2017-2020 Uber Technologies Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/pborman/uuid"
"go.uber.org/yarpc/yarpcerrors"
"github.com/uber/cadence/.gen/go/health"
"github.com/uber/cadence/.gen/go/health/metaserver"
hist "github.com/uber/cadence/.gen/go/history"
"github.com/uber/cadence/.gen/go/history/historyserviceserver"
r "github.com/uber/cadence/.gen/go/replicator"
gen "github.com/uber/cadence/.gen/go/shared"
"github.com/uber/cadence/common"
"github.com/uber/cadence/common/definition"
"github.com/uber/cadence/common/log"
"github.com/uber/cadence/common/log/tag"
"github.com/uber/cadence/common/messaging"
"github.com/uber/cadence/common/metrics"
"github.com/uber/cadence/common/persistence"
"github.com/uber/cadence/common/quotas"
"github.com/uber/cadence/service/history/config"
"github.com/uber/cadence/service/history/engine"
"github.com/uber/cadence/service/history/events"
"github.com/uber/cadence/service/history/failover"
"github.com/uber/cadence/service/history/replication"
"github.com/uber/cadence/service/history/resource"
"github.com/uber/cadence/service/history/shard"
"github.com/uber/cadence/service/history/task"
)
// Handler - Thrift handler interface for history service
type (
	Handler struct {
		resource.Resource

		shuttingDown            int32 // accessed atomically; non-zero once PrepareToStop has run
		controller              shard.Controller
		tokenSerializer         common.TaskTokenSerializer
		startWG                 sync.WaitGroup // blocks RPC handlers until Start() completes
		config                  *config.Config
		historyEventNotifier    events.Notifier
		publisher               messaging.Producer // only set when global domains are enabled (see Start)
		rateLimiter             quotas.Limiter
		replicationTaskFetchers replication.TaskFetchers
		queueTaskProcessor      task.Processor // only set when the priority task processor is enabled (see Start)
		failoverCoordinator     failover.Coordinator
	}
)
var _ historyserviceserver.Interface = (*Handler)(nil)
var _ shard.EngineFactory = (*Handler)(nil)
var (
errDomainNotSet = &gen.BadRequestError{Message: "Domain not set on request."}
errWorkflowExecutionNotSet = &gen.BadRequestError{Message: "WorkflowExecution not set on request."}
errTaskListNotSet = &gen.BadRequestError{Message: "Tasklist not set."}
errWorkflowIDNotSet = &gen.BadRequestError{Message: "WorkflowId is not set on request."}
errRunIDNotValid = &gen.BadRequestError{Message: "RunID is not valid UUID."}
errSourceClusterNotSet = &gen.BadRequestError{Message: "Source Cluster not set on request."}
errShardIDNotSet = &gen.BadRequestError{Message: "Shard ID not set on request."}
errTimestampNotSet = &gen.BadRequestError{Message: "Timestamp not set on request."}
errInvalidTaskType = &gen.BadRequestError{Message: "Invalid task type"}
errHistoryHostThrottle = &gen.ServiceBusyError{Message: "History host rps exceeded"}
errShuttingDown = &gen.InternalServiceError{Message: "Shutting down"}
)
// NewHandler creates a thrift handler for the history service
func
|
(
resource resource.Resource,
config *config.Config,
) *Handler {
handler := &Handler{
Resource: resource,
config: config,
tokenSerializer: common.NewJSONTaskTokenSerializer(),
rateLimiter: quotas.NewDynamicRateLimiter(
func() float64 {
return float64(config.RPS())
},
),
}
// prevent us from trying to serve requests before shard controller is started and ready
handler.startWG.Add(1)
return handler
}
// RegisterHandler registers this handler with the RPC dispatcher; it must be
// called before Start().
func (h *Handler) RegisterHandler() {
	dispatcher := h.GetDispatcher()
	dispatcher.Register(historyserviceserver.New(h))
	dispatcher.Register(metaserver.New(h))
}
// Start starts the handler
func (h *Handler) Start() {
	// A kafka producer is only created when global (cross-cluster) domains
	// are enabled.
	if h.GetClusterMetadata().IsGlobalDomainEnabled() {
		var err error
		h.publisher, err = h.GetMessagingClient().NewProducerWithClusterName(h.GetClusterMetadata().GetCurrentClusterName())
		if err != nil {
			h.GetLogger().Fatal("Creating kafka producer failed", tag.Error(err))
		}
	}

	h.replicationTaskFetchers = replication.NewTaskFetchers(
		h.GetLogger(),
		h.config,
		h.GetClusterMetadata().GetReplicationConsumerConfig(),
		h.GetClusterMetadata(),
		h.GetClientBean(),
	)
	h.replicationTaskFetchers.Start()

	// Optional priority task processor, gated by dynamic config.
	if h.config.EnablePriorityTaskProcessor() {
		var err error
		taskPriorityAssigner := task.NewPriorityAssigner(
			h.GetClusterMetadata().GetCurrentClusterName(),
			h.GetDomainCache(),
			h.GetLogger(),
			h.GetMetricsClient(),
			h.config,
		)
		h.queueTaskProcessor, err = task.NewProcessor(
			taskPriorityAssigner,
			h.config,
			h.GetLogger(),
			h.GetMetricsClient(),
		)
		if err != nil {
			h.GetLogger().Fatal("Creating priority task processor failed", tag.Error(err))
		}
		h.queueTaskProcessor.Start()
	}

	h.controller = shard.NewShardController(
		h.Resource,
		h,
		h.config,
	)
	h.historyEventNotifier = events.NewNotifier(h.GetTimeSource(), h.GetMetricsClient(), h.config.GetShardID)
	// events notifier must starts before controller
	h.historyEventNotifier.Start()

	h.failoverCoordinator = failover.NewCoordinator(
		h.GetMetadataManager(),
		h.GetHistoryClient(),
		h.GetTimeSource(),
		h.config,
		h.GetMetricsClient(),
		h.GetLogger(),
	)
	if h.config.EnableGracefulFailover() {
		h.failoverCoordinator.Start()
	}

	h.controller.Start()

	// Unblocks every RPC handler waiting on startWG.Wait().
	h.startWG.Done()
}
// Stop stops the handler
func (h *Handler) Stop() {
	// Begin draining traffic before tearing components down.
	h.PrepareToStop()
	h.replicationTaskFetchers.Stop()
	// queueTaskProcessor is nil unless the priority task processor was
	// enabled in Start().
	if h.queueTaskProcessor != nil {
		h.queueTaskProcessor.Stop()
	}
	h.controller.Stop()
	h.historyEventNotifier.Stop()
	h.failoverCoordinator.Stop()
}
// PrepareToStop starts graceful traffic drain in preparation for shutdown
func (h *Handler) PrepareToStop() {
	atomic.StoreInt32(&h.shuttingDown, 1)
}

// isShuttingDown reports whether PrepareToStop has been called.
func (h *Handler) isShuttingDown() bool {
	return atomic.LoadInt32(&h.shuttingDown) != 0
}
// CreateEngine is implementation for HistoryEngineFactory used for creating the engine instance for shard
func (h *Handler) CreateEngine(
	shardContext shard.Context,
) engine.Engine {
	// Positional arguments: the order must match NewEngineWithShardContext's
	// signature exactly.
	return NewEngineWithShardContext(
		shardContext,
		h.GetVisibilityManager(),
		h.GetMatchingClient(),
		h.GetHistoryClient(),
		h.GetSDKClient(),
		h.historyEventNotifier,
		h.publisher,
		h.config,
		h.replicationTaskFetchers,
		h.GetMatchingRawClient(),
		h.queueTaskProcessor,
		h.failoverCoordinator,
	)
}
// Health is for health check; it waits for startup to finish and then
// always reports OK.
func (h *Handler) Health(ctx context.Context) (*health.HealthStatus, error) {
	h.startWG.Wait()
	h.GetLogger().Debug("History health check endpoint reached.")
	return &health.HealthStatus{Ok: true, Msg: common.StringPtr("OK")}, nil
}
// RecordActivityTaskHeartbeat - Record Activity Task Heart beat.
// Validates the request and task token, resolves the owning shard engine by
// workflow ID, and forwards the heartbeat to it.
func (h *Handler) RecordActivityTaskHeartbeat(
	ctx context.Context,
	wrappedRequest *hist.RecordActivityTaskHeartbeatRequest,
) (resp *gen.RecordActivityTaskHeartbeatResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryRecordActivityTaskHeartbeatScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	token, err := h.tokenSerializer.Deserialize(wrappedRequest.HeartbeatRequest.TaskToken)
	if err != nil {
		badToken := &gen.BadRequestError{Message: fmt.Sprintf("Error deserializing task token. Error: %v", err)}
		return nil, h.error(badToken, scope, domainID, "")
	}
	if err := validateTaskToken(token); err != nil {
		return nil, h.error(err, scope, domainID, "")
	}

	workflowID := token.WorkflowID
	engine, err := h.controller.GetEngine(workflowID)
	if err != nil {
		return nil, h.error(err, scope, domainID, workflowID)
	}
	response, err := engine.RecordActivityTaskHeartbeat(ctx, wrappedRequest)
	if err != nil {
		return nil, h.error(err, scope, domainID, workflowID)
	}
	return response, nil
}
// RecordActivityTaskStarted - Record Activity Task started.
func (h *Handler) RecordActivityTaskStarted(
	ctx context.Context,
	recordRequest *hist.RecordActivityTaskStartedRequest,
) (resp *hist.RecordActivityTaskStartedResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryRecordActivityTaskStartedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := recordRequest.GetDomainUUID()
	workflowExecution := recordRequest.WorkflowExecution
	workflowID := workflowExecution.GetWorkflowId()
	// Equivalent to checking domainID == "" (domainID was read from the same
	// getter above).
	if recordRequest.GetDomainUUID() == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, workflowID)
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, workflowID)
	}

	// Route to the shard engine that owns this workflow ID.
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	response, err2 := engine.RecordActivityTaskStarted(ctx, recordRequest)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return response, nil
}
// RecordDecisionTaskStarted - Record Decision Task started.
func (h *Handler) RecordDecisionTaskStarted(
	ctx context.Context,
	recordRequest *hist.RecordDecisionTaskStartedRequest,
) (resp *hist.RecordDecisionTaskStartedResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	h.GetLogger().Debug(fmt.Sprintf("RecordDecisionTaskStarted. DomainID: %v, WorkflowID: %v, RunID: %v, ScheduleID: %v",
		recordRequest.GetDomainUUID(),
		recordRequest.WorkflowExecution.GetWorkflowId(),
		common.StringDefault(recordRequest.WorkflowExecution.RunId),
		recordRequest.GetScheduleId()))

	scope := metrics.HistoryRecordDecisionTaskStartedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := recordRequest.GetDomainUUID()
	workflowExecution := recordRequest.WorkflowExecution
	workflowID := workflowExecution.GetWorkflowId()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, workflowID)
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, workflowID)
	}
	if recordRequest.PollRequest == nil || recordRequest.PollRequest.TaskList.GetName() == "" {
		return nil, h.error(errTaskListNotSet, scope, domainID, workflowID)
	}

	// Route to the shard engine that owns this workflow ID; engine-resolution
	// failures are logged here with full workflow context.
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		h.GetLogger().Error("RecordDecisionTaskStarted failed.",
			tag.Error(err1),
			tag.WorkflowID(recordRequest.WorkflowExecution.GetWorkflowId()),
			tag.WorkflowRunID(recordRequest.WorkflowExecution.GetRunId()),
			tag.WorkflowScheduleID(recordRequest.GetScheduleId()),
		)
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	response, err2 := engine.RecordDecisionTaskStarted(ctx, recordRequest)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return response, nil
}
// RespondActivityTaskCompleted - records completion of an activity task
func (h *Handler) RespondActivityTaskCompleted(
	ctx context.Context,
	wrappedRequest *hist.RespondActivityTaskCompletedRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryRespondActivityTaskCompletedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	// The workflow is identified via the task token carried in the request.
	completeRequest := wrappedRequest.CompleteRequest
	token, err0 := h.tokenSerializer.Deserialize(completeRequest.TaskToken)
	if err0 != nil {
		err0 = &gen.BadRequestError{Message: fmt.Sprintf("Error deserializing task token. Error: %v", err0)}
		return h.error(err0, scope, domainID, "")
	}
	err0 = validateTaskToken(token)
	if err0 != nil {
		return h.error(err0, scope, domainID, "")
	}

	// Route to the shard engine that owns this workflow ID.
	workflowID := token.WorkflowID
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.RespondActivityTaskCompleted(ctx, wrappedRequest)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// RespondActivityTaskFailed - records failure of an activity task
//
// Validates the request (domain set, host rate limit, task token), then
// routes the failure to the shard engine owning the workflow named in the
// token. Errors are wrapped by h.error for per-scope/domain metrics.
func (h *Handler) RespondActivityTaskFailed(
	ctx context.Context,
	wrappedRequest *hist.RespondActivityTaskFailedRequest,
) (retError error) {
	// Turn panics into retError rather than crashing the process.
	defer log.CapturePanic(h.GetLogger(), &retError)
	// Wait until the handler has fully started.
	h.startWG.Wait()
	scope := metrics.HistoryRespondActivityTaskFailedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// NOTE(review): FailedRequest is dereferenced without a nil check —
	// assumes the transport guarantees it is populated; confirm upstream.
	failRequest := wrappedRequest.FailedRequest
	token, err0 := h.tokenSerializer.Deserialize(failRequest.TaskToken)
	if err0 != nil {
		// A token that fails to deserialize is a client error.
		err0 = &gen.BadRequestError{Message: fmt.Sprintf("Error deserializing task token. Error: %v", err0)}
		return h.error(err0, scope, domainID, "")
	}
	err0 = validateTaskToken(token)
	if err0 != nil {
		return h.error(err0, scope, domainID, "")
	}
	// The workflow ID from the token determines the owning shard engine.
	workflowID := token.WorkflowID
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.RespondActivityTaskFailed(ctx, wrappedRequest)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// RespondActivityTaskCanceled - records failure of an activity task
//
// Mirrors RespondActivityTaskCompleted: validate domain and rate limit,
// deserialize and validate the task token, resolve the owning shard engine
// from the token's workflow ID, and forward the cancellation.
func (h *Handler) RespondActivityTaskCanceled(
	ctx context.Context,
	wrappedRequest *hist.RespondActivityTaskCanceledRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryRespondActivityTaskCanceledScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	// Identity of the activity being canceled lives in the task token.
	token, deserializeErr := h.tokenSerializer.Deserialize(wrappedRequest.CancelRequest.TaskToken)
	if deserializeErr != nil {
		deserializeErr = &gen.BadRequestError{Message: fmt.Sprintf("Error deserializing task token. Error: %v", deserializeErr)}
		return h.error(deserializeErr, scope, domainID, "")
	}
	if validationErr := validateTaskToken(token); validationErr != nil {
		return h.error(validationErr, scope, domainID, "")
	}

	workflowID := token.WorkflowID
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return h.error(engineErr, scope, domainID, workflowID)
	}
	if cancelErr := engine.RespondActivityTaskCanceled(ctx, wrappedRequest); cancelErr != nil {
		return h.error(cancelErr, scope, domainID, workflowID)
	}
	return nil
}
// RespondDecisionTaskCompleted - records completion of a decision task
//
// Validates the request, counts empty-decision completions, deserializes and
// validates the task token, then routes the completion to the owning shard
// engine and returns its response.
func (h *Handler) RespondDecisionTaskCompleted(
	ctx context.Context,
	wrappedRequest *hist.RespondDecisionTaskCompletedRequest,
) (resp *hist.RespondDecisionTaskCompletedResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryRespondDecisionTaskCompletedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	completeRequest := wrappedRequest.CompleteRequest
	// Track completions carrying no decisions — useful for spotting
	// heartbeat-style or misbehaving workers.
	if len(completeRequest.Decisions) == 0 {
		h.GetMetricsClient().IncCounter(scope, metrics.EmptyCompletionDecisionsCounter)
	}
	token, err0 := h.tokenSerializer.Deserialize(completeRequest.TaskToken)
	if err0 != nil {
		// Undeserializable token is a client error.
		err0 = &gen.BadRequestError{Message: fmt.Sprintf("Error deserializing task token. Error: %v", err0)}
		return nil, h.error(err0, scope, domainID, "")
	}
	h.GetLogger().Debug(fmt.Sprintf("RespondDecisionTaskCompleted. DomainID: %v, WorkflowID: %v, RunID: %v, ScheduleID: %v",
		token.DomainID,
		token.WorkflowID,
		token.RunID,
		token.ScheduleID))
	err0 = validateTaskToken(token)
	if err0 != nil {
		return nil, h.error(err0, scope, domainID, "")
	}
	// Route by the workflow ID carried in the token.
	workflowID := token.WorkflowID
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	response, err2 := engine.RespondDecisionTaskCompleted(ctx, wrappedRequest)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return response, nil
}
// RespondDecisionTaskFailed - failed response to decision task
//
// Validates the request and task token, emits a per-domain counter for
// unhandled-decision failures, then routes the failure to the owning shard
// engine.
func (h *Handler) RespondDecisionTaskFailed(
	ctx context.Context,
	wrappedRequest *hist.RespondDecisionTaskFailedRequest,
) (retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryRespondDecisionTaskFailedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	failedRequest := wrappedRequest.FailedRequest
	token, err0 := h.tokenSerializer.Deserialize(failedRequest.TaskToken)
	if err0 != nil {
		err0 = &gen.BadRequestError{Message: fmt.Sprintf("Error deserializing task token. Error: %v", err0)}
		return h.error(err0, scope, domainID, "")
	}
	h.GetLogger().Debug(fmt.Sprintf("RespondDecisionTaskFailed. DomainID: %v, WorkflowID: %v, RunID: %v, ScheduleID: %v",
		token.DomainID,
		token.WorkflowID,
		token.RunID,
		token.ScheduleID))
	// NOTE(review): the cause checked is UnhandledDecision, but the log line
	// and counter are labeled "non-deterministic" — confirm this mapping is
	// intentional before relying on the metric name.
	if failedRequest != nil && failedRequest.GetCause() == gen.DecisionTaskFailedCauseUnhandledDecision {
		h.GetLogger().Info("Non-Deterministic Error", tag.WorkflowDomainID(token.DomainID), tag.WorkflowID(token.WorkflowID), tag.WorkflowRunID(token.RunID))
		domainName, err := h.GetDomainCache().GetDomainName(token.DomainID)
		var domainTag metrics.Tag
		// Tag the counter with the domain name when resolvable; fall back to
		// the "unknown domain" tag so the metric is still emitted.
		if err == nil {
			domainTag = metrics.DomainTag(domainName)
		} else {
			domainTag = metrics.DomainUnknownTag()
		}
		h.GetMetricsClient().Scope(scope, domainTag).IncCounter(metrics.CadenceErrNonDeterministicCounter)
	}
	err0 = validateTaskToken(token)
	if err0 != nil {
		return h.error(err0, scope, domainID, "")
	}
	// Route by the workflow ID carried in the token.
	workflowID := token.WorkflowID
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.RespondDecisionTaskFailed(ctx, wrappedRequest)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// StartWorkflowExecution - creates a new workflow execution
//
// Validates domain and rate limit, resolves the shard engine from the
// requested workflow ID, and delegates the start call, wrapping any error
// through h.error for metrics tagging.
func (h *Handler) StartWorkflowExecution(
	ctx context.Context,
	wrappedRequest *hist.StartWorkflowExecutionRequest,
) (resp *gen.StartWorkflowExecutionResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryStartWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := wrappedRequest.GetDomainUUID()
	switch {
	case domainID == "":
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	case !h.rateLimiter.Allow():
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	workflowID := wrappedRequest.StartRequest.GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return nil, h.error(engineErr, scope, domainID, workflowID)
	}
	response, startErr := engine.StartWorkflowExecution(ctx, wrappedRequest)
	if startErr != nil {
		return nil, h.error(startErr, scope, domainID, workflowID)
	}
	return response, nil
}
// DescribeHistoryHost returns information about the internal states of a history host
//
// Reports shard ownership, domain-cache sizes, the shard controller's
// lifecycle status, and this host's address. Read-only; no rate limiting or
// metrics are applied.
func (h *Handler) DescribeHistoryHost(
	ctx context.Context,
	request *gen.DescribeHistoryHostRequest,
) (resp *gen.DescribeHistoryHostResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	itemsByID, itemsByName := h.GetDomainCache().GetCacheSize()

	// Map the controller's daemon status onto a human-readable label;
	// unknown statuses yield an empty string.
	var status string
	switch h.controller.Status() {
	case common.DaemonStatusInitialized:
		status = "initialized"
	case common.DaemonStatusStarted:
		status = "started"
	case common.DaemonStatusStopped:
		status = "stopped"
	}

	return &gen.DescribeHistoryHostResponse{
		NumberOfShards: common.Int32Ptr(int32(h.controller.NumShards())),
		ShardIDs:       h.controller.ShardIDs(),
		DomainCache: &gen.DomainCacheInfo{
			NumOfItemsInCacheByID:   &itemsByID,
			NumOfItemsInCacheByName: &itemsByName,
		},
		ShardControllerStatus: &status,
		Address:               common.StringPtr(h.GetHostInfo().GetAddress()),
	}, nil
}
// RemoveTask removes a single transfer, timer, or replication task from the
// shard's persisted task queue, identified by shard ID, task type, and task
// ID. Timer tasks additionally require the visibility timestamp to locate the
// row. Returns errInvalidTaskType for unsupported task types.
//
// Note: the previous doc comment was a copy-paste of DescribeHistoryHost's.
func (h *Handler) RemoveTask(
	ctx context.Context,
	request *gen.RemoveTaskRequest,
) (retError error) {
	// Consistency with every other handler in this file: convert panics into
	// an error return and do not serve before startup has completed.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	executionMgr, err := h.GetExecutionManager(int(request.GetShardID()))
	if err != nil {
		return err
	}
	switch taskType := common.TaskType(request.GetType()); taskType {
	case common.TaskTypeTransfer:
		return executionMgr.CompleteTransferTask(&persistence.CompleteTransferTaskRequest{
			TaskID: request.GetTaskID(),
		})
	case common.TaskTypeTimer:
		// Timer tasks are keyed by (visibility timestamp, task ID).
		return executionMgr.CompleteTimerTask(&persistence.CompleteTimerTaskRequest{
			VisibilityTimestamp: time.Unix(0, request.GetVisibilityTimestamp()),
			TaskID:              request.GetTaskID(),
		})
	case common.TaskTypeReplication:
		return executionMgr.CompleteReplicationTask(&persistence.CompleteReplicationTaskRequest{
			TaskID: request.GetTaskID(),
		})
	default:
		return errInvalidTaskType
	}
}
// CloseShard closes a shard hosted by this instance by removing its engine
// from the shard controller. Always succeeds from the caller's perspective.
func (h *Handler) CloseShard(
	ctx context.Context,
	request *gen.CloseShardRequest,
) (retError error) {
	// Consistency with every other handler in this file: convert panics into
	// an error return and do not serve before startup has completed.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	h.controller.RemoveEngineForShard(int(request.GetShardID()))
	return nil
}
// ResetQueue resets processing queue states
//
// Resolves the engine for the requested shard, then resets either the
// transfer or the timer queue for the given cluster. Unsupported task types
// yield errInvalidTaskType. Errors are wrapped through h.error.
func (h *Handler) ResetQueue(
	ctx context.Context,
	request *gen.ResetQueueRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryResetQueueScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	engine, engineErr := h.controller.GetEngineForShard(int(request.GetShardID()))
	if engineErr != nil {
		return h.error(engineErr, scope, "", "")
	}

	var resetErr error
	switch common.TaskType(request.GetType()) {
	case common.TaskTypeTransfer:
		resetErr = engine.ResetTransferQueue(ctx, request.GetClusterName())
	case common.TaskTypeTimer:
		resetErr = engine.ResetTimerQueue(ctx, request.GetClusterName())
	default:
		resetErr = errInvalidTaskType
	}
	if resetErr != nil {
		return h.error(resetErr, scope, "", "")
	}
	return nil
}
// DescribeQueue describes processing queue states
//
// Resolves the engine for the requested shard, then describes either the
// transfer or the timer queue for the given cluster. Unsupported task types
// yield errInvalidTaskType.
func (h *Handler) DescribeQueue(
	ctx context.Context,
	request *gen.DescribeQueueRequest,
) (resp *gen.DescribeQueueResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryDescribeQueueScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Routed by shard ID, not workflow ID: this is a shard admin operation.
	engine, err := h.controller.GetEngineForShard(int(request.GetShardID()))
	if err != nil {
		return nil, h.error(err, scope, "", "")
	}
	switch taskType := common.TaskType(request.GetType()); taskType {
	case common.TaskTypeTransfer:
		resp, err = engine.DescribeTransferQueue(ctx, request.GetClusterName())
	case common.TaskTypeTimer:
		resp, err = engine.DescribeTimerQueue(ctx, request.GetClusterName())
	default:
		err = errInvalidTaskType
	}
	if err != nil {
		return nil, h.error(err, scope, "", "")
	}
	return resp, nil
}
// DescribeMutableState - returns the internal analysis of workflow execution state
//
// Validates the domain, routes by workflow ID to the owning shard engine, and
// delegates. No rate limiting is applied here, unlike most handlers.
func (h *Handler) DescribeMutableState(
	ctx context.Context,
	request *hist.DescribeMutableStateRequest,
) (resp *hist.DescribeMutableStateResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	// NOTE(review): the scope constant name is misspelled ("Mutabel"); it is
	// declared in the metrics package and cannot be renamed from here.
	scope := metrics.HistoryDescribeMutabelStateScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	domainID := request.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	workflowExecution := request.Execution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	resp, err2 := engine.DescribeMutableState(ctx, request)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return resp, nil
}
// GetMutableState - returns the id of the next event in the execution's history
//
// Validates domain and rate limit, routes by the execution's workflow ID to
// the owning shard engine, and returns its mutable-state snapshot.
func (h *Handler) GetMutableState(
	ctx context.Context,
	getRequest *hist.GetMutableStateRequest,
) (resp *hist.GetMutableStateResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryGetMutableStateScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := getRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	workflowID := getRequest.Execution.GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return nil, h.error(engineErr, scope, domainID, workflowID)
	}
	state, stateErr := engine.GetMutableState(ctx, getRequest)
	if stateErr != nil {
		return nil, h.error(stateErr, scope, domainID, workflowID)
	}
	return state, nil
}
// PollMutableState - returns the id of the next event in the execution's history
//
// Long-poll variant of GetMutableState: validates domain and rate limit,
// routes by workflow ID to the owning shard engine, and delegates.
func (h *Handler) PollMutableState(
	ctx context.Context,
	getRequest *hist.PollMutableStateRequest,
) (resp *hist.PollMutableStateResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryPollMutableStateScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	domainID := getRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	workflowExecution := getRequest.Execution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	resp, err2 := engine.PollMutableState(ctx, getRequest)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return resp, nil
}
// DescribeWorkflowExecution returns information about the specified workflow execution.
//
// Validates domain and rate limit, routes by the execution's workflow ID to
// the owning shard engine, and delegates.
func (h *Handler) DescribeWorkflowExecution(
	ctx context.Context,
	request *hist.DescribeWorkflowExecutionRequest,
) (resp *gen.DescribeWorkflowExecutionResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryDescribeWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	domainID := request.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// NOTE(review): request.Request is dereferenced without a nil check —
	// assumes the transport guarantees the inner request is set; confirm.
	workflowExecution := request.Request.Execution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	resp, err2 := engine.DescribeWorkflowExecution(ctx, request)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return resp, nil
}
// RequestCancelWorkflowExecution - requests cancellation of a workflow
//
// Rejects during shutdown, validates that both the domain UUID and the
// caller-facing domain name are set, rate limits, then routes by workflow ID
// to the owning shard engine.
func (h *Handler) RequestCancelWorkflowExecution(
	ctx context.Context,
	request *hist.RequestCancelWorkflowExecutionRequest,
) (retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryRequestCancelWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Refuse new cancellations once shutdown has begun.
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := request.GetDomainUUID()
	// Both the internal domain UUID and the request's domain name must be set.
	if domainID == "" || request.CancelRequest.GetDomain() == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	cancelRequest := request.CancelRequest
	h.GetLogger().Debug(fmt.Sprintf("RequestCancelWorkflowExecution. DomainID: %v/%v, WorkflowID: %v, RunID: %v.",
		cancelRequest.GetDomain(),
		request.GetDomainUUID(),
		cancelRequest.WorkflowExecution.GetWorkflowId(),
		cancelRequest.WorkflowExecution.GetRunId()))
	workflowID := cancelRequest.WorkflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.RequestCancelWorkflowExecution(ctx, request)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in
// WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.
func (h *Handler) SignalWorkflowExecution(
	ctx context.Context,
	wrappedRequest *hist.SignalWorkflowExecutionRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistorySignalWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	// Refuse new signals once shutdown has begun.
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	workflowID := wrappedRequest.SignalRequest.WorkflowExecution.GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return h.error(engineErr, scope, domainID, workflowID)
	}
	if signalErr := engine.SignalWorkflowExecution(ctx, wrappedRequest); signalErr != nil {
		return h.error(signalErr, scope, domainID, workflowID)
	}
	return nil
}
// SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.
// If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history
// and a decision task being created for the execution.
// If workflow is not running or not found, this results in WorkflowExecutionStarted and WorkflowExecutionSignaled
// event recorded in history, and a decision task being created for the execution
func (h *Handler) SignalWithStartWorkflowExecution(
	ctx context.Context,
	wrappedRequest *hist.SignalWithStartWorkflowExecutionRequest,
) (resp *gen.StartWorkflowExecutionResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistorySignalWithStartWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Refuse new work once shutdown has begun.
	if h.isShuttingDown() {
		return nil, errShuttingDown
	}
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// Routing uses the workflow ID from the signal-with-start payload.
	signalWithStartRequest := wrappedRequest.SignalWithStartRequest
	workflowID := signalWithStartRequest.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	resp, err2 := engine.SignalWithStartWorkflowExecution(ctx, wrappedRequest)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return resp, nil
}
// RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. This is currently
// used to clean execution info when signal decision finished.
func (h *Handler) RemoveSignalMutableState(
	ctx context.Context,
	wrappedRequest *hist.RemoveSignalMutableStateRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryRemoveSignalMutableStateScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	// Refuse new work once shutdown has begun.
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	workflowID := wrappedRequest.WorkflowExecution.GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return h.error(engineErr, scope, domainID, workflowID)
	}
	if removeErr := engine.RemoveSignalMutableState(ctx, wrappedRequest); removeErr != nil {
		return h.error(removeErr, scope, domainID, workflowID)
	}
	return nil
}
// TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event
// in the history and immediately terminating the execution instance.
func (h *Handler) TerminateWorkflowExecution(
	ctx context.Context,
	wrappedRequest *hist.TerminateWorkflowExecutionRequest,
) (retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryTerminateWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Refuse new terminations once shutdown has begun.
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// Route by the workflow ID of the execution being terminated.
	workflowExecution := wrappedRequest.TerminateRequest.WorkflowExecution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.TerminateWorkflowExecution(ctx, wrappedRequest)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// ResetWorkflowExecution reset an existing workflow execution
// in the history and immediately terminating the execution instance.
func (h *Handler) ResetWorkflowExecution(
	ctx context.Context,
	wrappedRequest *hist.ResetWorkflowExecutionRequest,
) (resp *gen.ResetWorkflowExecutionResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryResetWorkflowExecutionScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Refuse new resets once shutdown has begun.
	if h.isShuttingDown() {
		return nil, errShuttingDown
	}
	domainID := wrappedRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// Route by the workflow ID of the execution being reset.
	workflowExecution := wrappedRequest.ResetRequest.WorkflowExecution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return nil, h.error(err1, scope, domainID, workflowID)
	}
	resp, err2 := engine.ResetWorkflowExecution(ctx, wrappedRequest)
	if err2 != nil {
		return nil, h.error(err2, scope, domainID, workflowID)
	}
	return resp, nil
}
// QueryWorkflow queries a workflow.
//
// Validates shutdown state, domain, and rate limit, then routes the query by
// workflow ID to the owning shard engine and returns its response.
func (h *Handler) QueryWorkflow(
	ctx context.Context,
	request *hist.QueryWorkflowRequest,
) (resp *hist.QueryWorkflowResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryQueryWorkflowScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	if h.isShuttingDown() {
		return nil, errShuttingDown
	}
	domainID := request.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	workflowID := request.GetRequest().GetExecution().GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return nil, h.error(engineErr, scope, domainID, workflowID)
	}
	result, queryErr := engine.QueryWorkflow(ctx, request)
	if queryErr != nil {
		return nil, h.error(queryErr, scope, domainID, workflowID)
	}
	return result, nil
}
// ScheduleDecisionTask is used for creating a decision task for already started workflow execution. This is mainly
// used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts
// child execution without creating the decision task and then calls this API after updating the mutable state of
// parent execution.
func (h *Handler) ScheduleDecisionTask(
	ctx context.Context,
	request *hist.ScheduleDecisionTaskRequest,
) (retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryScheduleDecisionTaskScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Refuse new work once shutdown has begun.
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := request.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// Unlike some handlers, the execution is explicitly nil-checked here
	// before use.
	if request.WorkflowExecution == nil {
		return h.error(errWorkflowExecutionNotSet, scope, domainID, "")
	}
	workflowExecution := request.WorkflowExecution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.ScheduleDecisionTask(ctx, request)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.
// This is mainly called by transfer queue processor during the processing of DeleteExecution task.
func (h *Handler) RecordChildExecutionCompleted(
	ctx context.Context,
	request *hist.RecordChildExecutionCompletedRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryRecordChildExecutionCompletedScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := request.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	// The parent execution must be identified before routing.
	if request.WorkflowExecution == nil {
		return h.error(errWorkflowExecutionNotSet, scope, domainID, "")
	}

	workflowID := request.WorkflowExecution.GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return h.error(engineErr, scope, domainID, workflowID)
	}
	if recordErr := engine.RecordChildExecutionCompleted(ctx, request); recordErr != nil {
		return h.error(recordErr, scope, domainID, workflowID)
	}
	return nil
}
// ResetStickyTaskList reset the volatile information in mutable state of a given workflow.
// Volatile information are the information related to client, such as:
// 1. StickyTaskList
// 2. StickyScheduleToStartTimeout
// 3. ClientLibraryVersion
// 4. ClientFeatureVersion
// 5. ClientImpl
func (h *Handler) ResetStickyTaskList(
	ctx context.Context,
	resetRequest *hist.ResetStickyTaskListRequest,
) (resp *hist.ResetStickyTaskListResponse, retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryResetStickyTaskListScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Refuse new work once shutdown has begun.
	if h.isShuttingDown() {
		return nil, errShuttingDown
	}
	domainID := resetRequest.GetDomainUUID()
	if domainID == "" {
		return nil, h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return nil, h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	workflowID := resetRequest.Execution.GetWorkflowId()
	engine, err := h.controller.GetEngine(workflowID)
	if err != nil {
		return nil, h.error(err, scope, domainID, workflowID)
	}
	resp, err = engine.ResetStickyTaskList(ctx, resetRequest)
	if err != nil {
		return nil, h.error(err, scope, domainID, workflowID)
	}
	return resp, nil
}
// ReplicateEvents is called by processor to replicate history events for passive domains
//
// Validates shutdown state, domain, and rate limit, then routes the
// replication request by workflow ID to the owning shard engine.
func (h *Handler) ReplicateEvents(
	ctx context.Context,
	replicateRequest *hist.ReplicateEventsRequest,
) (retError error) {
	// Convert panics into retError instead of crashing the host.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryReplicateEventsScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// NOTE: here the shutdown check runs after metrics are recorded; the
	// V2/raw variants below check before — ordering differs across handlers.
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := replicateRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	workflowExecution := replicateRequest.WorkflowExecution
	workflowID := workflowExecution.GetWorkflowId()
	engine, err1 := h.controller.GetEngine(workflowID)
	if err1 != nil {
		return h.error(err1, scope, domainID, workflowID)
	}
	err2 := engine.ReplicateEvents(ctx, replicateRequest)
	if err2 != nil {
		return h.error(err2, scope, domainID, workflowID)
	}
	return nil
}
// ReplicateRawEvents is called by processor to replicate history raw events for passive domains
//
// Note: the shutdown check here runs BEFORE metrics are recorded (unlike
// ReplicateEvents); that ordering is preserved.
func (h *Handler) ReplicateRawEvents(
	ctx context.Context,
	replicateRequest *hist.ReplicateRawEventsRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	if h.isShuttingDown() {
		return errShuttingDown
	}

	scope := metrics.HistoryReplicateRawEventsScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()

	domainID := replicateRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	workflowID := replicateRequest.WorkflowExecution.GetWorkflowId()
	engine, engineErr := h.controller.GetEngine(workflowID)
	if engineErr != nil {
		return h.error(engineErr, scope, domainID, workflowID)
	}
	if replicateErr := engine.ReplicateRawEvents(ctx, replicateRequest); replicateErr != nil {
		return h.error(replicateErr, scope, domainID, workflowID)
	}
	return nil
}
// ReplicateEventsV2 is called by processor to replicate history events for passive domains
func (h *Handler) ReplicateEventsV2(
	ctx context.Context,
	replicateRequest *hist.ReplicateEventsV2Request,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	// For this API the shutdown check happens before metrics are emitted.
	if h.isShuttingDown() {
		return errShuttingDown
	}

	scope := metrics.HistoryReplicateEventsV2Scope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	timer := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer timer.Stop()

	domainID := replicateRequest.GetDomainUUID()
	if domainID == "" {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}

	// Route to the engine owning the shard for this workflow.
	workflowID := replicateRequest.WorkflowExecution.GetWorkflowId()
	engine, err := h.controller.GetEngine(workflowID)
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	if err := engine.ReplicateEventsV2(ctx, replicateRequest); err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	return nil
}
// SyncShardStatus is called by processor to sync history shard information from another cluster
func (h *Handler) SyncShardStatus(
	ctx context.Context,
	syncShardStatusRequest *hist.SyncShardStatusRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistorySyncShardStatusScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	timer := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer timer.Stop()

	if h.isShuttingDown() {
		return errShuttingDown
	}
	if !h.rateLimiter.Allow() {
		return h.error(errHistoryHostThrottle, scope, "", "")
	}

	// Source cluster, shard ID and timestamp are all mandatory.
	switch {
	case syncShardStatusRequest.SourceCluster == nil:
		return h.error(errSourceClusterNotSet, scope, "", "")
	case syncShardStatusRequest.ShardId == nil:
		return h.error(errShardIDNotSet, scope, "", "")
	case syncShardStatusRequest.Timestamp == nil:
		return h.error(errTimestampNotSet, scope, "", "")
	}

	// shard ID is already provided in the request
	engine, err := h.controller.GetEngineForShard(int(syncShardStatusRequest.GetShardId()))
	if err != nil {
		return h.error(err, scope, "", "")
	}
	if err := engine.SyncShardStatus(ctx, syncShardStatusRequest); err != nil {
		return h.error(err, scope, "", "")
	}
	return nil
}
// SyncActivity is called by processor to sync activity
func (h *Handler) SyncActivity(
	ctx context.Context,
	syncActivityRequest *hist.SyncActivityRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistorySyncActivityScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := syncActivityRequest.GetDomainId()
	// domain ID must be present and must parse as a UUID
	if syncActivityRequest.DomainId == nil || uuid.Parse(syncActivityRequest.GetDomainId()) == nil {
		return h.error(errDomainNotSet, scope, domainID, "")
	}
	if ok := h.rateLimiter.Allow(); !ok {
		return h.error(errHistoryHostThrottle, scope, domainID, "")
	}
	if syncActivityRequest.WorkflowId == nil {
		return h.error(errWorkflowIDNotSet, scope, domainID, "")
	}
	// run ID must be present and must parse as a UUID
	if syncActivityRequest.RunId == nil || uuid.Parse(syncActivityRequest.GetRunId()) == nil {
		return h.error(errRunIDNotValid, scope, domainID, "")
	}
	workflowID := syncActivityRequest.GetWorkflowId()
	// route to the engine owning the shard for this workflow
	engine, err := h.controller.GetEngine(workflowID)
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	err = engine.SyncActivity(ctx, syncActivityRequest)
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	return nil
}
// GetReplicationMessages is called by remote peers to get replicated messages for cross DC replication
func (h *Handler) GetReplicationMessages(
	ctx context.Context,
	request *r.GetReplicationMessagesRequest,
) (resp *r.GetReplicationMessagesResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	h.GetLogger().Debug("Received GetReplicationMessages call.")
	scope := metrics.HistoryGetReplicationMessagesScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	if h.isShuttingDown() {
		return nil, errShuttingDown
	}
	// Fan out one goroutine per shard token, collecting results in a sync.Map
	// keyed by shard ID. A shard that fails is logged and omitted from the
	// response rather than failing the whole call.
	var wg sync.WaitGroup
	wg.Add(len(request.Tokens))
	result := new(sync.Map)
	for _, token := range request.Tokens {
		// token is passed as an argument so each goroutine gets its own copy
		// instead of capturing the loop variable.
		go func(token *r.ReplicationToken) {
			defer wg.Done()
			engine, err := h.controller.GetEngineForShard(int(token.GetShardID()))
			if err != nil {
				h.GetLogger().Warn("History engine not found for shard", tag.Error(err))
				return
			}
			tasks, err := engine.GetReplicationMessages(
				ctx,
				request.GetClusterName(),
				token.GetLastRetrievedMessageId(),
			)
			if err != nil {
				h.GetLogger().Warn("Failed to get replication tasks for shard", tag.Error(err))
				return
			}
			result.Store(token.GetShardID(), tasks)
		}(token)
	}
	wg.Wait()
	// Flatten the sync.Map into the response map once all goroutines are done.
	messagesByShard := make(map[int32]*r.ReplicationMessages)
	result.Range(func(key, value interface{}) bool {
		shardID := key.(int32)
		tasks := value.(*r.ReplicationMessages)
		messagesByShard[shardID] = tasks
		return true
	})
	h.GetLogger().Debug("GetReplicationMessages succeeded.")
	return &r.GetReplicationMessagesResponse{MessagesByShard: messagesByShard}, nil
}
// GetDLQReplicationMessages is called by remote peers to get replicated messages for DLQ merging
func (h *Handler) GetDLQReplicationMessages(
	ctx context.Context,
	request *r.GetDLQReplicationMessagesRequest,
) (resp *r.GetDLQReplicationMessagesResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryGetDLQReplicationMessagesScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	if h.isShuttingDown() {
		return nil, errShuttingDown
	}
	taskInfoPerExecution := map[definition.WorkflowIdentifier][]*r.ReplicationTaskInfo{}
	// do batch based on workflow ID and run ID
	for _, taskInfo := range request.GetTaskInfos() {
		identity := definition.NewWorkflowIdentifier(
			taskInfo.GetDomainID(),
			taskInfo.GetWorkflowID(),
			taskInfo.GetRunID(),
		)
		if _, ok := taskInfoPerExecution[identity]; !ok {
			taskInfoPerExecution[identity] = []*r.ReplicationTaskInfo{}
		}
		taskInfoPerExecution[identity] = append(taskInfoPerExecution[identity], taskInfo)
	}
	// Fan out one goroutine per execution batch; results are funneled through
	// a buffered channel sized for the worst case (every task produces output).
	var wg sync.WaitGroup
	wg.Add(len(taskInfoPerExecution))
	tasksChan := make(chan *r.ReplicationTask, len(request.GetTaskInfos()))
	handleTaskInfoPerExecution := func(taskInfos []*r.ReplicationTaskInfo) {
		defer wg.Done()
		if len(taskInfos) == 0 {
			return
		}
		// all tasks in the batch share one workflow ID, so any entry routes
		engine, err := h.controller.GetEngine(
			taskInfos[0].GetWorkflowID(),
		)
		if err != nil {
			h.GetLogger().Warn("History engine not found for workflow ID.", tag.Error(err))
			return
		}
		tasks, err := engine.GetDLQReplicationMessages(
			ctx,
			taskInfos,
		)
		if err != nil {
			h.GetLogger().Error("Failed to get dlq replication tasks.", tag.Error(err))
			return
		}
		for _, task := range tasks {
			tasksChan <- task
		}
	}
	for _, replicationTaskInfos := range taskInfoPerExecution {
		go handleTaskInfoPerExecution(replicationTaskInfos)
	}
	wg.Wait()
	// closing the channel lets the range below terminate after draining
	close(tasksChan)
	replicationTasks := make([]*r.ReplicationTask, 0, len(tasksChan))
	for task := range tasksChan {
		replicationTasks = append(replicationTasks, task)
	}
	return &r.GetDLQReplicationMessagesResponse{
		ReplicationTasks: replicationTasks,
	}, nil
}
// ReapplyEvents applies stale events to the current workflow and the current run
func (h *Handler) ReapplyEvents(
	ctx context.Context,
	request *hist.ReapplyEventsRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryReapplyEventsScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	if h.isShuttingDown() {
		return errShuttingDown
	}
	domainID := request.GetDomainUUID()
	workflowID := request.GetRequest().GetWorkflowExecution().GetWorkflowId()
	engine, err := h.controller.GetEngine(workflowID)
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	// deserialize history event object
	// events arrive as a thrift-rw encoded blob and are decoded here before
	// being handed to the engine
	historyEvents, err := h.GetPayloadSerializer().DeserializeBatchEvents(&persistence.DataBlob{
		Encoding: common.EncodingTypeThriftRW,
		Data:     request.GetRequest().GetEvents().GetData(),
	})
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	execution := request.GetRequest().GetWorkflowExecution()
	if err := engine.ReapplyEvents(
		ctx,
		request.GetDomainUUID(),
		execution.GetWorkflowId(),
		execution.GetRunId(),
		historyEvents,
	); err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	return nil
}
// ReadDLQMessages reads replication DLQ messages
func (h *Handler) ReadDLQMessages(
	ctx context.Context,
	request *r.ReadDLQMessagesRequest,
) (resp *r.ReadDLQMessagesResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryReadDLQMessagesScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	timer := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer timer.Stop()

	if h.isShuttingDown() {
		return nil, errShuttingDown
	}

	// The request addresses a shard directly; delegate to its engine.
	shardID := int(request.GetShardID())
	engine, err := h.controller.GetEngineForShard(shardID)
	if err != nil {
		return nil, h.error(err, scope, "", "")
	}
	return engine.ReadDLQMessages(ctx, request)
}
// PurgeDLQMessages deletes replication DLQ messages
func (h *Handler) PurgeDLQMessages(
	ctx context.Context,
	request *r.PurgeDLQMessagesRequest,
) (retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	scope := metrics.HistoryPurgeDLQMessagesScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	timer := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer timer.Stop()

	if h.isShuttingDown() {
		return errShuttingDown
	}

	// The request addresses a shard directly; delegate to its engine.
	shardID := int(request.GetShardID())
	engine, err := h.controller.GetEngineForShard(shardID)
	if err != nil {
		return h.error(err, scope, "", "")
	}
	return engine.PurgeDLQMessages(ctx, request)
}
// MergeDLQMessages reads and applies replication DLQ messages
func (h *Handler) MergeDLQMessages(
	ctx context.Context,
	request *r.MergeDLQMessagesRequest,
) (resp *r.MergeDLQMessagesResponse, retError error) {
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()

	// For this API the shutdown check happens before metrics are emitted.
	if h.isShuttingDown() {
		return nil, errShuttingDown
	}

	scope := metrics.HistoryMergeDLQMessagesScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	timer := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer timer.Stop()

	// The request addresses a shard directly; delegate to its engine.
	shardID := int(request.GetShardID())
	engine, err := h.controller.GetEngineForShard(shardID)
	if err != nil {
		return nil, h.error(err, scope, "", "")
	}
	return engine.MergeDLQMessages(ctx, request)
}
// RefreshWorkflowTasks refreshes all the tasks of a workflow
func (h *Handler) RefreshWorkflowTasks(
	ctx context.Context,
	request *hist.RefreshWorkflowTasksRequest) (retError error) {
	// Consistency fix: capture panics into retError and wait for service start,
	// the same way every other handler API in this file does.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryRefreshWorkflowTasksScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	if h.isShuttingDown() {
		return errShuttingDown
	}
	// GetDomainUIID is the accessor generated from the IDL (field name carries a
	// typo there); do not rename it here.
	domainID := request.GetDomainUIID()
	execution := request.GetRequest().GetExecution()
	workflowID := execution.GetWorkflowId()
	engine, err := h.controller.GetEngine(workflowID)
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	err = engine.RefreshWorkflowTasks(
		ctx,
		domainID,
		gen.WorkflowExecution{
			WorkflowId: execution.WorkflowId,
			RunId:      execution.RunId,
		},
	)
	if err != nil {
		return h.error(err, scope, domainID, workflowID)
	}
	return nil
}
// NotifyFailoverMarkers sends the failover markers to failover coordinator.
// The coordinator decides when the failover finishes based on received failover marker.
func (h *Handler) NotifyFailoverMarkers(
	ctx context.Context,
	request *hist.NotifyFailoverMarkersRequest,
) (retError error) {
	// Consistency fix: capture panics into retError and wait for service start,
	// the same way every other handler API in this file does.
	defer log.CapturePanic(h.GetLogger(), &retError)
	h.startWG.Wait()
	scope := metrics.HistoryNotifyFailoverMarkersScope
	h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests)
	sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency)
	defer sw.Stop()
	// Forward every marker token to the failover coordinator.
	for _, token := range request.GetFailoverMarkerTokens() {
		marker := token.GetFailoverMarker()
		h.GetLogger().Debug("Handling failover maker", tag.WorkflowDomainID(marker.GetDomainID()))
		h.failoverCoordinator.ReceiveFailoverMarkers(token.GetShardIDs(), token.GetFailoverMarker())
	}
	return nil
}
// convertError is a helper method to convert ShardOwnershipLostError from persistence layer returned by various
// HistoryEngine API calls to ShardOwnershipLost error return by HistoryService for client to be redirected to the
// correct shard.
// Other known persistence errors are mapped to generic service errors; anything
// else is returned unchanged.
func (h *Handler) convertError(err error) error {
	switch err.(type) {
	case *persistence.ShardOwnershipLostError:
		shardID := err.(*persistence.ShardOwnershipLostError).ShardID
		// NOTE(review): string(shardID) converts the int to the UTF-8 encoding of
		// that code point, not to its decimal representation. This is only correct
		// if other resolver call sites build shard keys the same way — confirm
		// before changing this to strconv.Itoa.
		info, err := h.GetHistoryServiceResolver().Lookup(string(shardID))
		if err == nil {
			return shard.CreateShardOwnershipLostError(h.GetHostInfo().GetAddress(), info.GetAddress())
		}
		// resolver lookup failed: report ownership loss without a new owner address
		return shard.CreateShardOwnershipLostError(h.GetHostInfo().GetAddress(), "")
	case *persistence.WorkflowExecutionAlreadyStartedError:
		err := err.(*persistence.WorkflowExecutionAlreadyStartedError)
		return &gen.InternalServiceError{Message: err.Msg}
	case *persistence.CurrentWorkflowConditionFailedError:
		err := err.(*persistence.CurrentWorkflowConditionFailedError)
		return &gen.InternalServiceError{Message: err.Msg}
	case *persistence.TransactionSizeLimitError:
		err := err.(*persistence.TransactionSizeLimitError)
		return &gen.BadRequestError{Message: err.Msg}
	}
	return err
}
// updateErrorMetric increments the error counter that corresponds to the type
// of err under the given metrics scope. Unrecognized errors count as
// CadenceFailures and are logged with domain/workflow tags.
func (h *Handler) updateErrorMetric(
	scope int,
	domainID string,
	workflowID string,
	err error,
) {
	// context cancellation/timeout is counted separately and not treated as a failure
	if err == context.DeadlineExceeded || err == context.Canceled {
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrContextTimeoutCounter)
		return
	}
	switch err := err.(type) {
	case *hist.ShardOwnershipLostError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrShardOwnershipLostCounter)
	case *hist.EventAlreadyStartedError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrEventAlreadyStartedCounter)
	case *gen.BadRequestError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrBadRequestCounter)
	case *gen.DomainNotActiveError:
		// domain-not-active is counted under the bad-request counter
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrBadRequestCounter)
	case *gen.WorkflowExecutionAlreadyStartedError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrExecutionAlreadyStartedCounter)
	case *gen.EntityNotExistsError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrEntityNotExistsCounter)
	case *gen.CancellationAlreadyRequestedError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrCancellationAlreadyRequestedCounter)
	case *gen.LimitExceededError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrLimitExceededCounter)
	case *gen.RetryTaskError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrRetryTaskCounter)
	case *gen.RetryTaskV2Error:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrRetryTaskCounter)
	case *gen.ServiceBusyError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrServiceBusyCounter)
	case *yarpcerrors.Status:
		// a yarpc deadline-exceeded bumps the timeout counter AND still falls
		// through to the failures counter below
		if err.Code() == yarpcerrors.CodeDeadlineExceeded {
			h.GetMetricsClient().IncCounter(scope, metrics.CadenceErrContextTimeoutCounter)
		}
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceFailures)
	case *gen.InternalServiceError:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceFailures)
		h.GetLogger().Error("Internal service error",
			tag.Error(err),
			tag.WorkflowID(workflowID),
			tag.WorkflowDomainID(domainID))
	default:
		h.GetMetricsClient().IncCounter(scope, metrics.CadenceFailures)
		h.getLoggerWithTags(domainID, workflowID).Error("Uncategorized error", tag.Error(err))
	}
}
// error converts err into its client-facing form (see convertError), records
// the matching error metric under scope, and returns the converted error.
func (h *Handler) error(
	err error,
	scope int,
	domainID string,
	workflowID string,
) error {
	err = h.convertError(err)
	h.updateErrorMetric(scope, domainID, workflowID, err)
	return err
}
// getLoggerWithTags returns the handler logger, tagged with the domain ID
// and/or workflow ID when those are non-empty.
func (h *Handler) getLoggerWithTags(
	domainID string,
	workflowID string,
) log.Logger {
	taggedLogger := h.GetLogger()
	if domainID != "" {
		taggedLogger = taggedLogger.WithTags(tag.WorkflowDomainID(domainID))
	}
	if workflowID != "" {
		taggedLogger = taggedLogger.WithTags(tag.WorkflowID(workflowID))
	}
	return taggedLogger
}
// validateTaskToken checks that a task token carries a workflow ID and that,
// when a run ID is present, it parses as a UUID.
func validateTaskToken(token *common.TaskToken) error {
	if token.WorkflowID == "" {
		return errWorkflowIDNotSet
	}
	// an empty run ID is accepted; a non-empty one must be a valid UUID
	if token.RunID != "" && uuid.Parse(token.RunID) == nil {
		return errRunIDNotValid
	}
	return nil
}
|
NewHandler
|
bal_table.go
|
// Copyright (c) 2019 The BFE Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// table for maintain backend cluster
package bfe_balance
import (
"fmt"
"strings"
"sync"
)
import (
"github.com/baidu/go-lib/log"
)
import (
"github.com/bfenetworks/bfe/bfe_balance/backend"
"github.com/bfenetworks/bfe/bfe_balance/bal_gslb"
"github.com/bfenetworks/bfe/bfe_config/bfe_cluster_conf/cluster_table_conf"
"github.com/bfenetworks/bfe/bfe_config/bfe_cluster_conf/gslb_conf"
"github.com/bfenetworks/bfe/bfe_route"
)
// BalMap holds mappings from clusterName to BalanceGslb.
type BalMap map[string]*bal_gslb.BalanceGslb

// BalTable maintains the load balancers for all backend clusters.
// balTable and versions are guarded by lock.
type BalTable struct {
	lock sync.RWMutex
	balTable BalMap // from cluster to balancer
	versions BalVersion
}

// BalVersion records which configuration versions are currently loaded.
type BalVersion struct {
	ClusterTableConfVer string // cluster table conf version
	GslbConfTimeStamp string // timestamp of gslb-conf
	GslbConfSrc string // which gslb-scheduler come from?
}

// BalTableState is a snapshot of balancer states, produced by GetState.
type BalTableState struct {
	Balancers map[string]*bal_gslb.GslbState // state of cluster
	BackendNum int // size of backendTable
}
func
|
(checkConfFetcher backend.CheckConfFetcher) *BalTable {
t := new(BalTable)
t.balTable = make(BalMap)
backend.SetCheckConfFetcher(checkConfFetcher)
return t
}
// BalTableConfLoad loads the gslb conf and the cluster table conf from the
// given files. The gslb conf is loaded first; if it fails, the (zero-valued)
// backend conf is returned alongside the error. A backend conf load failure
// is logged and reported through the returned error as well.
func (t *BalTable) BalTableConfLoad(gslbConfFilename, clusterTableFilename string) (
	gslb_conf.GslbConf, cluster_table_conf.ClusterTableConf, error) {
	var backendConf cluster_table_conf.ClusterTableConf

	gslbConf, err := gslb_conf.GslbConfLoad(gslbConfFilename)
	if err != nil {
		log.Logger.Error("gslb_conf.GslbConfLoad err [%s]", err)
		return gslbConf, backendConf, err
	}

	backendConf, err = cluster_table_conf.ClusterTableLoad(clusterTableFilename)
	if err != nil {
		log.Logger.Error("clusterBackendConfLoad err [%s]", err)
	}
	return gslbConf, backendConf, err
}
// Init loads both config files and initializes the balancer table:
// first the gslb balancers, then their backends. Any failure aborts init.
func (t *BalTable) Init(gslbConfFilename, clusterTableFilename string) error {
	gslbConf, backendConf, err := t.BalTableConfLoad(gslbConfFilename, clusterTableFilename)
	if err != nil {
		log.Logger.Error("BalTable conf load err %s", err)
		return err
	}
	// init gslb
	if err := t.gslbInit(gslbConf); err != nil {
		log.Logger.Error("clusterTable gslb init err [%s]", err)
		return err
	}
	// init backend
	if err := t.backendInit(backendConf); err != nil {
		log.Logger.Error("clusterTable backend init err [%s]", err)
		return err
	}
	log.Logger.Info("init bal table success")
	return nil
}
// gslbInit creates one BalanceGslb per cluster from the gslb conf and records
// the conf's timestamp/source. Clusters that fail to initialize are collected
// and reported together; successful clusters are still installed.
// NOTE(review): no locking here — appears to rely on being called from Init()
// before the table is shared; confirm before calling from elsewhere.
func (t *BalTable) gslbInit(gslbConfs gslb_conf.GslbConf) error {
	fails := make([]string, 0)
	for clusterName, gslbConf := range *gslbConfs.Clusters {
		bal := bal_gslb.NewBalanceGslb(clusterName)
		err := bal.Init(gslbConf)
		if err != nil {
			log.Logger.Error("BalTable.gslbInit():err[%s] in bal_gslb.GslbInit() for %s",
				err.Error(), clusterName)
			fails = append(fails, clusterName)
			continue
		}
		t.balTable[clusterName] = bal
	}
	// update versions
	t.versions.GslbConfTimeStamp = *gslbConfs.Ts
	t.versions.GslbConfSrc = *gslbConfs.Hostname
	if len(fails) != 0 {
		return fmt.Errorf("error in ClusterTable.gslbInit() for [%s]",
			strings.Join(fails, ","))
	}
	return nil
}
// backendInit attaches backend configuration to every balancer already in the
// table. Clusters whose backend conf is missing or fails to apply are
// collected and reported together in the returned error.
func (t *BalTable) backendInit(backendConfs cluster_table_conf.ClusterTableConf) error {
	fails := make([]string, 0)
	for clusterName, bal := range t.balTable {
		// get gslbConf
		backendConf, ok := (*backendConfs.Config)[clusterName]
		if !ok {
			// external checking guarantee. should not come here in theory
			log.Logger.Error("BalTable.backendInit():no backend conf for %s", clusterName)
			fails = append(fails, clusterName)
			continue
		}
		// initialize
		err := bal.BackendInit(backendConf)
		if err != nil {
			log.Logger.Error("ClusterTable.backendInit():err[%s] in cluster.BackendInit() for %s",
				err.Error(), clusterName)
			fails = append(fails, clusterName)
			continue
		}
	}
	// update versions
	t.versions.ClusterTableConfVer = *backendConfs.Version
	if len(fails) != 0 {
		return fmt.Errorf("error in ClusterTable.backendInit() for [%s]",
			strings.Join(fails, ","))
	}
	return nil
}
// SetGslbBasic sets gslb basic conf (from server data conf) for BalTable.
//
// Note:
//  - SetGslbBasic() is called after server reload gslb conf or server data conf
//  - SetGslbBasic() should be concurrency safe
func (t *BalTable) SetGslbBasic(clusterTable *bfe_route.ClusterTable) {
	t.lock.Lock()
	defer t.lock.Unlock()

	if clusterTable == nil {
		return
	}

	for name, balancer := range t.balTable {
		// clusters missing from the route table are silently skipped
		cluster, err := clusterTable.Lookup(name)
		if err != nil {
			continue
		}

		balancer.SetGslbBasic(*cluster.GslbBasic)
	}
}
// BalTableReload swaps in freshly reloaded gslb and backend configuration.
// Existing balancers are reused (and reloaded) when their cluster is still
// configured; balancers for removed clusters are released. Per-cluster
// failures are accumulated and reported together at the end.
//
// Fix: the table lock is now released via defer, so a panic inside
// bal.Reload()/bal.BackendReload() can no longer leave the lock held forever.
func (t *BalTable) BalTableReload(gslbConfs gslb_conf.GslbConf,
	backendConfs cluster_table_conf.ClusterTableConf) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	var fails []string

	bmNew := make(BalMap)
	for clusterName, gslbConf := range *gslbConfs.Clusters {
		bal, ok := t.balTable[clusterName]
		if !ok {
			// new one balance
			bal = bal_gslb.NewBalanceGslb(clusterName)
		} else {
			// reuse the old balancer; remove it from the old table so only
			// clusters absent from the new conf remain there
			delete(t.balTable, clusterName)
		}

		// update balance
		if err := bal.Reload(gslbConf); err != nil {
			log.Logger.Error("BalTableReload():err[%s] in bal.Reload() for %s",
				err.Error(), clusterName)
			fails = append(fails, clusterName)
		}

		bmNew[clusterName] = bal
	}

	// remove bal not in configure file
	for _, remainder := range t.balTable {
		remainder.Release()
	}
	t.balTable = bmNew

	for clusterName, bal := range t.balTable {
		backendConf, ok1 := (*backendConfs.Config)[clusterName]
		if !ok1 {
			// never comes here
			log.Logger.Error("BalTableReload():no backend conf for %s", clusterName)
			fails = append(fails, clusterName)
			continue
		}

		if err := bal.BackendReload(backendConf); err != nil {
			log.Logger.Error("BalTableReload():err[%s] in bal.BackendReload() for %s",
				err.Error(), clusterName)
			fails = append(fails, clusterName)
		}
	}

	// update versions
	t.versions.ClusterTableConfVer = *backendConfs.Version
	t.versions.GslbConfTimeStamp = *gslbConfs.Ts
	t.versions.GslbConfSrc = *gslbConfs.Hostname

	if len(fails) != 0 {
		return fmt.Errorf("error in BalTableReload() for [%s]", strings.Join(fails, ","))
	}
	return nil
}
// lookup returns the balancer for clusterName, or an error when none exists.
// Callers must hold t.lock.
func (t *BalTable) lookup(clusterName string) (*bal_gslb.BalanceGslb, error) {
	if bal, ok := t.balTable[clusterName]; ok {
		return bal, nil
	}
	return nil, fmt.Errorf("no bal found for %s", clusterName)
}
// Lookup lookup BalanceGslb by cluster name (read-lock protected).
func (t *BalTable) Lookup(clusterName string) (*bal_gslb.BalanceGslb, error) {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.lookup(clusterName)
}
// NewBalTableState creates an empty BalTableState ready to be filled.
func NewBalTableState() *BalTableState {
	return &BalTableState{
		Balancers: make(map[string]*bal_gslb.GslbState),
	}
}
// GetState returns state of BalTable: one GslbState per cluster plus the
// total backend count, gathered under the read lock.
func (t *BalTable) GetState() *BalTableState {
	state := NewBalTableState()

	t.lock.RLock()
	defer t.lock.RUnlock()

	// go through clusters
	for name, bal := range t.balTable {
		gslbState := bal_gslb.State(bal)
		state.Balancers[name] = gslbState
		state.BackendNum += gslbState.BackendNum
	}
	return state
}
// GetVersions returns versions of BalTable (a copy, taken under the read lock).
func (t *BalTable) GetVersions() BalVersion {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.versions
}
|
NewBalTable
|
_Figure_S18.py
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Custom style
plt.style.use('scientific')

# absolute tolerances for chimera
absolutes = np.array([0.67, 1080000, 0.2, 0.15848931924611134])

# load in gryffin runs with Naive score as objective
df_naive = pd.read_pickle('Optimization/runs/gryffin_runs_naive.pkl')

# make the plot: one row per objective, shared x axis
fig, axes = plt.subplots(nrows=4, ncols=1, sharex=True, figsize=(8, 10))

# panel 0: peak score, with the tolerance line and the region from the
# tolerance down to the data minimum shaded
sns.lineplot(x='eval', y='peak_score', data=df_naive, ax=axes[0], label='Naive Score Included')
axes[0].axhline(absolutes[0], ls='--', linewidth=2, c='k', alpha=0.6)
axes[0].fill_between(df_naive['eval'], absolutes[0], np.amin(df_naive['peak_score']), color='#8C9196', alpha=0.25)
axes[0].set_ylim(0.25, 0.9)
axes[0].set_ylabel('Peak score ', fontsize=15)
axes[0].tick_params(labelsize=13)
axes[0].legend(loc='lower right', ncol=1, fontsize=15)

# panel 1: naive score on a log scale, shading from the tolerance up to the data maximum
sns.lineplot(x='eval', y='naive_score', data=df_naive, ax=axes[1])
axes[1].set_yscale('log')
axes[1].axhline(absolutes[1], ls='--', linewidth=2, c='k', alpha=0.6)
axes[1].fill_between(df_naive['eval'], absolutes[1], np.amax(df_naive['naive_score']), color='#8C9196', alpha=0.25)
axes[1].set_ylim(np.amin(df_naive['naive_score']), np.amax(df_naive['naive_score']))
# Fix: the LaTeX part is a raw string so its backslashes (\$, \cdot, "\ ") are
# no longer (deprecated) invalid escape sequences; the rendered text is unchanged.
axes[1].set_ylabel('Naive score \n' r'$( \$ \cdot (mol \ target)^{-1}$)', fontsize=15)
axes[1].tick_params(labelsize=13)

# panel 2: spectral overlap, shading from the tolerance up to the data maximum
sns.lineplot(x='eval', y='spectral_overlap', data=df_naive, ax=axes[2])
axes[2].axhline(absolutes[2], ls='--', linewidth=2, c='k', alpha=0.6)
axes[2].fill_between(df_naive['eval'], absolutes[2], np.amax(df_naive['spectral_overlap']), color='#8C9196', alpha=0.25)
|
# panel 3: fluorescence rate, shading from the tolerance down to the data minimum
sns.lineplot(x='eval', y='fluo_rate', data=df_naive, ax=axes[3])
axes[3].axhline(absolutes[3], ls='--', linewidth=2, c='k', alpha=0.6)
axes[3].fill_between(df_naive['eval'], absolutes[3], np.amin(df_naive['fluo_rate']), color='#8C9196', alpha=0.25)
axes[3].set_ylim(0., 0.6)
axes[3].set_ylabel('Fluorescence \nrate (ns$^{-1}$)', fontsize=15)
axes[3].tick_params(labelsize=13)
axes[3].set_xlabel('Number of evaluations', fontsize=15)
# common x-range for all panels, then save and display
for ax in axes:
    ax.set_xlim(0, 500)
plt.tight_layout()
plt.savefig('Figure_S18.png', dpi=300)
plt.show()
|
# panel 2 (cont.): fixed y-range and labelling for the spectral overlap panel
axes[2].set_ylim(0., 0.3)
axes[2].set_ylabel('Spectral \noverlap', fontsize=15)
axes[2].tick_params(labelsize=13)
|
filesystem_linux.go
|
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !nofilesystem
package collector
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/prometheus/common/log"
"golang.org/x/sys/unix"
)
const (
defIgnoredMountPoints = "^/(dev|proc|sys|var/lib/docker/.+)($|/)"
defIgnoredFSTypes = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
mountTimeout = 30 * time.Second
)
var stuckMounts = make(map[string]struct{})
var stuckMountsMtx = &sync.Mutex{}
// GetStats returns filesystem stats.
// Mount points matching the ignore patterns are skipped; mounts previously
// marked as stuck are reported with deviceError=1 without touching them again.
func (c *filesystemCollector) GetStats() ([]filesystemStats, error) {
	mps, err := mountPointDetails()
	if err != nil {
		return nil, err
	}
	stats := []filesystemStats{}
	for _, labels := range mps {
		if c.ignoredMountPointsPattern.MatchString(labels.mountPoint) {
			log.Debugf("Ignoring mount point: %s", labels.mountPoint)
			continue
		}
		if c.ignoredFSTypesPattern.MatchString(labels.fsType) {
			log.Debugf("Ignoring fs type: %s", labels.fsType)
			continue
		}
		// Stuck mounts only get an error stat; calling statfs on them again
		// would block this collector too.
		stuckMountsMtx.Lock()
		if _, ok := stuckMounts[labels.mountPoint]; ok {
			stats = append(stats, filesystemStats{
				labels:      labels,
				deviceError: 1,
			})
			log.Debugf("Mount point %q is in an unresponsive state", labels.mountPoint)
			stuckMountsMtx.Unlock()
			continue
		}
		stuckMountsMtx.Unlock()
		// The success channel is used to tell the "watcher" that the stat
		// finished successfully. The channel is closed on success.
		success := make(chan struct{})
		go stuckMountWatcher(labels.mountPoint, success)
		buf := new(unix.Statfs_t)
		err = unix.Statfs(rootfsFilePath(labels.mountPoint), buf)
		// close(success) happens under the mutex so the watcher's timeout path
		// cannot mark the mount stuck concurrently with this cleanup.
		stuckMountsMtx.Lock()
		close(success)
		// If the mount has been marked as stuck, unmark it and log its recovery.
		if _, ok := stuckMounts[labels.mountPoint]; ok {
			log.Debugf("Mount point %q has recovered, monitoring will resume", labels.mountPoint)
			delete(stuckMounts, labels.mountPoint)
		}
		stuckMountsMtx.Unlock()
		if err != nil {
			stats = append(stats, filesystemStats{
				labels:      labels,
				deviceError: 1,
			})
			log.Debugf("Error on statfs() system call for %q: %s", rootfsFilePath(labels.mountPoint), err)
			continue
		}
		// detect read-only mounts from the option list
		var ro float64
		for _, option := range strings.Split(labels.options, ",") {
			if option == "ro" {
				ro = 1
				break
			}
		}
		// block counts are scaled by the block size into bytes
		stats = append(stats, filesystemStats{
			labels:    labels,
			size:      float64(buf.Blocks) * float64(buf.Bsize),
			free:      float64(buf.Bfree) * float64(buf.Bsize),
			avail:     float64(buf.Bavail) * float64(buf.Bsize),
			files:     float64(buf.Files),
			filesFree: float64(buf.Ffree),
			ro:        ro,
		})
	}
	return stats, nil
}
// stuckMountWatcher listens on the given success channel and if the channel closes
// then the watcher does nothing. If instead the timeout is reached, the
// mount point that is being watched is marked as stuck.
func
|
(mountPoint string, success chan struct{}) {
select {
case <-success:
// Success
case <-time.After(mountTimeout):
// Timed out, mark mount as stuck
stuckMountsMtx.Lock()
select {
case <-success:
// Success came in just after the timeout was reached, don't label the mount as stuck
default:
log.Debugf("Mount point %q timed out, it is being labeled as stuck and will not be monitored", mountPoint)
stuckMounts[mountPoint] = struct{}{}
}
stuckMountsMtx.Unlock()
}
}
// mountPointDetails parses the mount table into filesystemLabels entries.
// It reads PID 1's mount table first and falls back to the system-wide one.
func mountPointDetails() ([]filesystemLabels, error) {
	file, err := os.Open(procFilePath("1/mounts"))
	if os.IsNotExist(err) {
		// Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid.
		log.Debugf("Got %q reading root mounts, falling back to system mounts", err)
		file, err = os.Open(procFilePath("mounts"))
	}
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return parseFilesystemLabels(file)
}
// parseFilesystemLabels reads mounts(5)-formatted lines from r and returns one
// filesystemLabels entry per mount. Lines with fewer than four fields are an
// error; octal escapes for space/tab in the mount point are decoded.
func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) {
	var filesystems []filesystemLabels

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		fields := strings.Fields(line)
		if len(fields) < 4 {
			return nil, fmt.Errorf("malformed mount point information: %q", line)
		}

		// Ensure we handle the translation of \040 and \011 as per fstab(5).
		mountPoint := strings.Replace(fields[1], "\\040", " ", -1)
		mountPoint = strings.Replace(mountPoint, "\\011", "\t", -1)

		filesystems = append(filesystems, filesystemLabels{
			device:     fields[0],
			mountPoint: mountPoint,
			fsType:     fields[2],
			options:    fields[3],
		})
	}
	return filesystems, scanner.Err()
}
|
stuckMountWatcher
|
staffCreation.py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from realtor.models import Realtors, Positions
class Realtor(forms.ModelForm):
    """ModelForm for creating/editing a ``Realtors`` record.

    All widgets carry the site's ``form-control validate`` CSS classes so
    the form matches the rest of the UI.
    """

    name = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control validate'}))
    email = forms.EmailField(required=True, widget=forms.EmailInput(attrs={'class': 'form-control validate'}))
    description = forms.CharField(required=True, widget=forms.Textarea(attrs={'class': 'form-control validate'}))

    class Meta:
        model = Realtors
        # `fields` is an explicit whitelist, so the previous
        # `exclude = ['id', 'password', 'image']` was redundant (none of those
        # names appear below) and has been dropped to avoid the confusing
        # fields+exclude combination.
        fields = [
            'name',
            'email',
            'description',
            'positions',
        ]
class RegisterForm(UserCreationForm):
|
# Declarative form fields; every widget uses the MDB "orangeForm" styling.
first_name = forms.CharField(
    widget=forms.TextInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
    required=True)
# The username field collects an e-mail address; save() copies it into
# user.email as well.
username = forms.EmailField(
    widget=forms.EmailInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
    required=True)
password1 = forms.CharField(
    widget=forms.PasswordInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
    required=True)
# NOTE(review): min_length=4 is applied to the confirmation field only;
# password1 itself is unconstrained here — presumably the UserCreationForm
# validators cover it. Confirm this is intended.
password2 = forms.CharField(
    widget=forms.PasswordInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
    required=True, min_length=4)

class Meta:
    model = User
    fields = [
        'first_name',
        'username',
        'password1',
        'password2',
        'is_staff',
    ]
def save(self, commit=True):
    """Persist the user, mirroring the username into the e-mail field.

    The form collects the address in ``username``, so it is copied to
    ``user.email`` before saving. With ``commit=False`` the caller is
    responsible for calling ``user.save()`` itself.
    """
    user = super(RegisterForm, self).save(commit=False)
    user.first_name = self.cleaned_data['first_name']
    user.email = user.username
    if commit:
        user.save()
    return user

def getUser(self):
    # Returns the cleaned username (an e-mail string), not a User instance.
    return self.cleaned_data['username']
|
|
interfaces.ts
|
import { CompilerOptions } from "ts-morph";
/** Map of configuration-set name to its Configuration. */
export interface KeysConfiguration {
    [key: string]: Configuration;
}

/** Location of a model source file. */
export interface ModelConfiguration {
    path: string;
}

/** Location of an extra file to copy into the build. */
export interface IncludeConfiguration {
    path: string;
}

/** Location of a declaration (.d.ts) source file. */
export interface DeclarationConfiguration {
    path: string;
}

/**
 * Server binding entry.
 * NOTE(review): the name looks like a typo for `BindingConfiguration`, but it
 * is kept as-is because `Configuration.bindings` (and possibly external
 * callers) reference this exact identifier.
 */
export interface BindindConfiguration {
    server: string;
    path: string;
}

/** Top-level build/packaging configuration for one contract package. */
export interface Configuration {
    runner: string;
    buildPath: string;
    path: string;
    srcFolder: string;
    packageName: string;
    contractNameServer: string;
    contractNameClient: string;
    npmrc: string;
    tsConfig: string;
    npmignore: string;
    models: Map<string, ModelConfiguration>;
    contracts: Map<string, ModelConfiguration>;
    includes: Map<string, IncludeConfiguration>;
    declarations: Map<string, DeclarationConfiguration>;
    bindings: Map<string, BindindConfiguration>;
    dependencies: Map<string, string>;
    protobuf: {
        buildPath: string;
    }
}
// Banner prepended to generated contract files. Note the timestamp is fixed
// once at module-load time, not per generated file.
export const HEADER = `
// Methodus contract.
// Generated at: ${new Date()}
`;
export class BuildOptions {
constructor(isClient, publish, isMocked?) {
this.isClient = isClient;
this.publish = publish;
|
}
isClient: boolean;
publish: boolean;
isMocked?: boolean;
isProtobuf?: boolean;
tsConfig?: string;
compilerOptions?: CompilerOptions;
target: string = '';
source: string = '';
}
|
this.isMocked = isMocked;
|
views.py
|
from django.contrib import messages
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.generics import ConfirmView, SimpleView, SingleObjectListView
from .classes import Statistic, StatisticNamespace
from .permissions import permission_statistics_view
from .tasks import task_execute_statistic
class NamespaceListView(SingleObjectListView):
    """Lists every registered statistics namespace."""

    extra_context = {
        'hide_link': True,
        'title': _('Statistics namespaces'),
    }
    template_name = 'appearance/generic_list.html'
    view_permission = permission_statistics_view

    def get_source_queryset(self):
        # Namespaces are registered in code, not stored in the database.
        return StatisticNamespace.get_all()
class NamespaceDetailView(SingleObjectListView):
    """Lists the statistics that belong to a single namespace."""

    view_permission = permission_statistics_view

    def get_extra_context(self):
        return {
            'hide_link': True,
            'object': self.get_namespace(),
            'title': _('Namespace details for: %s') % self.get_namespace(),
        }
|
def get_source_queryset(self):
    # The namespace owns its Statistic instances; list those.
    return self.get_namespace().statistics
class StatisticDetailView(SimpleView):
    """Renders the chart for a single statistic, looked up by URL slug."""

    view_permission = permission_statistics_view

    def get_extra_context(self):
        obj = self.get_object()
        return {
            'chart_data': obj.get_chart_data(),
            'namespace': obj.namespace,
            'navigation_object_list': ('namespace', 'object'),
            # True when there are no data series yet, so the template can show
            # a placeholder instead of an empty chart.
            'no_data': not obj.get_results_data()['series'],
            'object': obj,
            'title': _('Results for: %s') % obj,
        }

    def get_object(self):
        try:
            return Statistic.get(self.kwargs['slug'])
        except KeyError:
            raise Http404(_('Statistic "%s" not found.') % self.kwargs['slug'])

    def get_template_names(self):
        # Each statistic's renderer decides which template draws it.
        return (self.get_object().renderer.template_name,)
class StatisticQueueView(ConfirmView):
    """Confirmation view that queues a statistic for an off-schedule update."""

    view_permission = permission_statistics_view

    def get_extra_context(self):
        obj = self.get_object()
        return {
            'namespace': obj.namespace,
            'object': obj,
            # Translators: This text is asking users if they want to queue
            # (to send to the queue) a statistic for it to be update ahead
            # of schedule
            'title': _(
                'Queue statistic "%s" to be updated?'
            ) % obj,
        }

    def get_object(self):
        try:
            return Statistic.get(slug=self.kwargs['slug'])
        except KeyError:
            raise Http404(_('Statistic "%s" not found.') % self.kwargs['slug'])

    def view_action(self):
        # Resolve the statistic once; the previous code called
        # self.get_object() twice (once for the task, once for the message).
        obj = self.get_object()
        task_execute_statistic.delay(slug=obj.slug)
        messages.success(
            message=_(
                'Statistic "%s" queued successfully for update.'
            ) % obj.label, request=self.request
        )
|
def get_namespace(self):
return StatisticNamespace.get(slug=self.kwargs['slug'])
|
Stripe.js
|
/*
Copyright 2020-2021 Lowdefy, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import StripeRequest from './StripeRequest/StripeRequest.js';
import schema from './schema.js';
|
StripeRequest,
},
};
|
export default {
schema,
requests: {
|
cleanup.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Code pertaining to cleanup of temporaries as well as execution of
//! drop glue. See discussion in `doc.rs` for a high-level summary.
pub use self::ScopeId::*;
pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
pub use self::Heap::*;
use llvm::{BasicBlockRef, ValueRef};
use trans::base;
use trans::build;
use trans::callee;
use trans::common;
use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
use trans::debuginfo;
use trans::glue;
// Temporary due to slicing syntax hacks (KILLME)
//use middle::region;
use trans::type_::Type;
use middle::ty::{self, Ty};
use std::fmt;
use syntax::ast;
use util::ppaux::Repr;
/// One entry in the function's cleanup-scope stack: the region kind plus the
/// cleanups scheduled to run when control leaves that region.
pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The id of this cleanup scope. If the id is None,
    // this is a *temporary scope* that is pushed during trans to
    // cleanup miscellaneous garbage that trans may generate whose
    // lifetime is a subset of some expression. See module doc for
    // more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: Option<NodeInfo>,

    // Early-exit cleanup blocks already generated for this scope; cleared
    // whenever a new cleanup is scheduled (a cached block would miss it).
    cached_early_exits: Vec<CachedEarlyExit>,
    // Landing pad generated for unwinding out of this scope, if any.
    cached_landing_pad: Option<BasicBlockRef>,
}

/// Index of a custom (temporary) scope within the scope stack.
#[derive(Copy, Show)]
pub struct CustomScopeIndex {
    index: uint
}

// Slots in a loop scope's early-exit block array: `break` targets slot 0,
// `continue` targets slot 1.
pub const EXIT_BREAK: uint = 0;
pub const EXIT_LOOP: uint = 1;
pub const EXIT_MAX: uint = 2;

/// What kind of program region a cleanup scope corresponds to.
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    // Temporary scope with no AST counterpart.
    CustomScopeKind,
    // Scope for the AST node with the given id.
    AstScopeKind(ast::NodeId),
    // Loop scope; carries the break/continue target blocks (see EXIT_*).
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}
impl<'blk, 'tcx: 'blk> fmt::Show for CleanupScopeKind<'blk, 'tcx> {
    // Debug formatting; loop scopes print the raw pointers of their exit
    // blocks rather than their contents.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CustomScopeKind => write!(f, "CustomScopeKind"),
            AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
            LoopScopeKind(nid, ref blks) => {
                try!(write!(f, "LoopScopeKind({}, [", nid));
                for blk in blks.iter() {
                    try!(write!(f, "{:p}, ", blk));
                }
                write!(f, "])")
            }
        }
    }
}
/// Identifies a kind of early exit from the scope stack: unwinding on panic,
/// returning from the function, or breaking/continuing out of a loop.
#[derive(Copy, PartialEq, Show)]
pub enum EarlyExitLabel {
    UnwindExit,
    ReturnExit,
    // Loop node id plus the EXIT_* slot (break or continue).
    LoopExit(ast::NodeId, uint)
}

/// A previously generated cleanup block for a given early exit, cached so the
/// same exit path is not re-translated.
#[derive(Copy)]
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,
}

/// Interface implemented by each kind of scheduled cleanup action.
pub trait Cleanup<'tcx> {
    // True if this cleanup forces a landing pad to exist.
    fn must_unwind(&self) -> bool;
    // True if this cleanup should also run on the unwind path.
    fn clean_on_unwind(&self) -> bool;
    // True for llvm.lifetime.end markers (no user-visible effect).
    fn is_lifetime_end(&self) -> bool;
    // Emits the cleanup code into `bcx` and returns the resulting block.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx>;
}

pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;

/// Identifies a scope either by AST node id or by custom-scope stack index.
#[derive(Copy, Show)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Invoked when we start to trans the code contained within a new cleanup scope.
fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
    debug!("push_ast_cleanup_scope({})",
           self.ccx.tcx().map.node_to_string(debug_loc.id));

    // FIXME(#2202) -- currently closure bodies have a parent
    // region, which messes up the assertion below, since there
    // are no cleanup scopes on the stack at the start of
    // trans'ing a closure body. I think though that this should
    // eventually be fixed by closure bodies not having a parent
    // region, though that's a touch unclear, and it might also be
    // better just to narrow this assertion more (i.e., by
    // excluding id's that correspond to closure bodies only). For
    // now we just say that if there is already an AST scope on the stack,
    // this new AST scope had better be its immediate child.
    // Temporarily removed due to slicing syntax hacks (KILLME).
    /*let top_scope = self.top_ast_scope();
    if top_scope.is_some() {
        assert_eq!(self.ccx
                       .tcx()
                       .region_maps
                       .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
                       .map(|s|s.node_id()),
                   top_scope);
    }*/

    self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
                                      Some(debug_loc)));
}

/// Opens a loop scope for loop `id`; `exits` holds the break/continue target
/// blocks indexed by the EXIT_* constants.
fn push_loop_cleanup_scope(&self,
                           id: ast::NodeId,
                           exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
    debug!("push_loop_cleanup_scope({})",
           self.ccx.tcx().map.node_to_string(id));
    // A loop scope must sit directly inside its own AST scope.
    assert_eq!(Some(id), self.top_ast_scope());

    // Just copy the debuginfo source location from the enclosing scope
    let debug_loc = self.scopes
                        .borrow()
                        .last()
                        .unwrap()
                        .debug_loc;

    self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
}

/// Opens a temporary (custom) scope and returns its stack index.
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
    let index = self.scopes_len();
    debug!("push_custom_cleanup_scope(): {}", index);

    // Just copy the debuginfo source location from the enclosing scope
    let debug_loc = self.scopes
                        .borrow()
                        .last()
                        .map(|opt_scope| opt_scope.debug_loc)
                        .unwrap_or(None);

    self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
    CustomScopeIndex { index: index }
}
/// Like `push_custom_cleanup_scope`, but with an explicit debug location
/// instead of inheriting one from the enclosing scope.
fn push_custom_cleanup_scope_with_debug_loc(&self,
                                            debug_loc: NodeInfo)
                                            -> CustomScopeIndex {
    let index = self.scopes_len();
    // Fixed copy-paste: this used to log "push_custom_cleanup_scope()",
    // making the two entry points indistinguishable in debug output.
    debug!("push_custom_cleanup_scope_with_debug_loc(): {}", index);

    self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
    CustomScopeIndex { index: index }
}
/// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
/// stack, and generates the code to do its cleanups for normal exit.
fn pop_and_trans_ast_cleanup_scope(&self,
                                   bcx: Block<'blk, 'tcx>,
                                   cleanup_scope: ast::NodeId)
                                   -> Block<'blk, 'tcx> {
    debug!("pop_and_trans_ast_cleanup_scope({})",
           self.ccx.tcx().map.node_to_string(cleanup_scope));

    assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
    let scope = self.pop_scope();
    self.trans_scope_cleanups(bcx, &scope)
}

/// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
/// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
/// branching to a block generated by `normal_exit_block`.
fn pop_loop_cleanup_scope(&self,
                          cleanup_scope: ast::NodeId) {
    debug!("pop_loop_cleanup_scope({})",
           self.ccx.tcx().map.node_to_string(cleanup_scope));

    assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
    let _ = self.pop_scope();
}

/// Removes the top cleanup scope from the stack without executing its cleanups. The top
/// cleanup scope must be the temporary scope `custom_scope`.
fn pop_custom_cleanup_scope(&self,
                            custom_scope: CustomScopeIndex) {
    debug!("pop_custom_cleanup_scope({})", custom_scope.index);
    assert!(self.is_valid_to_pop_custom_scope(custom_scope));
    let _ = self.pop_scope();
}

/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
fn pop_and_trans_custom_cleanup_scope(&self,
                                      bcx: Block<'blk, 'tcx>,
                                      custom_scope: CustomScopeIndex)
                                      -> Block<'blk, 'tcx> {
    debug!("pop_and_trans_custom_cleanup_scope({})", custom_scope);
    assert!(self.is_valid_to_pop_custom_scope(custom_scope));

    let scope = self.pop_scope();
    self.trans_scope_cleanups(bcx, &scope)
}

/// Returns the id of the top-most loop scope
fn top_loop_scope(&self) -> ast::NodeId {
    // Walk from the innermost scope outwards; the first loop scope wins.
    for scope in self.scopes.borrow().iter().rev() {
        if let LoopScopeKind(id, _) = scope.kind {
            return id;
        }
    }
    self.ccx.sess().bug("no loop scope found");
}

/// Returns a block to branch to which will perform all pending cleanups and then
/// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
fn normal_exit_block(&'blk self,
                     cleanup_scope: ast::NodeId,
                     exit: uint) -> BasicBlockRef {
    self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
}

/// Returns a block to branch to which will perform all pending cleanups and then return from
/// this function
fn return_exit_block(&'blk self) -> BasicBlockRef {
    self.trans_cleanups_to_exit_scope(ReturnExit)
}
/// Schedules an `llvm.lifetime.end` marker for `val` at exit from `cleanup_scope`.
fn schedule_lifetime_end(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef) {
    let drop = box LifetimeEnd {
        ptr: val,
    };

    debug!("schedule_lifetime_end({}, val={})",
           cleanup_scope,
           self.ccx.tn().val_to_string(val));

    self.schedule_clean(cleanup_scope, drop as CleanupObj);
}

/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_mem(&self,
                     cleanup_scope: ScopeId,
                     val: ValueRef,
                     ty: Ty<'tcx>) {
    // Types without drop glue need no cleanup at all.
    if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
    let drop = box DropValue {
        is_immediate: false,
        must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
        val: val,
        ty: ty,
        zero: false
    };

    debug!("schedule_drop_mem({}, val={}, ty={})",
           cleanup_scope,
           self.ccx.tn().val_to_string(val),
           ty.repr(self.ccx.tcx()));

    self.schedule_clean(cleanup_scope, drop as CleanupObj);
}

/// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_and_zero_mem(&self,
                              cleanup_scope: ScopeId,
                              val: ValueRef,
                              ty: Ty<'tcx>) {
    if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
    // Identical to schedule_drop_mem except that the memory is zeroed after
    // the drop, preventing a second drop of the same value.
    let drop = box DropValue {
        is_immediate: false,
        must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
        val: val,
        ty: ty,
        zero: true
    };

    debug!("schedule_drop_and_zero_mem({}, val={}, ty={}, zero={})",
           cleanup_scope,
           self.ccx.tn().val_to_string(val),
           ty.repr(self.ccx.tcx()),
           true);

    self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a (deep) drop of `val`, which is an instance of `ty`
fn
|
(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
if !common::type_needs_drop(self.ccx.tcx(), ty) { return; }
let drop = box DropValue {
is_immediate: true,
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
zero: false
};
debug!("schedule_drop_immediate({}, val={}, ty={})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a call to `free(val)`. Note that this is a shallow operation.
fn schedule_free_value(&self,
                       cleanup_scope: ScopeId,
                       val: ValueRef,
                       heap: Heap,
                       content_ty: Ty<'tcx>) {
    let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

    debug!("schedule_free_value({}, val={}, heap={})",
           cleanup_scope,
           self.ccx.tn().val_to_string(val),
           heap);

    self.schedule_clean(cleanup_scope, drop as CleanupObj);
}

/// Schedules a call to `free(val)`. Note that this is a shallow operation.
fn schedule_free_slice(&self,
                       cleanup_scope: ScopeId,
                       val: ValueRef,
                       size: ValueRef,
                       align: ValueRef,
                       heap: Heap) {
    let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };

    debug!("schedule_free_slice({}, val={}, heap={})",
           cleanup_scope,
           self.ccx.tn().val_to_string(val),
           heap);

    self.schedule_clean(cleanup_scope, drop as CleanupObj);
}

// Dispatches a cleanup to the AST or custom scope named by `cleanup_scope`.
fn schedule_clean(&self,
                  cleanup_scope: ScopeId,
                  cleanup: CleanupObj<'tcx>) {
    match cleanup_scope {
        AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
        CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
    }
}
/// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
/// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
/// scope.
fn schedule_clean_in_ast_scope(&self,
                               cleanup_scope: ast::NodeId,
                               cleanup: CleanupObj<'tcx>) {
    debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
           cleanup_scope);

    for scope in self.scopes.borrow_mut().iter_mut().rev() {
        if scope.kind.is_ast_with_id(cleanup_scope) {
            scope.cleanups.push(cleanup);
            scope.clear_cached_exits();
            return;
        } else {
            // will be adding a cleanup to some enclosing scope
            scope.clear_cached_exits();
        }
    }

    // Reaching here means the requested scope is not on the stack — a
    // translation-phase invariant violation, so abort compilation.
    self.ccx.sess().bug(
        format!("no cleanup scope {} found",
                self.ccx.tcx().map.node_to_string(cleanup_scope))[]);
}

/// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
fn schedule_clean_in_custom_scope(&self,
                                  custom_scope: CustomScopeIndex,
                                  cleanup: CleanupObj<'tcx>) {
    debug!("schedule_clean_in_custom_scope(custom_scope={})",
           custom_scope.index);

    assert!(self.is_valid_custom_scope(custom_scope));

    let mut scopes = self.scopes.borrow_mut();
    let scope = &mut (*scopes)[custom_scope.index];
    scope.cleanups.push(cleanup);
    scope.clear_cached_exits();
}

/// Returns true if there are pending cleanups that should execute on panic.
fn needs_invoke(&self) -> bool {
    self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
/// Returns a basic block to branch to in the event of a panic. This block will run the panic
/// cleanups and eventually invoke the LLVM `Resume` instruction.
fn get_landing_pad(&'blk self) -> BasicBlockRef {
    let _icx = base::push_ctxt("get_landing_pad");

    debug!("get_landing_pad");

    let orig_scopes_len = self.scopes_len();
    assert!(orig_scopes_len > 0);

    // Remove any scopes that do not have cleanups on panic:
    let mut popped_scopes = vec!();
    while !self.top_scope(|s| s.needs_invoke()) {
        debug!("top scope does not need invoke");
        popped_scopes.push(self.pop_scope());
    }

    // Check for an existing landing pad in the new topmost scope:
    let llbb = self.get_or_create_landing_pad();

    // Push the scopes we removed back on:
    loop {
        match popped_scopes.pop() {
            Some(scope) => self.push_scope(scope),
            None => break
        }
    }

    // The scope stack must be restored exactly as it was.
    assert_eq!(self.scopes_len(), orig_scopes_len);

    return llbb;
}
}
impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Returns the id of the current top-most AST scope, if any.
fn top_ast_scope(&self) -> Option<ast::NodeId> {
    for scope in self.scopes.borrow().iter().rev() {
        match scope.kind {
            CustomScopeKind | LoopScopeKind(..) => {}
            AstScopeKind(i) => {
                return Some(i);
            }
        }
    }
    None
}

// Index (counting from the innermost end) of the first scope with pending
// cleanups, or None if every scope is empty.
fn top_nonempty_cleanup_scope(&self) -> Option<uint> {
    self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
}

// A custom scope may only be popped while it is the top of the stack.
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
    self.is_valid_custom_scope(custom_scope) &&
        custom_scope.index == self.scopes.borrow().len() - 1
}

fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
    let scopes = self.scopes.borrow();
    custom_scope.index < scopes.len() &&
        (*scopes)[custom_scope.index].kind.is_temp()
}

/// Generates the cleanups for `scope` into `bcx`
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                        bcx: Block<'blk, 'tcx>,
                        scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    if !bcx.unreachable.get() {
        // Cleanups run in reverse order of scheduling.
        for cleanup in scope.cleanups.iter().rev() {
            bcx = cleanup.trans(bcx, scope.debug_loc);
        }
    }
    bcx
}

fn scopes_len(&self) -> uint {
    self.scopes.borrow().len()
}

fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
    self.scopes.borrow_mut().push(scope)
}

fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
    debug!("popping cleanup scope {}, {} scopes remaining",
           self.top_scope(|s| s.block_name("")),
           self.scopes_len() - 1);

    self.scopes.borrow_mut().pop().unwrap()
}

// Applies `f` to the innermost scope without removing it.
fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
    f(self.scopes.borrow().last().unwrap())
}
/// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
/// unwind. This function will generate all cleanups between the top of the stack and the exit
/// `label` and return a basic block that the caller can branch to.
///
/// For example, if the current stack of cleanups were as follows:
///
///      AST 22
///      Custom 1
///      AST 23
///      Loop 23
///      Custom 2
///      AST 24
///
/// and the `label` specifies a break from `Loop 23`, then this function would generate a
/// series of basic blocks as follows:
///
///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
///
/// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
/// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
/// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
/// `break_blk`.
fn trans_cleanups_to_exit_scope(&'blk self,
                                label: EarlyExitLabel)
                                -> BasicBlockRef {
    debug!("trans_cleanups_to_exit_scope label={} scopes={}",
           label, self.scopes_len());

    let orig_scopes_len = self.scopes_len();
    let mut prev_llbb;
    let mut popped_scopes = vec!();

    // First we pop off all the cleanup stacks that are
    // traversed until the exit is reached, pushing them
    // onto the side vector `popped_scopes`. No code is
    // generated at this time.
    //
    // So, continuing the example from above, we would wind up
    // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
    // (Presuming that there are no cached exits)
    loop {
        if self.scopes_len() == 0 {
            // Ran off the bottom of the stack: only unwind and return exits
            // may legitimately traverse every scope.
            match label {
                UnwindExit => {
                    // Generate a block that will `Resume`.
                    let prev_bcx = self.new_block(true, "resume", None);
                    let personality = self.personality.get().expect(
                        "create_landing_pad() should have set this");
                    build::Resume(prev_bcx,
                                  build::Load(prev_bcx, personality));
                    prev_llbb = prev_bcx.llbb;
                    break;
                }

                ReturnExit => {
                    prev_llbb = self.get_llreturn();
                    break;
                }

                LoopExit(id, _) => {
                    self.ccx.sess().bug(format!(
                            "cannot exit from scope {}, \
                            not in scope", id)[]);
                }
            }
        }

        // Check if we have already cached the unwinding of this
        // scope for this label. If so, we can stop popping scopes
        // and branch to the cached label, since it contains the
        // cleanups for any subsequent scopes.
        match self.top_scope(|s| s.cached_early_exit(label)) {
            Some(cleanup_block) => {
                prev_llbb = cleanup_block;
                break;
            }
            None => { }
        }

        // Pop off the scope, since we will be generating
        // unwinding code for it. If we are searching for a loop exit,
        // and this scope is that loop, then stop popping and set
        // `prev_llbb` to the appropriate exit block from the loop.
        popped_scopes.push(self.pop_scope());
        let scope = popped_scopes.last().unwrap();
        match label {
            UnwindExit | ReturnExit => { }
            LoopExit(id, exit) => {
                match scope.kind.early_exit_block(id, exit) {
                    Some(exitllbb) => {
                        prev_llbb = exitllbb;
                        break;
                    }

                    None => { }
                }
            }
        }
    }

    debug!("trans_cleanups_to_exit_scope: popped {} scopes",
           popped_scopes.len());

    // Now push the popped scopes back on. As we go,
    // we track in `prev_llbb` the exit to which this scope
    // should branch when it's done.
    //
    // So, continuing with our example, we will start out with
    // `prev_llbb` being set to `break_blk` (or possibly a cached
    // early exit). We will then pop the scopes from `popped_scopes`
    // and generate a basic block for each one, prepending it in the
    // series and updating `prev_llbb`. So we begin by popping `Custom 2`
    // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
    // branch to `prev_llbb == break_blk`, giving us a sequence like:
    //
    //     Cleanup(Custom 2) -> prev_llbb
    //
    // We then pop `AST 24` and repeat the process, giving us the sequence:
    //
    //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
    //
    // At this point, `popped_scopes` is empty, and so the final block
    // that we return to the user is `Cleanup(AST 24)`.
    while !popped_scopes.is_empty() {
        let mut scope = popped_scopes.pop().unwrap();

        if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
        {
            let name = scope.block_name("clean");
            debug!("generating cleanups for {}", name);
            let bcx_in = self.new_block(label.is_unwind(),
                                        name[],
                                        None);
            let mut bcx_out = bcx_in;
            // Cleanups run in reverse order of scheduling; skip those that do
            // not apply to this kind of exit (e.g. non-unwind cleanups when
            // unwinding).
            for cleanup in scope.cleanups.iter().rev() {
                if cleanup_is_suitable_for(&**cleanup, label) {
                    bcx_out = cleanup.trans(bcx_out,
                                            scope.debug_loc);
                }
            }
            build::Br(bcx_out, prev_llbb);
            prev_llbb = bcx_in.llbb;
        } else {
            debug!("no suitable cleanups in {}",
                   scope.block_name("clean"));
        }

        // Cache the block for this (scope, label) pair and restore the scope.
        scope.add_cached_early_exit(label, prev_llbb);
        self.push_scope(scope);
    }

    debug!("trans_cleanups_to_exit_scope: prev_llbb={}", prev_llbb);

    assert_eq!(self.scopes_len(), orig_scopes_len);
    prev_llbb
}
/// Creates a landing pad for the top scope, if one does not exist. The landing pad will
/// perform all cleanups necessary for an unwind and then `resume` to continue error
/// propagation:
///
///     landing_pad -> ... cleanups ... -> [resume]
///
/// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
/// in this function itself.)
fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
    let pad_bcx;

    debug!("get_or_create_landing_pad");

    // Check if a landing pad block exists; if not, create one.
    // (Scoped block so the mutable borrow of `scopes` ends before codegen.)
    {
        let mut scopes = self.scopes.borrow_mut();
        let last_scope = scopes.last_mut().unwrap();
        match last_scope.cached_landing_pad {
            Some(llbb) => { return llbb; }
            None => {
                let name = last_scope.block_name("unwind");
                pad_bcx = self.new_block(true, name[], None);
                last_scope.cached_landing_pad = Some(pad_bcx.llbb);
            }
        }
    }

    // The landing pad return type (the type being propagated). Not sure what
    // this represents but it's determined by the personality function and
    // this is what the EH proposal example uses.
    let llretty = Type::struct_(self.ccx,
                                &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                false);

    // The exception handling personality function.
    //
    // If our compilation unit has the `eh_personality` lang item somewhere
    // within it, then we just need to translate that. Otherwise, we're
    // building an rlib which will depend on some upstream implementation of
    // this function, so we just codegen a generic reference to it. We don't
    // specify any of the types for the function, we just make it a symbol
    // that LLVM can later use.
    let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
        Some(def_id) => {
            callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
                                 pad_bcx.fcx.param_substs).val
        }
        None => {
            let mut personality = self.ccx.eh_personality().borrow_mut();
            match *personality {
                Some(llpersonality) => llpersonality,
                None => {
                    let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                    let f = base::decl_cdecl_fn(self.ccx,
                                                "rust_eh_personality",
                                                fty,
                                                self.ccx.tcx().types.i32);
                    *personality = Some(f);
                    f
                }
            }
        }
    };

    // The only landing pad clause will be 'cleanup'
    let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1u);

    // The landing pad block is a cleanup
    build::SetCleanup(pad_bcx, llretval);

    // We store the retval in a function-central alloca, so that calls to
    // Resume can find it.
    match self.personality.get() {
        Some(addr) => {
            build::Store(pad_bcx, llretval, addr);
        }
        None => {
            let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
            self.personality.set(Some(addr));
            build::Store(pad_bcx, llretval, addr);
        }
    }

    // Generate the cleanup block and branch to it.
    let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
    build::Br(pad_bcx, cleanup_llbb);

    return pad_bcx.llbb;
}
}
impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
    // Creates an empty scope of the given kind with no cached exits.
    fn new(kind: CleanupScopeKind<'blk, 'tcx>,
           debug_loc: Option<NodeInfo>)
           -> CleanupScope<'blk, 'tcx> {
        CleanupScope {
            kind: kind,
            debug_loc: debug_loc,
            cleanups: vec!(),
            cached_early_exits: vec!(),
            cached_landing_pad: None,
        }
    }

    // Invalidates cached exit blocks; called whenever a new cleanup is
    // scheduled, since an already-generated block would not include it.
    fn clear_cached_exits(&mut self) {
        self.cached_early_exits = vec!();
        self.cached_landing_pad = None;
    }

    // Looks up a previously generated cleanup block for `label`, if any.
    fn cached_early_exit(&self,
                         label: EarlyExitLabel)
                         -> Option<BasicBlockRef> {
        self.cached_early_exits.iter().
            find(|e| e.label == label).
            map(|e| e.cleanup_block)
    }

    fn add_cached_early_exit(&mut self,
                             label: EarlyExitLabel,
                             blk: BasicBlockRef) {
        self.cached_early_exits.push(
            CachedEarlyExit { label: label,
                              cleanup_block: blk });
    }

    /// True if this scope has cleanups that need unwinding
    fn needs_invoke(&self) -> bool {
        self.cached_landing_pad.is_some() ||
            self.cleanups.iter().any(|c| c.must_unwind())
    }

    /// Returns a suitable name to use for the basic block that handles this cleanup scope
    fn block_name(&self, prefix: &str) -> String {
        match self.kind {
            CustomScopeKind => format!("{}_custom_", prefix),
            AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
            LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
        }
    }

    // Discards every scheduled cleanup except lifetime-end markers.
    pub fn drop_non_lifetime_clean(&mut self) {
        self.cleanups.retain(|c| c.is_lifetime_end());
    }
}
impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
    // True only for temporary (custom) scopes.
    fn is_temp(&self) -> bool {
        match *self {
            CustomScopeKind => true,
            LoopScopeKind(..) | AstScopeKind(..) => false,
        }
    }

    fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
        match *self {
            CustomScopeKind | LoopScopeKind(..) => false,
            AstScopeKind(i) => i == id
        }
    }

    fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
        match *self {
            CustomScopeKind | AstScopeKind(..) => false,
            LoopScopeKind(i, _) => i == id
        }
    }

    /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
    fn early_exit_block(&self,
                        id: ast::NodeId,
                        exit: uint) -> Option<BasicBlockRef> {
        match *self {
            LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
            _ => None,
        }
    }
}
impl EarlyExitLabel {
fn is_unwind(&self) -> bool {
match *self {
UnwindExit => true,
_ => false
}
}
}
///////////////////////////////////////////////////////////////////////////
// Cleanup types

/// Cleanup that runs the drop glue for a value, optionally zeroing the
/// memory afterwards so the drop cannot run a second time.
#[derive(Copy)]
pub struct DropValue<'tcx> {
    is_immediate: bool,
    must_unwind: bool,
    val: ValueRef,
    ty: Ty<'tcx>,
    zero: bool
}

impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
    fn must_unwind(&self) -> bool {
        self.must_unwind
    }

    fn clean_on_unwind(&self) -> bool {
        self.must_unwind
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx> {
        // `is_immediate` selects between dropping a by-value datum and
        // dropping through a pointer.
        let bcx = if self.is_immediate {
            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
        } else {
            glue::drop_ty(bcx, self.val, self.ty, debug_loc)
        };
        if self.zero {
            base::zero_mem(bcx, self.val, self.ty);
        }
        bcx
    }
}
/// Heaps that a `FreeValue`/`FreeSlice` cleanup can release memory to.
#[derive(Copy, Show)]
pub enum Heap {
    HeapExchange
}

/// Cleanup that frees a heap allocation. Shallow: the contents are not
/// dropped, only the memory is released.
#[derive(Copy)]
pub struct FreeValue<'tcx> {
    ptr: ValueRef,
    heap: Heap,
    content_ty: Ty<'tcx>
}

impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
    fn must_unwind(&self) -> bool {
        true
    }

    fn clean_on_unwind(&self) -> bool {
        true
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx> {
        apply_debug_loc(bcx.fcx, debug_loc);
        match self.heap {
            HeapExchange => {
                glue::trans_exchange_free_ty(bcx, self.ptr, self.content_ty)
            }
        }
    }
}
/// Cleanup that frees a heap-allocated slice given its dynamic size and
/// alignment (both LLVM values computed at runtime).
#[derive(Copy)]
pub struct FreeSlice {
    ptr: ValueRef,
    size: ValueRef,
    align: ValueRef,
    heap: Heap,
}
impl<'tcx> Cleanup<'tcx> for FreeSlice {
    fn must_unwind(&self) -> bool {
        true
    }
    fn clean_on_unwind(&self) -> bool {
        true
    }
    fn is_lifetime_end(&self) -> bool {
        false
    }
    /// Emits a dynamically-sized exchange free for the slice storage.
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx> {
        apply_debug_loc(bcx.fcx, debug_loc);
        match self.heap {
            HeapExchange => {
                glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
            }
        }
    }
}
/// Cleanup that marks the end of `ptr`'s lifetime
/// (via `base::call_lifetime_end`); runs on unwind paths too, but never
/// forces an unwind itself.
#[derive(Copy)]
pub struct LifetimeEnd {
    ptr: ValueRef,
}
impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
    fn must_unwind(&self) -> bool {
        false
    }
    fn clean_on_unwind(&self) -> bool {
        true
    }
    fn is_lifetime_end(&self) -> bool {
        true
    }
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: Option<NodeInfo>)
                   -> Block<'blk, 'tcx> {
        apply_debug_loc(bcx.fcx, debug_loc);
        base::call_lifetime_end(bcx, self.ptr);
        bcx
    }
}
/// Returns the cleanup scope in which a temporary created for expression
/// `id` should be scheduled, as recorded in the region maps.
///
/// Calls `sess.bug` (compiler bug, not a user error) if the region maps
/// have no temporary scope for `id`.
pub fn temporary_scope(tcx: &ty::ctxt,
                       id: ast::NodeId)
                       -> ScopeId {
    match tcx.region_maps.temporary_scope(id) {
        Some(scope) => {
            let r = AstScope(scope.node_id());
            debug!("temporary_scope({}) = {}", id, r);
            r
        }
        None => {
            tcx.sess.bug(format!("no temporary scope available for expr {}",
                                 id)[])
        }
    }
}
/// Returns the (AST) cleanup scope associated with variable `id`,
/// as recorded in the region maps.
pub fn var_scope(tcx: &ty::ctxt,
                 id: ast::NodeId)
                 -> ScopeId {
    let r = AstScope(tcx.region_maps.var_scope(id).node_id());
    debug!("var_scope({}) = {}", id, r);
    r
}
/// A cleanup runs on the path to `label` unless the path is the unwind
/// exit and the cleanup is not flagged to run during unwinding.
fn cleanup_is_suitable_for(c: &Cleanup,
                           label: EarlyExitLabel) -> bool {
    !label.is_unwind() || c.clean_on_unwind()
}
/// Sets the debuginfo source location to `debug_loc` when one is
/// supplied, and clears the current source location otherwise.
fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
    match debug_loc {
        Some(ref src_loc) => {
            debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
        }
        None => {
            debuginfo::clear_source_location(fcx);
        }
    }
}
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
/// Public interface for pushing/popping cleanup scopes, scheduling
/// cleanups (drops, frees, lifetime ends) within them, and obtaining the
/// exit/landing-pad basic blocks that run those cleanups.
pub trait CleanupMethods<'blk, 'tcx> {
    // --- scope push/pop ---
    fn push_ast_cleanup_scope(&self, id: NodeInfo);
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeInfo)
                                                -> CustomScopeIndex;
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx>;
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    fn top_loop_scope(&self) -> ast::NodeId;
    // --- exit blocks ---
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: uint) -> BasicBlockRef;
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    // --- scheduling cleanups in a scope ---
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
                             val: ValueRef);
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    fn schedule_drop_and_zero_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    fn schedule_free_slice(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           size: ValueRef,
                           align: ValueRef,
                           heap: Heap);
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>);
    // --- unwinding ---
    fn needs_invoke(&self) -> bool;
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
/// Private helpers used by the implementation of `CleanupMethods`:
/// scope-stack queries and the machinery that emits cleanup code on the
/// way out of a scope.
trait CleanupHelperMethods<'blk, 'tcx> {
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    fn scopes_len(&self) -> uint;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}
|
schedule_drop_immediate
|
09.py
|
import math
def main():
|
main()
|
# Read the ceiling dimensions in metres.
length = float(input("ceiling length(m): "))
width = float(input("ceiling width(m): "))
# NOTE(review): despite the name, this value is used below as square
# metres covered per litre (area / 9) — confirm and consider renaming.
liter_per_square_meter = 9
area = length * width
# Round up: paint can only be bought in whole litres.
amount_of_paint = math.ceil(area / liter_per_square_meter)
result = (
    "\nYou will need to purchase "
    + str(amount_of_paint)
    + " liter(s) of paint to cover "
    + str(area)
    + " square meter(s)."
)
print(result)
|
interactions_repos_test.go
|
// Copyright 2018 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
json "github.com/json-iterator/go"
"fmt"
"net/http"
"reflect"
"testing"
)
// TestInteractionsService_GetRestrictionsForRepo verifies that the GET
// request is sent with the preview Accept header and that the response
// body is decoded into an InteractionRestriction.
func TestInteractionsService_GetRestrictionsForRepo(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()

	// Stub the endpoint and check the request shape.
	mux.HandleFunc("/repos/o/r/interaction-limits", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "GET")
		testHeader(t, r, "Accept", mediaTypeInteractionRestrictionsPreview)
		fmt.Fprint(w, `{"origin":"repository"}`)
	})

	got, _, err := client.Interactions.GetRestrictionsForRepo(context.Background(), "o", "r")
	if err != nil {
		t.Errorf("Interactions.GetRestrictionsForRepo returned error: %v", err)
	}

	want := &InteractionRestriction{Origin: String("repository")}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Interactions.GetRestrictionsForRepo returned %+v, want %+v", got, want)
	}
}
func TestInteractionsService_UpdateRestrictionsForRepo(t *testing.T) {
client, mux, _, teardown := setup()
defer teardown()
input := &InteractionRestriction{Limit: String("existing_users")}
mux.HandleFunc("/repos/o/r/interaction-limits", func(w http.ResponseWriter, r *http.Request) {
v := new(InteractionRestriction)
json.NewDecoder(r.Body).Decode(v)
testMethod(t, r, "PUT")
testHeader(t, r, "Accept", mediaTypeInteractionRestrictionsPreview)
if !reflect.DeepEqual(v, input)
|
fmt.Fprint(w, `{"origin":"repository"}`)
})
repoInteractions, _, err := client.Interactions.UpdateRestrictionsForRepo(context.Background(), "o", "r", input.GetLimit())
if err != nil {
t.Errorf("Interactions.UpdateRestrictionsForRepo returned error: %v", err)
}
want := &InteractionRestriction{Origin: String("repository")}
if !reflect.DeepEqual(repoInteractions, want) {
t.Errorf("Interactions.UpdateRestrictionsForRepo returned %+v, want %+v", repoInteractions, want)
}
}
// TestInteractionsService_RemoveRestrictionsFromRepo verifies that the
// DELETE request is sent with the preview Accept header.
func TestInteractionsService_RemoveRestrictionsFromRepo(t *testing.T) {
	client, mux, _, teardown := setup()
	defer teardown()

	// Stub the endpoint; a successful DELETE has no response body.
	mux.HandleFunc("/repos/o/r/interaction-limits", func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, "DELETE")
		testHeader(t, r, "Accept", mediaTypeInteractionRestrictionsPreview)
	})

	if _, err := client.Interactions.RemoveRestrictionsFromRepo(context.Background(), "o", "r"); err != nil {
		t.Errorf("Interactions.RemoveRestrictionsFromRepo returned error: %v", err)
	}
}
|
{
t.Errorf("Request body = %+v, want %+v", v, input)
}
|
problem3.py
|
'''https://projecteuler.net/problem=3'''
'''Please see the README document for details'''
def run(upper_bound):
|
if __name__ == "__main__":
print "https://projecteuler.net/problem=2"
|
    # Scan odd numbers downward from upper_bound looking for the first
    # prime (evens other than 2 cannot be prime).
    if(upper_bound%2 == 0):
        upper_bound = upper_bound-1
    for decrementor in range(upper_bound, 0,-2):
        print str(decrementor)+", ",
        counter = 2
        # Trial division: stop at the first divisor found.
        while(counter < decrementor):
            if(decrementor%counter == 0):
                break
            counter = counter+1
        # counter reached decrementor => no divisor found => prime.
        # NOTE(review): "lower that" typo is part of the printed output
        # string; left unchanged here.
        if(counter == decrementor):
            print "Highest Prime lower that "+str(upper_bound)+" is "+str(decrementor)
            return
|
source_extraction.py
|
# -*- coding: utf-8 -*-
"""
Created on 28/10/2017
@author: Carlos Eduardo Barbosa
Detection of sources in data and separation of bins prior to Voronoi
tesselation
"""
from __future__ import division, print_function
import os
import pyregion
import numpy as np
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.table import Table
import matplotlib.pyplot as plt
from astropy.stats import SigmaClip
from photutils.background import Background2D, MedianBackground
import sewpy
import context
from misc import array_from_header
def
|
(imgname, redo=False, output=None, hdunum=1):
""" Remove background from the image """
data = fits.getdata(imgname, ext=1)
output = "detection.fits"
if os.path.exists(output) and not redo:
return output
sigma_clip = SigmaClip(sigma=3.)
bkg_estimator = MedianBackground()
bkg = Background2D(data, (8, 8), filter_size=(5, 5),
sigma_clip=sigma_clip, bkg_estimator = bkg_estimator)
outdata = data - bkg.background
fits.writeto(output, outdata, overwrite=True)
return output
def mask_from_regions(imgname, redo=False):
    """ Mask regions marked in file mask.reg made in ds9.

    Pixels inside any region of ``mask.reg`` are set to NaN and the
    result is written to "detection_masked.fits".

    Parameters
    ----------
    imgname : str
        Input FITS image (the background-subtracted detection image).
    redo : bool
        Recompute even if the output file already exists.

    Returns
    -------
    str
        Name of the masked FITS file.
    """
    outfile = "detection_masked.fits"
    if os.path.exists(outfile) and not redo:
        # Bug fix: this branch used to return the image *data* (an array
        # from fits.getdata) while the branch below returns the file
        # *name*; callers such as run_sextractor expect a file name.
        return outfile
    data = fits.getdata(imgname)
    r = pyregion.open("mask.reg")
    for region in r.get_filter():
        data[region.mask(data.shape)] = np.nan
    hdu = fits.PrimaryHDU(data)
    hdu.writeto(outfile, overwrite=True)
    return outfile
def run_sextractor(img, redo=False, outfile=None):
    """ Produces a catalogue of sources in a given field.

    Runs SExtractor (through sewpy) on ``img`` and writes the resulting
    table to ``outfile`` (default "source-catalog.fits"). An existing
    catalogue is reused unless ``redo`` is set.

    Returns the catalogue file name.
    """
    if outfile is None:
        outfile = "source-catalog.fits"
    if not redo and os.path.exists(outfile):
        return outfile
    # Quantities measured for every detected source.
    params = ["NUMBER", "X_IMAGE", "Y_IMAGE", "KRON_RADIUS", "ELLIPTICITY",
              "THETA_IMAGE", "A_IMAGE", "B_IMAGE", "MAG_AUTO", "FLUX_RADIUS"]
    config = {"CHECKIMAGE_TYPE": "BACKGROUND",
              "CHECKIMAGE_NAME": "background.fits",
              "DETECT_THRESH": 1.5}
    extractor = sewpy.SEW(config=config, sexpath="source-extractor",
                          params=params)
    result = extractor(img)
    result["table"].write(outfile, format="fits", overwrite=True)
    return outfile
def mask_sources(img, cat, ignore=None, redo=False, output=None):
    """ Produces segmentation image with bins for detected sources using
    elliptical regions.

    Parameters
    ----------
    img : str
        FITS image; used for its pixel dimensions.
    cat : str
        FITS source catalogue (as produced by run_sextractor).
    ignore : iterable, optional
        Source NUMBERs to leave unmasked.
    redo : bool
        Recompute even if the output file already exists.
    output : str, optional
        Output FITS file name (default "sources_mask.fits").

    Returns
    -------
    str
        Name of the FITS file containing the mask.
    """
    if output is None:
        output = "sources_mask.fits"
    if os.path.exists(output) and not redo:
        return output
    data = fits.getdata(img)
    ydim, xdim = data.shape
    # 1-based pixel grids (SExtractor's X_IMAGE/Y_IMAGE are 1-based).
    xx, yy = np.meshgrid(np.arange(1, xdim + 1), np.arange(1, ydim + 1))
    table = Table.read(cat, 1)
    if ignore is not None:
        idx = np.array([i for i,x in enumerate(table["NUMBER"]) if x not in
                        ignore])
        table = table[idx]
    # axratio is kept for the optional axis-ratio cut below (disabled).
    axratio = table["B_IMAGE"] / table["A_IMAGE"]
    # table = table[axratio > 0.4]
    mask = np.zeros_like(data)
    for source in table:
        # Elliptical radius of every pixel with respect to this source.
        R = calc_isophotes(xx, yy, source["X_IMAGE"], source["Y_IMAGE"], \
                           source["THETA_IMAGE"] - 90, source["B_IMAGE"] /
                           source["A_IMAGE"])
        Rmax = 1.5 * source["KRON_RADIUS"]
        # Accumulates counts where sources overlap; "masked" is mask > 0.
        mask += np.where(R <= Rmax, 1, 0)
    hdu = fits.PrimaryHDU(mask)
    hdu.writeto(output, overwrite=True)
    return output
def calc_isophotes(x, y, x0, y0, PA, q):
    """ Calculate isophotes for a given component.

    For every pixel (x, y), returns the elliptical radius around the
    centre (x0, y0) of an ellipse with position angle ``PA`` (degrees)
    and axis ratio ``q``: coordinates are rotated into the ellipse frame
    and the second component is stretched by 1/q.
    """
    dx = np.copy(x) - x0
    dy = np.copy(y) - y0
    theta = np.radians(PA)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Rotation into the ellipse frame — same matrix as the original
    # column_stack/dot formulation, applied element-wise.
    x_rot = sin_t * dx - cos_t * dy
    y_rot = cos_t * dx + sin_t * dy
    return np.sqrt(np.power(x_rot, 2) + np.power(y_rot / q, 2))
def run_ngc3311(redo=False):
    """ Run the detection/masking pipeline on every field.

    For each field in ``context.fields`` the working directory is
    switched to the field's data directory, the background-subtracted
    detection image is built, user-drawn regions are masked, SExtractor
    is run and finally the detected sources are masked.
    """
    data_dir = os.path.join(context.home_dir, "data")
    for field in context.fields:
        os.chdir(os.path.join(data_dir, field))
        # Field A uses the ellipse model; other fields use their S/N map.
        imgname = ("ellipse_model.fits" if field == "fieldA"
                   else f"sn_field{field[-1]}.fits")
        detimg = background_removed_data(imgname, redo=redo)
        immasked = mask_from_regions(detimg, redo=redo)
        sexcat = run_sextractor(immasked, redo=redo)
        mask_sources(immasked, sexcat, redo=redo)
if __name__ == "__main__":
    run_ngc3311(redo=True)
|
background_removed_data
|
configuration.py
|
"""Let's Encrypt user-supplied configuration."""
import os
import urlparse
import zope.interface
from acme import challenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
class NamespaceConfig(object):
"""Configuration wrapper around :class:`argparse.Namespace`.
For more documentation, including available attributes, please see
:class:`letsencrypt.interfaces.IConfig`. However, note that
the following attributes are dynamically resolved using
:attr:`~letsencrypt.interfaces.IConfig.work_dir` and relative
paths defined in :py:mod:`letsencrypt.constants`:
- `accounts_dir`
- `csr_dir`
- `in_progress_dir`
- `key_dir`
- `renewer_config_file`
- `temp_checkpoint_dir`
:ivar namespace: Namespace typically produced by
:meth:`argparse.ArgumentParser.parse_args`.
:type namespace: :class:`argparse.Namespace`
"""
zope.interface.implements(interfaces.IConfig)
    def __init__(self, namespace):
        """Wrap ``namespace`` and reject conflicting port settings.

        :raises letsencrypt.errors.Error: if the SimpleHTTP and DVSNI
            challenges are configured to use the same port.
        """
        self.namespace = namespace
        if self.simple_http_port == self.dvsni_port:
            raise errors.Error(
                "Trying to run SimpleHTTP and DVSNI "
                "on the same port ({0})".format(self.dvsni_port))
    def __getattr__(self, name):
        # Delegate any attribute not defined on this wrapper to the
        # wrapped argparse namespace.
        return getattr(self.namespace, name)
    @property
    def server_path(self):
        """File path based on ``server``.

        Turns the server URL's host + path into a relative filesystem
        path by swapping '/' for the platform's path separator.
        """
        parsed = urlparse.urlparse(self.namespace.server)
        return (parsed.netloc + parsed.path).replace('/', os.path.sep)
    @property
    def accounts_dir(self):
        """Path ``config_dir/ACCOUNTS_DIR/<server_path>``."""
        return os.path.join(
            self.namespace.config_dir, constants.ACCOUNTS_DIR, self.server_path)
    @property
    def backup_dir(self):
        """Path ``work_dir/BACKUP_DIR``."""
        return os.path.join(self.namespace.work_dir, constants.BACKUP_DIR)
    @property
    def csr_dir(self):
        """Path ``config_dir/CSR_DIR``."""
        return os.path.join(self.namespace.config_dir, constants.CSR_DIR)
    @property
    def in_progress_dir(self):
        """Path ``work_dir/IN_PROGRESS_DIR``."""
        return os.path.join(self.namespace.work_dir, constants.IN_PROGRESS_DIR)
    @property
    def key_dir(self):
        """Path ``config_dir/KEY_DIR``."""
        return os.path.join(self.namespace.config_dir, constants.KEY_DIR)
    @property
    def temp_checkpoint_dir(self):
        """Path ``work_dir/TEMP_CHECKPOINT_DIR``."""
        return os.path.join(
            self.namespace.work_dir, constants.TEMP_CHECKPOINT_DIR)
@property
def
|
(self): # pylint: disable=missing-docstring
if self.namespace.simple_http_port is not None:
return self.namespace.simple_http_port
else:
return challenges.SimpleHTTPResponse.PORT
class RenewerConfiguration(object):
    """Configuration wrapper for renewer.

    Delegates attribute lookups to the wrapped namespace and exposes the
    renewer-specific paths derived from ``config_dir``.

    :ivar namespace: Namespace typically produced by
        :meth:`argparse.ArgumentParser.parse_args`.
    """
    def __init__(self, namespace):
        self.namespace = namespace
    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped namespace.
        return getattr(self.namespace, name)
    @property
    def archive_dir(self):
        """Path ``config_dir/ARCHIVE_DIR``."""
        return os.path.join(self.namespace.config_dir, constants.ARCHIVE_DIR)
    @property
    def live_dir(self):
        """Path ``config_dir/LIVE_DIR``."""
        return os.path.join(self.namespace.config_dir, constants.LIVE_DIR)
    @property
    def renewal_configs_dir(self):
        """Path ``config_dir/RENEWAL_CONFIGS_DIR``."""
        return os.path.join(
            self.namespace.config_dir, constants.RENEWAL_CONFIGS_DIR)
    @property
    def renewer_config_file(self):
        """Path ``config_dir/RENEWER_CONFIG_FILENAME``."""
        return os.path.join(
            self.namespace.config_dir, constants.RENEWER_CONFIG_FILENAME)
|
simple_http_port
|
rouge.py
|
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovy (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <[email protected]>
import numpy as np
import pdb
def my_lcs(string, sub):
    """
    Calculates longest common subsequence for a pair of tokenized strings.

    :param string: list of str : tokens from a string split using whitespace
    :param sub: list of str : shorter string, also split using whitespace
    :returns: int : length of the longest common subsequence between the
        two token lists

    Note: my_lcs only gives the length of the LCS, not the actual LCS.
    """
    # Ensure `string` is the longer of the two sequences.
    if len(string) < len(sub):
        string, sub = sub, string
    rows, cols = len(string) + 1, len(sub) + 1
    # table[i][j] = LCS length of string[:i] and sub[:j].
    table = [[0] * cols for _ in range(rows)]
    for i in range(1, rows):
        for j in range(1, cols):
            if string[i - 1] == sub[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[-1][-1]
class
|
():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
    def __init__(self):
        # vrama91: updated the value below based on discussion with Hovy.
        # beta controls the precision/recall weighting of the F-measure
        # computed in calc_score.
        self.beta = 1.2
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
:returns score: int (ROUGE-L score for the candidate evaluated against references)
"""
assert(len(candidate)==1)
assert(len(refs)>0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs/float(len(token_c)))
rec.append(lcs/float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if(prec_max!=0 and rec_max !=0):
score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
average_score = np.mean(np.array(score))
return average_score, np.array(score)
    def method(self):
        """Return the name of this metric."""
        return "Rouge"
# if __name__ == "__main__":
#
# cand_1 = "A boy picks an apple tree and places it into bags."
# cand_2 = "Two girls pick many red apples from trees and place them in a large bag."
# ref = "A boy picks an apple from a tree and places it into bags."
# concepts = ["pick", "apple", "tree", "place", "bag"]
#
#
# rouge = Rouge()
# print rouge.calc_score([cand_1], ref)
|
Rouge
|
day20.rs
|
use nom::*;
use std::collections::{HashMap, HashSet, VecDeque};
/// AST for the door regex: runs of `N`/`S`/`W`/`E` moves, `(a|b|...)`
/// alternation groups (a branch may be empty, e.g. `(NEWS|)`),
/// sequences of sub-patterns, and the empty pattern.
#[derive(Debug, Clone)]
enum Pattern {
    Directions(Vec<char>),
    Group(Vec<Pattern>),
    Seq(Vec<Pattern>),
    Empty,
}
// One or more direction letters (N/S/W/E) -> Pattern::Directions.
named!(directions<&[u8], Pattern>,
    map!(
        many1!(alt!(char!('N') | char!('S') | char!('W') | char!('E'))),
        Pattern::Directions
    )
);
// Parenthesized alternation `(a|b|...)`; an omitted branch after a `|`
// parses as Pattern::Empty (e.g. `(NEWS|)`).
named!(group<&[u8], Pattern>,
    do_parse!(
        char!('(') >>
        first: pattern >>
        rest: many1!(preceded!(char!('|'), opt!(pattern))) >>
        char!(')') >>
        ({
            let mut res = vec![first];
            res.extend(rest.into_iter().map(|p| p.unwrap_or(Pattern::Empty)));
            Pattern::Group(res)
        })
    )
);
// A sequence of groups and direction runs.
named!(pattern<&[u8], Pattern>,
    map!(many1!(alt!(group | directions)), Pattern::Seq)
);
// Whole input: a pattern anchored between `^` and `$`.
named!(parse<&[u8], Pattern>,
    delimited!(char!('^'), pattern, char!('$'))
);
impl Pattern {
fn walk(&self, graph: &mut Graph, origins: Vec<(isize, isize)>) -> Vec<(isize, isize)> {
match self {
Pattern::Seq(patterns) => patterns
.iter()
.fold(origins, |origins, p| p.walk(graph, origins)),
Pattern::Directions(dirs) => dirs.iter().fold(origins, |mut origins, dir| {
for (i, j) in origins.iter_mut() {
match dir {
'E' => {
let neighbors = graph.entry((*i, *j)).or_insert_with(Vec::new);
neighbors.push((*i, *j + 1));
*j += 1;
}
'W' => {
let neighbors = graph.entry((*i, *j)).or_insert_with(Vec::new);
neighbors.push((*i, *j - 1));
*j -= 1;
}
'N' => {
let neighbors = graph.entry((*i, *j)).or_insert_with(Vec::new);
neighbors.push((*i - 1, *j));
*i -= 1;
}
'S' => {
let neighbors = graph.entry((*i, *j)).or_insert_with(Vec::new);
neighbors.push((*i + 1, *j));
*i += 1;
}
_ => panic!("unknown direction"),
}
}
origins
}),
Pattern::Group(subpatterns) =>
|
Pattern::Empty => origins,
}
}
}
type Graph = HashMap<(isize, isize), Vec<(isize, isize)>>;
/// Shortest-path distance (number of doors) from the origin room `(0, 0)`
/// to every reachable room, computed by breadth-first search.
///
/// Fix: the previous implementation ran a DFS with edge relaxation; a DFS
/// can assign a room its distance via a long path, mark it visited, and
/// never re-expand it when a shorter path to it is found later, leaving
/// overestimated distances for that room's successors. BFS on an
/// unweighted graph records the minimal distance on first visit.
fn shortest_paths(graph: &Graph) -> HashMap<(isize, isize), usize> {
    type Vertex = (isize, isize);
    let mut queue: VecDeque<Vertex> = VecDeque::new();
    let mut dist: HashMap<Vertex, usize> = HashMap::new();
    dist.insert((0, 0), 0);
    queue.push_back((0, 0));
    while let Some(v) = queue.pop_front() {
        let next = dist[&v] + 1;
        if let Some(neighbors) = graph.get(&v) {
            for &w in neighbors {
                // The first visit necessarily arrives along a shortest path.
                if !dist.contains_key(&w) {
                    dist.insert(w, next);
                    queue.push_back(w);
                }
            }
        }
    }
    dist
}
/// Parses the door regex, builds the room graph, and returns
/// (shortest-path distance to the furthest room, number of rooms whose
/// shortest path passes through at least 1000 doors).
pub fn solve(input: &str) -> (usize, usize) {
    let (_, pattern) = parse(input.as_bytes()).expect("parser failed");
    let mut graph = Graph::new();
    pattern.walk(&mut graph, vec![(0, 0)]);
    let dist = shortest_paths(&graph);
    let furthest_room = dist.values().cloned().max().unwrap();
    let far_rooms = dist.values().filter(|&&d| d >= 1000).count();
    (furthest_room, far_rooms)
}
// Examples from the problem statement: each checks the maximum
// shortest-path distance produced for a sample regex.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_max_shortest_path1() {
        const INPUT: &str = "^WNE$";
        let (_, pattern) = parse(INPUT.as_bytes()).expect("parser failed");
        let mut graph = Graph::new();
        pattern.walk(&mut graph, vec![(0, 0)]);
        let dist = shortest_paths(&graph);
        let max_shortest_path = dist.iter().map(|(_, &d)| d).max().unwrap();
        assert_eq!(max_shortest_path, 3);
    }
    #[test]
    fn test_max_shortest_path2() {
        const INPUT: &str = "^ENWWW(NEEE|SSE(EE|N))$";
        let (_, pattern) = parse(INPUT.as_bytes()).expect("parser failed");
        let mut graph = Graph::new();
        pattern.walk(&mut graph, vec![(0, 0)]);
        let dist = shortest_paths(&graph);
        let max_shortest_path = dist.iter().map(|(_, &d)| d).max().unwrap();
        assert_eq!(max_shortest_path, 10);
    }
    #[test]
    fn test_max_shortest_path3() {
        // Exercises a group with two single-step branches.
        const INPUT: &str = "^N(E|W)S$";
        let (_, pattern) = parse(INPUT.as_bytes()).expect("parser failed");
        let mut graph = Graph::new();
        pattern.walk(&mut graph, vec![(0, 0)]);
        let dist = shortest_paths(&graph);
        let max_shortest_path = dist.iter().map(|(_, &d)| d).max().unwrap();
        assert_eq!(max_shortest_path, 3);
    }
    #[test]
    fn test_max_shortest_path4() {
        // Exercises empty branches like `(NEWS|)`.
        const INPUT: &str = "^ENNWSWW(NEWS|)SSSEEN(WNSE|)EE(SWEN|)NNN$";
        let (_, pattern) = parse(INPUT.as_bytes()).expect("parser failed");
        let mut graph = Graph::new();
        pattern.walk(&mut graph, vec![(0, 0)]);
        let dist = shortest_paths(&graph);
        let max_shortest_path = dist.iter().map(|(_, &d)| d).max().unwrap();
        assert_eq!(max_shortest_path, 18);
    }
}
|
{
let mut origins = subpatterns.iter().fold(Vec::new(), |mut new_origins, p| {
new_origins.extend(p.walk(graph, origins.clone()).iter());
new_origins
});
origins.sort_unstable();
origins.dedup();
origins
}
|
outputhandlers.py
|
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultCommandOutputHandler:
    # Default handler: forwards the raw command output unchanged, wrapped
    # in the <stream><event> XML envelope by print_xml_stream.
    def __init__(self,**args):
        # args: unused; accepted for interface parity with other handlers.
        pass
    def __call__(self, raw_cmd_output):
        print_xml_stream(raw_cmd_output)
class GoGenHandler:
    # Handler that tags each emitted event with index/source/sourcetype/
    # host metadata taken from the constructor arguments.
    def __init__(self,**args):
        # Required keys: 'index', 'source', 'sourcetype', 'host'.
        self.index = args['index']
        self.source = args['source']
        self.sourcetype = args['sourcetype']
        self.host = args['host']
    def __call__(self,raw_cmd_output):
        # Emit one <event> with the output XML-escaped and metadata attached.
        print "<stream><event><data>%s</data><source>%s</source><sourcetype>%s</sourcetype><index>%s</index><host>%s</host></event></stream>" % (encodeXMLText(raw_cmd_output),self.source,self.sourcetype,self.index,self.host)
class MyCommandOutputHandler:
    # Example/stub handler: ignores the real command output and always
    # emits the literal string "foobar".
    def __init__(self,**args):
        pass
    def __call__(self,raw_cmd_output):
        print_xml_stream("foobar")
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    # XML-escape s and wrap it in the <stream><event><data> envelope.
    print "<stream><event><data>%s</data></event></stream>" % encodeXMLText(s)
def encodeXMLText(text):
text = text.replace("&", "&")
text = text.replace("\"", """)
text = text.replace("'", "'")
text = text.replace("<", "<")
text = text.replace(">", ">")
return text
|
#add your custom command output handler class to this module
|
|
graph.py
|
"""
Node classes (`Apply`, `Variable`) and expression graph algorithms.
"""
from __future__ import absolute_import, print_function, division
from collections import deque
from copy import copy
from itertools import count
import theano
from theano import config
from theano.gof import utils
from six import string_types, integer_types, iteritems
from theano.misc.ordered_set import OrderedSet
__docformat__ = "restructuredtext en"
# Lazy imports to avoid circular dependencies.
is_same_graph_with_merge = None
equal_computations = None
NoParams = object()
class Node(utils.object2):
    """
    A Node in a theano graph.
    Graphs contain two kinds of Nodes -- Variable and Apply.
    Edges in the graph are not explicitly represented.
    Instead each Node keeps track of its parents via
    Variable.owner / Apply.inputs and its children
    via Variable.clients / Apply.outputs.
    """
    def get_parents(self):
        """
        Return a list of the parents of this node.
        Should return a copy--i.e., modifying the return
        value should not modify the graph structure.
        """
        # Abstract: subclasses (Apply, Variable) must override this.
        raise NotImplementedError()
class Apply(Node):
    """
    An :term:`Apply` instance is a node in an expression graph which represents
    the application of an `Op` to some input `Variable` nodes, producing some
    output `Variable` nodes.
    This class is typically instantiated by an Op's make_node() function, which
    is typically called by that Op's __call__() function.
    An Apply instance serves as a simple structure with three important
    attributes:
    - :literal:`inputs` : a list of `Variable` nodes that represent the
      arguments of the expression,
    - :literal:`outputs` : a list of `Variable` nodes that represent the
      variable of the expression, and
    - :literal:`op` : an `Op` instance that determines the nature of the
      expression being applied.
    The driver `compile.function` uses Apply's inputs attribute together with
    Variable's owner attribute to search the expression graph and determine
    which inputs are necessary to compute the function's outputs.
    A `Linker` uses the Apply instance's `op` field to compute the variables.
    Comparing with the Python language, an `Apply` instance is theano's version
    of a function call (or expression instance) whereas `Op` is theano's version
    of a function definition.
    Parameters
    ----------
    op : `Op` instance
    inputs : list of Variable instances
    outputs : list of Variable instances
    Notes
    -----
    The owner field of each output in the outputs list will be set to self.
    If an output element has an owner that is neither None nor self, then a
    ValueError exception will be raised.
    """
    def __init__(self, op, inputs, outputs):
        self.op = op
        self.inputs = []
        self.tag = utils.scratchpad()
        if not isinstance(inputs, (list, tuple)):
            raise TypeError("The inputs of an Apply must be a list or tuple")
        if not isinstance(outputs, (list, tuple)):
            raise TypeError("The output of an Apply must be a list or tuple")
        # filter inputs to make sure each element is a Variable
        for input in inputs:
            if isinstance(input, Variable):
                self.inputs.append(input)
            else:
                raise TypeError("The 'inputs' argument to Apply must contain Variable instances, not %s" % input)
        self.outputs = []
        # filter outputs to make sure each element is a Variable
        for i, output in enumerate(outputs):
            if isinstance(output, Variable):
                if output.owner is None:
                    # Claim ownership: this Apply becomes the producer of
                    # the output variable.
                    output.owner = self
                    output.index = i
                elif output.owner is not self or output.index != i:
                    raise ValueError("All output variables passed to Apply must belong to it.")
                self.outputs.append(output)
            else:
                raise TypeError("The 'outputs' argument to Apply must contain Variable instances with no owner, not %s" % output)
    def run_params(self):
        """
        Returns the params for the node, or NoParams if no params is set.
        """
        if hasattr(self.op, 'get_params'):
            return self.op.get_params(self)
        return NoParams
    def __getstate__(self):
        d = self.__dict__
        # ufunc don't pickle/unpickle well, so strip the tag's ufunc from
        # a *copy* of __dict__ before pickling.
        if hasattr(self.tag, 'ufunc'):
            d = copy(self.__dict__)
            t = d["tag"]
            del t.ufunc
            d["tag"] = t
        return d
    def default_output(self):
        """
        Returns the default output for this node.
        Returns
        -------
        Variable instance
            An element of self.outputs, typically self.outputs[0].
        Notes
        -----
        May raise AttributeError self.op.default_output is out of range, or if
        there are multiple outputs and self.op.default_output does not exist.
        """
        do = getattr(self.op, 'default_output', None)
        if do is None:
            # Without an explicit choice there must be exactly one output.
            if len(self.outputs) == 1:
                return self.outputs[0]
            else:
                raise AttributeError(
                    "%s.default_output should be an output index." % self.op)
        elif not isinstance(do, integer_types):
            raise AttributeError("%s.default_output should be an int or long" %
                                 self.op)
        elif do < 0 or do >= len(self.outputs):
            raise AttributeError("%s.default_output is out of range." %
                                 self.op)
        return self.outputs[do]
    out = property(default_output,
                   doc="alias for self.default_output()")
    """
    Alias for self.default_output().
    """
    def __str__(self):
        return op_as_string(self.inputs, self)
    def __repr__(self):
        return str(self)
    def __asapply__(self):
        return self
    def clone(self):
        """
        Duplicate this Apply instance with inputs = self.inputs.
        Returns
        -------
        object
            A new Apply instance (or subclass instance) with new outputs.
        Notes
        -----
        Tags are copied from self to the returned instance.
        """
        cp = self.__class__(self.op, self.inputs,
                            [output.clone() for output in self.outputs])
        cp.tag = copy(self.tag)
        return cp
    def clone_with_new_inputs(self, inputs, strict=True):
        """
        Duplicate this Apply instance in a new graph.
        Parameters
        ----------
        inputs
            List of Variable instances to use as inputs.
        strict : bool
            If True, the type fields of all the inputs must be equal
            to the current ones (or compatible, for instance Tensor /
            CudaNdarray of the same dtype and broadcastable patterns,
            in which case they will be converted into current Type), and
            returned outputs are guaranteed to have the same types as
            self.outputs. If False, then there's no guarantee that the
            clone's outputs will have the same types as self.outputs,
            and cloning may not even be possible (it depends on the Op).
        Returns
        -------
        object
            An Apply instance with the same op but different outputs.
        """
        assert isinstance(inputs, (list, tuple))
        remake_node = False
        new_inputs = inputs[:]
        for i, (curr, new) in enumerate(zip(self.inputs, new_inputs)):
            if not curr.type == new.type:
                if strict:
                    # If compatible, casts new into curr.type
                    new_inputs[i] = curr.type.filter_variable(new)
                else:
                    # Type changed with strict=False: the Op must rebuild
                    # the node so outputs get the right types.
                    remake_node = True
        if remake_node:
            new_node = self.op.make_node(*new_inputs)
            new_node.tag = copy(self.tag).__update__(new_node.tag)
        else:
            new_node = self.clone()
            new_node.inputs = new_inputs
        return new_node
    def get_parents(self):
        return list(self.inputs)
    # convenience properties
    nin = property(lambda self: len(self.inputs), doc='same as len(self.inputs)')
    """
    Property: Number of inputs.
    """
    nout = property(lambda self: len(self.outputs), doc='same as len(self.outputs)')
    """
    Property: Number of outputs.
    """
    params_type = property(lambda self: self.op.params_type, doc='type to use for the params')
class Variable(Node):
    """
    A :term:`Variable` is a node in an expression graph that represents a
    variable.
    The inputs and outputs of every `Apply` (theano.gof.Apply) are `Variable`
    instances. The input and output arguments to create a `function` are also
    `Variable` instances. A `Variable` is like a strongly-typed variable in
    some other languages; each `Variable` contains a reference to a `Type`
    instance that defines the kind of value the `Variable` can take in a
    computation.
    A `Variable` is a container for four important attributes:
    - :literal:`type` a `Type` instance defining the kind of value this
      `Variable` can have,
    - :literal:`owner` either None (for graph roots) or the `Apply` instance
      of which `self` is an output,
    - :literal:`index` the integer such that :literal:`owner.outputs[index] is
      this_variable` (ignored if `owner` is None),
    - :literal:`name` a string to use in pretty-printing and debugging.
    There are a few kinds of Variables to be aware of: A Variable which is the
    output of a symbolic computation has a reference to the Apply instance to
    which it belongs (property: owner) and the position of itself in the owner's
    output list (property: index).
    - `Variable` (this base type) is typically the output of a symbolic
      computation.
    - `Constant` (a subclass) which adds a default and un-replaceable
      :literal:`value`, and requires that owner is None.
    - `TensorVariable` subclass of Variable that represents a numpy.ndarray
      object.
    - `TensorSharedVariable` Shared version of TensorVariable.
    - `SparseVariable` subclass of Variable that represents
      a scipy.sparse.{csc,csr}_matrix object.
    - `CudaNdarrayVariable` subclass of Variable that represents our object on
      the GPU that is a subset of numpy.ndarray.
    - `RandomVariable`.
    A Variable which is the output of a symbolic computation will have an owner
    not equal to None.
    Using the Variables' owner field and the Apply nodes' inputs fields, one can
    navigate a graph from an output all the way to the inputs. The opposite
    direction is not possible until a FunctionGraph has annotated the Variables
    with the clients field, ie, before the compilation process has begun a
    Variable does not know which Apply nodes take it as input.
    Parameters
    ----------
    type : a Type instance
        The type governs the kind of data that can be associated with this
        variable.
    owner : None or Apply instance
        The Apply instance which computes the value for this variable.
    index : None or int
        The position of this Variable in owner.outputs.
    name : None or str
        A string for pretty-printing and debugging.
    Examples
    --------
    .. code-block:: python
        import theano
        from theano import tensor
        a = tensor.constant(1.5)  # declare a symbolic constant
        b = tensor.fscalar()  # declare a symbolic floating-point scalar
        c = a + b  # create a simple expression
        f = theano.function([b], [c])  # this works because a has a value associated with it already
        assert 4.0 == f(2.5)  # bind 2.5 to an internal copy of b and evaluate an internal c
        theano.function([a], [c])  # compilation error because b (required by c) is undefined
        theano.function([a,b], [c])  # compilation error because a is constant, it can't be an input
        d = tensor.value(1.5)  # create a value similar to the constant 'a'
        e = d + b
        theano.function([d,b], [e])  # this works. d's default value of 1.5 is ignored.
    The python variables :literal:`a,b,c` all refer to instances of type
    `Variable`. The `Variable` refered to by `a` is also an instance of
    `Constant`.
    `compile.function` uses each `Apply` instance's `inputs` attribute together
    with each Variable's `owner` field to determine which inputs are necessary
    to compute the function's outputs.
    """
    # __slots__ = ['type', 'owner', 'index', 'name']
    # Class-wide counter used only to generate unique auto_name identifiers.
    __count__ = count(0)
    def __init__(self, type, owner=None, index=None, name=None):
        super(Variable, self).__init__()
        # Scratchpad for annotations (debug info, test values, ...).
        self.tag = utils.scratchpad()
        self.type = type
        if owner is not None and not isinstance(owner, Apply):
            raise TypeError("owner must be an Apply instance", owner)
        self.owner = owner
        if index is not None and not isinstance(index, integer_types):
            raise TypeError("index must be an int", index)
        self.index = index
        if name is not None and not isinstance(name, string_types):
            raise TypeError("name must be a string", name)
        self.name = name
        # Stable fallback identifier, unique within the process.
        self.auto_name = 'auto_' + str(next(self.__count__))
    def __str__(self):
        """Return a str representation of the Variable.
        """
        if self.name is not None:
            return self.name
        if self.owner is not None:
            op = self.owner.op
            # The op's default output is printed as "op.out"; other outputs
            # are printed with their positional index.
            if self.index == op.default_output:
                return str(self.owner.op) + ".out"
            else:
                return str(self.owner.op) + "." + str(self.index)
        else:
            return "<%s>" % str(self.type)
    def __repr_test_value__(self):
        """Return a repr of the test value.
        Return a printable representation of the test value. It can be
        overridden by classes with non printable test_value to provide a
        suitable representation of the test_value.
        """
        return repr(theano.gof.op.get_test_value(self))
    def __repr__(self, firstPass=True):
        """Return a repr of the Variable.
        Return a printable name or description of the Variable. If
        config.print_test_value is True it will also print the test_value if
        any.
        """
        to_print = [str(self)]
        # firstPass guards against re-printing the test value when the
        # test-value repr itself reprs this Variable.
        if config.print_test_value and firstPass:
            try:
                to_print.append(self.__repr_test_value__())
            except AttributeError:
                # Variables without a test value simply omit it.
                pass
        return '\n'.join(to_print)
    def clone(self):
        """
        Return a new Variable like self.
        Returns
        -------
        Variable instance
            A new Variable instance (or subclass instance) with no owner or
            index.
        Notes
        -----
        Tags are copied to the returned instance.
        Name is copied to the returned instance.
        """
        # return copy(self)
        cp = self.__class__(self.type, None, None, self.name)
        cp.tag = copy(self.tag)
        return cp
    def __lt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __lt__',
                                  self.__class__.__name__)
    def __le__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __le__',
                                  self.__class__.__name__)
    def __gt__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __gt__',
                                  self.__class__.__name__)
    def __ge__(self, other):
        raise NotImplementedError('Subclasses of Variable must provide __ge__',
                                  self.__class__.__name__)
    def get_parents(self):
        # A Variable's only graph parent is the Apply node that produced it.
        if self.owner is not None:
            return [self.owner]
        return []
    def eval(self, inputs_to_values=None):
        """
        Evaluates this variable.
        Parameters
        ----------
        inputs_to_values
            A dictionary mapping theano Variables to values.
        Examples
        --------
        >>> import numpy as np
        >>> import theano.tensor as T
        >>> x = T.dscalar('x')
        >>> y = T.dscalar('y')
        >>> z = x + y
        >>> np.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
        True
        We passed :func:`eval` a dictionary mapping symbolic theano
        variables to the values to substitute for them, and it returned
        the numerical value of the expression.
        Notes
        -----
        `eval` will be slow the first time you call it on a variable --
        it needs to call :func:`function` to compile the expression behind
        the scenes. Subsequent calls to :func:`eval` on that same variable
        will be fast, because the variable caches the compiled function.
        This way of computing has more overhead than a normal Theano
        function, so don't use it too much in real scripts.
        """
        if inputs_to_values is None:
            inputs_to_values = {}
        if not hasattr(self, '_fn_cache'):
            self._fn_cache = dict()
        # Cache key: the input Variables sorted by identity, so the same set
        # of inputs reuses the previously compiled function.
        inputs = tuple(sorted(inputs_to_values.keys(), key=id))
        if inputs not in self._fn_cache:
            self._fn_cache[inputs] = theano.function(inputs, self)
        args = [inputs_to_values[param] for param in inputs]
        rval = self._fn_cache[inputs](*args)
        return rval
    def __getstate__(self):
        d = self.__dict__.copy()
        # Compiled functions are not picklable; drop the cache when pickling.
        d.pop("_fn_cache", None)
        return d
class Constant(Variable):
    """
    A :term:`Constant` is a `Variable` with a `value` field that cannot be
    changed at runtime.
    Constant nodes make eligible numerous optimizations: constant inlining in
    C code, constant folding, etc.
    Notes
    -----
    The data field is filtered by what is provided in the constructor for the
    Constant's type field.
    WRITEME
    """
    # __slots__ = ['data']
    def __init__(self, type, data, name=None):
        Variable.__init__(self, type, None, None, name)
        # Let the type validate/coerce the raw data before storing it.
        self.data = type.filter(data)
        utils.add_tag_trace(self)
    def equals(self, other):
        # this does what __eq__ should do, but Variable and Apply should always be hashable by id
        return isinstance(other, Constant) and self.signature() == other.signature()
    def signature(self):
        # (type, data) uniquely identifies a constant for merging/equality.
        return (self.type, self.data)
    def merge_signature(self):
        return self.signature()
    def __str__(self):
        if self.name is not None:
            return self.name
        else:
            name = str(self.data)
            if len(name) > 20:
                # Elide long values so printed graphs stay readable.
                # (Restored: this line was missing, leaving the `if` body
                # empty, which is a SyntaxError.)
                name = name[:10] + '...' + name[-10:]
            return 'Constant{%s}' % name
    def clone(self):
        """
        We clone this object, but we don't clone the data to lower memory
        requirement. We suppose that the data will never change.
        """
        cp = self.__class__(self.type, self.data, self.name)
        cp.tag = copy(self.tag)
        return cp
    def __set_owner(self, value):
        """
        WRITEME
        Raises
        ------
        ValueError
            If `value` is not `None`.
        """
        if value is not None:
            raise ValueError("Constant instances cannot have an owner.")
    owner = property(lambda self: None, __set_owner)
    value = property(lambda self: self.data, doc='read-only data access method')
    # index is not defined, because the `owner` attribute must necessarily be None
def stack_search(start, expand, mode='bfs', build_inv=False):
    """
    Search through a graph, either breadth- or depth-first.
    Parameters
    ----------
    start : deque
        Search from these nodes.
    expand : callable
        When we get to a node, add expand(node) to the list of nodes to visit.
        This function should return a list, or None.
    mode : str
        'bfs' pops from the left of `start`, 'dfs' pops from the right.
    build_inv : bool
        If True, also return a dict mapping each expanded node to the list
        of nodes that reached it.
    Returns
    -------
    list of `Variable` or `Apply` instances (depends on `expand`)
        The list of nodes in order of traversal.
    Notes
    -----
    A node will appear at most once in the return value, even if it
    appears multiple times in the start parameter.
    :postcondition: every element of start is transferred to the returned list.
    :postcondition: start is empty.
    """
    if mode not in ('bfs', 'dfs'):
        raise ValueError('mode should be bfs or dfs', mode)
    # BFS consumes the deque from the left, DFS from the right.
    pop_next = start.popleft if mode == 'bfs' else start.pop
    seen_ids = set()
    ordered = []
    inverse = {}
    while start:
        node = pop_next()
        if id(node) in seen_ids:
            continue
        seen_ids.add(id(node))
        ordered.append(node)
        children = expand(node)
        if children:
            if build_inv:
                for child in children:
                    inverse.setdefault(child, []).append(node)
            start.extend(children)
    assert len(ordered) == len(seen_ids)
    if build_inv:
        return ordered, inverse
    return ordered
def ancestors(variable_list, blockers=None):
    """
    Return the variables that contribute to those in variable_list (inclusive).
    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward through
        owners.
    blockers : optional
        Variables at which the backward search stops.
    Returns
    -------
    list of `Variable` instances
        All input nodes, in the order found by a left-recursive depth-first
        search started at the nodes in `variable_list`.
    """
    def expand(var):
        # Graph roots have nothing upstream.
        if not var.owner:
            return None
        # Blocked variables are reported but not walked through.
        if blockers and var in blockers:
            return None
        return reversed(var.owner.inputs)
    return stack_search(deque(variable_list), expand, 'dfs')
def inputs(variable_list, blockers=None):
    """
    Return the inputs required to compute the given Variables.
    Parameters
    ----------
    variable_list : list of `Variable` instances
        Output `Variable` instances from which to search backward through
        owners.
    blockers : optional
        Variables at which the backward search stops.
    Returns
    -------
    list of `Variable` instances
        Input nodes with no owner, in the order found by a left-recursive
        depth-first search started at the nodes in `variable_list`.
    """
    # The graph inputs are exactly the ownerless ancestors.
    return [v for v in ancestors(variable_list, blockers) if v.owner is None]
def variables_and_orphans(i, o):
    """
    Extract the list of variables between i and o nodes via a dfs
    traversal, and pick out the orphans among them.
    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.
    Returns
    -------
    (list, list)
        All variables in the subgraph, and those that are neither in `i`
        nor produced by any Apply in the subgraph (the orphans).
    """
    def expand(node):
        # Do not walk past declared inputs or graph roots.
        if not node.owner or node in i:
            return None
        # Visit both sibling outputs and inputs, reversed so the dfs
        # keeps the original left-to-right discovery order.
        return list(node.owner.outputs)[::-1] + list(node.owner.inputs)[::-1]
    found = stack_search(deque(o), expand, 'dfs')
    orphan_vars = [v for v in found if v.owner is None and v not in i]
    return found, orphan_vars
def ops(i, o):
    """
    Set of Ops contained within the subgraph between i and o
    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.
    Returns
    -------
    object
        The set of ops that are contained within the subgraph that lies
        between i and o, including the owners of the variables in o and
        intermediary ops between i and o, but not the owners of the variables
        in i.
    """
    all_vars, orphan_vars = variables_and_orphans(i, o)
    # Every non-input, non-orphan variable with an owner contributes that
    # owner to the result set.
    return set(v.owner for v in all_vars
               if v not in i and v not in orphan_vars and v.owner is not None)
def variables(i, o):
    """
    Extract the list of variables between input and output nodes via a
    dfs traversal.
    Parameters
    ----------
    i : list
        Input variables.
    o : list
        Output variables.
    Returns
    -------
    object
        The set of Variables that are involved in the subgraph that lies
        between i and o. This includes i, o, orphans(i, o) and all values of
        all intermediary steps from i to o.
    """
    all_vars, _ = variables_and_orphans(i, o)
    return all_vars
def orphans(i, o):
    """
    Extract the list of variables within input and output nodes via a dfs
    traversal, and return the orphans among them.
    Parameters
    ----------
    i : list
        Input Variables.
    o : list
        Output Variables.
    Returns
    -------
    object
        The set of Variables which one or more Variables in o depend on but are
        neither in i nor in the subgraph that lies between i and o.
    Examples
    --------
    orphans([x], [(x+y).out]) => [y]
    """
    _, orphan_vars = variables_and_orphans(i, o)
    return orphan_vars
def clone(i, o, copy_inputs=True):
    """
    Copies the subgraph contained between i and o.
    Parameters
    ----------
    i : list
        Input Variables.
    o : list
        Output Variables.
    copy_inputs : bool
        If True, the inputs will be copied (defaults to True).
    Returns
    -------
    object
        The inputs and outputs of that copy.
    """
    mapping = clone_get_equiv(i, o, copy_inputs)
    cloned_inputs = [mapping[inp] for inp in i]
    cloned_outputs = [mapping[out] for out in o]
    return cloned_inputs, cloned_outputs
def clone_get_equiv(inputs, outputs, copy_inputs_and_orphans=True, memo=None):
    """
    Return a dictionary that maps from Variable and Apply nodes in the
    original graph to a new node (a clone) in a new graph.
    This function works by recursively cloning inputs... rebuilding a directed
    graph from the inputs up to eventually building new outputs.
    Parameters
    ----------
    inputs : a list of Variables
    outputs : a list of Variables
    copy_inputs_and_orphans : bool
        True means to create the cloned graph from new input and constant
        nodes (the bottom of a feed-upward graph).
        False means to clone a graph that is rooted at the original input
        nodes.
    memo : None or dict
        Optionally start with a partly-filled dictionary for the return value.
        If a dictionary is passed, this function will work in-place on that
        dictionary and return it.
    """
    if memo is None:
        memo = {}
    # clone the inputs if necessary
    for input in inputs:
        if copy_inputs_and_orphans:
            cpy = input.clone()
            cpy.owner = None
            cpy.index = None
            # setdefault: pre-seeded entries in `memo` win over fresh clones.
            memo.setdefault(input, cpy)
        else:
            memo.setdefault(input, input)
    # go through the inputs -> outputs graph cloning as we go
    for apply in io_toposort(inputs, outputs):
        # Any input not yet in memo at this point is an orphan (a constant or
        # a variable outside the declared inputs).
        for input in apply.inputs:
            if input not in memo:
                if copy_inputs_and_orphans:
                    cpy = input.clone()
                    memo[input] = cpy
                else:
                    memo[input] = input
        new_apply = apply.clone_with_new_inputs([memo[i] for i in apply.inputs])
        memo.setdefault(apply, new_apply)
        for output, new_output in zip(apply.outputs, new_apply.outputs):
            memo.setdefault(output, new_output)
    # finish up by cloning any remaining outputs (it can happen)
    for output in outputs:
        if output not in memo:
            memo[output] = output.clone()
    return memo
def general_toposort(r_out, deps, debug_print=False,
                     compute_deps_cache=None, deps_cache=None,
                     clients=None):
    """
    Topologically sort the graph reachable from `r_out` through `deps`.
    Parameters
    ----------
    deps
        A python function that takes a node as input and returns its dependence.
    compute_deps_cache : optional
        If provided deps_cache should also be provided. This is a function like
        deps, but that also cache its results in a dict passed as deps_cache.
    deps_cache : dict
        Must be used with compute_deps_cache.
    clients : dict
        If a dict is passed it will be filled with a mapping of node
        -> clients for each node in the subgraph.
    Notes
    -----
    deps(i) should behave like a pure function (no funny business with
    internal state).
    deps(i) will be cached by this function (to be fast).
    The order of the return value list is determined by the order of nodes
    returned by the deps() function.
    deps should be provided or can be None and the caller provides
    compute_deps_cache and deps_cache. The second option removes a Python
    function call, and allows for more specialized code, so it can be
    faster.
    """
    if compute_deps_cache is None:
        deps_cache = {}
        # Default memoized wrapper around the user-supplied deps().
        def compute_deps_cache(io):
            if io not in deps_cache:
                d = deps(io)
                if d:
                    if not isinstance(d, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    # Cache a private copy; the cached list is mutated below.
                    deps_cache[io] = list(d)
                else:
                    deps_cache[io] = d
                return d
            else:
                return deps_cache[io]
    assert deps_cache is not None
    assert isinstance(r_out, (tuple, list, deque))
    # Depth-first reachability sweep; _clients inverts the dependency edges
    # (dependency -> list of nodes that depend on it).
    reachable, _clients = stack_search(deque(r_out), compute_deps_cache,
                                       'dfs', True)
    if clients is not None:
        clients.update(_clients)
    # Kahn's algorithm: start from nodes with no remaining dependencies and
    # peel them off, releasing their clients as their dep lists empty.
    sources = deque([r for r in reachable if not deps_cache.get(r, None)])
    rset = set()
    rlist = []
    while sources:
        node = sources.popleft()
        if node not in rset:
            rlist.append(node)
            rset.add(node)
            for client in _clients.get(node, []):
                deps_cache[client] = [a for a in deps_cache[client]
                                      if a is not node]
                if not deps_cache[client]:
                    sources.append(client)
    # If some reachable node was never released, a dependency cycle exists.
    if len(rlist) != len(reachable):
        if debug_print:
            print('')
            print(reachable)
            print(rlist)
        raise ValueError('graph contains cycles')
    return rlist
def io_toposort(inputs, outputs, orderings=None, clients=None):
    """
    Perform topological sort from input and output nodes
    Parameters
    ----------
    inputs : list or tuple of Variable instances
    outputs : list or tuple of Apply instances
    orderings : dict
        Key: Apply instance. Value: list of Apply instance.
        It is important that the value be a container with a deterministic
        iteration order. No sets allowed!
    clients : dict
        If a dict is provided it will be filled with mappings of
        node->clients for each node in the subgraph that is sorted
    Returns
    -------
    list of Apply instances in topological order.
    """
    # the inputs are used only here in the function that decides what 'predecessors' to explore
    iset = set(inputs)
    # We build 2 functions as a speed up
    deps_cache = {}
    compute_deps = None
    compute_deps_cache = None
    if not orderings:  # can be None or empty dict
        # Specialized function that is faster when no ordering.
        # Also include the cache in the function itself for speed up.
        def compute_deps_cache(obj):
            if obj in deps_cache:
                return deps_cache[obj]
            rval = []
            if obj not in iset:
                # A Variable depends on its producing Apply; an Apply
                # depends on its input Variables.  Declared inputs have no
                # dependencies by construction.
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                if rval:
                    if not isinstance(rval, (list, OrderedSet)):
                        raise TypeError(
                            "Non-deterministic collections here make"
                            " toposort non-deterministic.")
                    deps_cache[obj] = list(rval)
                else:
                    deps_cache[obj] = rval
            else:
                deps_cache[obj] = rval
            return rval
    else:
        # General version: also honour the extra caller-supplied orderings.
        def compute_deps(obj):
            rval = []
            if obj not in iset:
                if isinstance(obj, Variable):
                    if obj.owner:
                        rval = [obj.owner]
                elif isinstance(obj, Apply):
                    rval = list(obj.inputs)
                rval.extend(orderings.get(obj, []))
            else:
                # Declared inputs must not carry extra ordering constraints.
                assert not orderings.get(obj, [])
            return rval
    topo = general_toposort(outputs, deps=compute_deps,
                            compute_deps_cache=compute_deps_cache,
                            deps_cache=deps_cache, clients=clients)
    # Callers only want the Apply nodes; drop the interleaved Variables.
    return [o for o in topo if isinstance(o, Apply)]
# By default, graph leaves (inputs and orphans) are rendered with plain str().
default_leaf_formatter = str
def default_node_formatter(op, argstrings):
    """Render an Apply node as ``OpName(arg1, arg2, ...)``."""
    joined_args = ", ".join(argstrings)
    return "{0}({1})".format(op.op, joined_args)
def io_connection_pattern(inputs, outputs):
    """
    Returns the connection pattern of a subgraph defined by given
    inputs and outputs.
    Parameters
    ----------
    inputs : list of Variable
    outputs : list of Variable
    Returns
    -------
    list of list of bool
        One row per input; entry [i][j] is True when output j depends on
        input i.
    """
    inner_nodes = io_toposort(inputs, outputs)
    # Initialize 'connect_pattern_by_var' by establishing each input as
    # connected only to itself
    connect_pattern_by_var = {}
    nb_inputs = len(inputs)
    for i in range(nb_inputs):
        input = inputs[i]
        inp_connection_pattern = [i == j for j in range(nb_inputs)]
        connect_pattern_by_var[input] = inp_connection_pattern
    # Iterate through the nodes used to produce the outputs from the
    # inputs and, for every node, infer their connection pattern to
    # every input from the connection patterns of their parents.
    for n in inner_nodes:
        # Get the connection pattern of the inner node's op. If the op
        # does not define a connection_pattern method, assume that
        # every node output is connected to every node input
        try:
            op_connection_pattern = n.op.connection_pattern(n)
        except AttributeError:
            op_connection_pattern = ([[True] * len(n.outputs)] *
                                     len(n.inputs))
        # For every output of the inner node, figure out which inputs it
        # is connected to by combining the connection pattern of the inner
        # node and the connection patterns of the inner node's inputs.
        for out_idx in range(len(n.outputs)):
            out = n.outputs[out_idx]
            out_connection_pattern = [False] * nb_inputs
            for inp_idx in range(len(n.inputs)):
                inp = n.inputs[inp_idx]
                if inp in connect_pattern_by_var:
                    inp_connection_pattern = connect_pattern_by_var[inp]
                    # If the node output is connected to the node input, it
                    # means it is connected to every inner input that the
                    # node inputs is connected to
                    if op_connection_pattern[inp_idx][out_idx]:
                        # Element-wise OR of the two boolean patterns.
                        out_connection_pattern = [out_connection_pattern[i] or
                                                  inp_connection_pattern[i]
                                                  for i in range(nb_inputs)]
            # Store the connection pattern of the node output
            connect_pattern_by_var[out] = out_connection_pattern
    # Obtain the global connection pattern by combining the
    # connnection patterns of the individual outputs
    global_connection_pattern = [[] for o in range(len(inputs))]
    for out in outputs:
        out_connection_pattern = connect_pattern_by_var.get(out)
        if out_connection_pattern is None:
            # the output is completely isolated from inputs
            out_connection_pattern = [False] * len(inputs)
        for i in range(len(inputs)):
            global_connection_pattern[i].append(out_connection_pattern[i])
    return global_connection_pattern
def is_same_graph(var1, var2, givens=None, debug=False):
    """
    Return True iff Variables `var1` and `var2` perform the same computation.
    By 'performing the same computation', we mean that they must share the same
    graph, so that for instance this function will return False when comparing
    (x * (y * z)) with ((x * y) * z).
    The current implementation is not efficient since, when possible, it
    verifies equality by calling two different functions that are expected to
    return the same output. The goal is to verify this assumption, to
    eventually get rid of one of them in the future.
    Parameters
    ----------
    var1
        The first Variable to compare.
    var2
        The second Variable to compare.
    givens
        Similar to the `givens` argument of `theano.function`, it can be used
        to perform substitutions in the computational graph of `var1` and
        `var2`. This argument is associated to neither `var1` nor `var2`:
        substitutions may affect both graphs if the substituted variable
        is present in both.
    debug : bool
        If True, then an exception is raised when we are in a situation where
        the `equal_computations` implementation cannot be called.
        This parameter is intended to be used in tests only, to make sure we
        properly test both implementations.
    Examples
    --------
    ====== ====== ====== ======
    var1   var2   givens output
    ====== ====== ====== ======
    x + 1  x + 1  {}     True
    x + 1  y + 1  {}     False
    x + 1  y + 1  {x: y} True
    ====== ====== ====== ======
    """
    # Lazy import.
    if givens is None:
        givens = {}
    global equal_computations, is_same_graph_with_merge
    if equal_computations is None:
        # Imported lazily (and cached in module globals) to avoid a circular
        # import at module load time.
        from theano.gof.opt import is_same_graph_with_merge
        from theano.scan_module.scan_utils import equal_computations
    # Convert `givens` to dictionary.
    if not isinstance(givens, dict):
        givens = dict(givens)
    # Get result from the merge-based function.
    rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)
    # Get result from the function `equal_computations` from scan_utils.
    use_equal_computations = True
    if givens:
        # We need to build the `in_xs` and `in_ys` lists. To do this, we need
        # to be able to tell whether a variable belongs to the computational
        # graph of `var1` or `var2`.
        # The typical case we want to handle is when `to_replace` belongs to
        # one of these graphs, and `replace_by` belongs to the other one. In
        # other situations, the current implementation of `equal_computations`
        # is probably not appropriate, so we do not call it.
        ok = True
        in_xs = []
        in_ys = []
        # Compute the sets of all variables found in each computational graph.
        inputs_var = list(map(inputs, ([var1], [var2])))
        all_vars = [set(variables(v_i, v_o))
                    for v_i, v_o in ((inputs_var[0], [var1]),
                                     (inputs_var[1], [var2]))]
        def in_var(x, k):
            # Return True iff `x` is in computation graph of variable `vark`.
            return x in all_vars[k - 1]
        for to_replace, replace_by in iteritems(givens):
            # Map a substitution variable to the computational graphs it
            # belongs to.
            inside = dict((v, [in_var(v, k) for k in (1, 2)])
                          for v in (to_replace, replace_by))
            if (inside[to_replace][0] and not inside[to_replace][1] and
                    inside[replace_by][1] and not inside[replace_by][0]):
                # Substitute variable in `var1` by one from `var2`.
                in_xs.append(to_replace)
                in_ys.append(replace_by)
            elif (inside[to_replace][1] and not inside[to_replace][0] and
                  inside[replace_by][0] and not inside[replace_by][1]):
                # Substitute variable in `var2` by one from `var1`.
                in_xs.append(replace_by)
                in_ys.append(to_replace)
            else:
                ok = False
                break
        if not ok:
            # We cannot directly use `equal_computations`.
            if debug:
                raise AssertionError(
                    'When `debug` is True we want to make sure we are also '
                    'using the `equal_computations` implementation')
            use_equal_computations = False
    else:
        in_xs = None
        in_ys = None
    if use_equal_computations:
        rval2 = equal_computations(xs=[var1], ys=[var2],
                                   in_xs=in_xs, in_ys=in_ys)
        # Both implementations must agree; see the docstring note above.
        assert rval2 == rval1
    return rval1
def op_as_string(i, op,
                 leaf_formatter=default_leaf_formatter,
                 node_formatter=default_node_formatter):
    """
    Return a string representation of the subgraph between `i` and the
    Apply node `op`.
    """
    # Render each of the node's inputs, then wrap them with the node itself.
    formatted_args = as_string(i, op.inputs, leaf_formatter, node_formatter)
    return node_formatter(op, formatted_args)
def as_string(i, o,
              leaf_formatter=default_leaf_formatter,
              node_formatter=default_node_formatter):
    """
    Returns a string representation of the subgraph between i and o
    Parameters
    ----------
    i : list
        Input `Variable` s.
    o : list
        Output `Variable` s.
    leaf_formatter : callable
        Takes a `Variable`  and returns a string to describe it.
    node_formatter : callable
        Takes an `Op`  and the list of strings corresponding to its arguments
        and returns a string to describe it.
    Returns
    -------
    str
        Returns a string representation of the subgraph between i and o. If the
        same op is used by several other ops, the first occurrence will be
        marked as :literal:`*n -> description` and all subsequent occurrences
        will be marked as :literal:`*n`, where n is an id number (ids are
        attributed in an unspecified order and only exist for viewing
        convenience).
    """
    i = set(i)
    orph = orphans(i, o)
    # First pass: find the Apply nodes referenced more than once ("multi"),
    # so they can be printed once and referred to by *n afterwards.
    multi = set()
    seen = set()
    for output in o:
        op = output.owner
        if op in seen:
            multi.add(op)
        else:
            seen.add(op)
    for op in ops(i, o):
        for input in op.inputs:
            op2 = input.owner
            if input in i or input in orph or op2 is None:
                continue
            if op2 in seen:
                multi.add(op2)
            else:
                seen.add(input.owner)
    # Freeze to a list so multi_index() assigns stable 1-based ids.
    multi = [x for x in multi]
    done = set()
    def multi_index(x):
        return multi.index(x) + 1
    def describe(r):
        # Recursively render variable r; leaves, inputs and orphans go
        # through leaf_formatter.
        if r.owner is not None and r not in i and r not in orph:
            op = r.owner
            idx = op.outputs.index(r)
            if len(op.outputs) == 1:
                idxs = ""
            else:
                # Disambiguate which output of a multi-output op this is.
                idxs = "::%i" % idx
            if op in done:
                # Already printed in full; emit only the back-reference.
                return "*%i%s" % (multi_index(op), idxs)
            else:
                done.add(op)
                s = node_formatter(op, [describe(input) for input in op.inputs])
                if op in multi:
                    return "*%i -> %s" % (multi_index(op), s)
                else:
                    return s
        else:
            return leaf_formatter(r)
    return [describe(output) for output in o]
def view_roots(r):
    """
    Utility function that returns the leaves of a search through
    consecutive view_map()s.
    WRITEME
    """
    owner = r.owner
    # Graph roots are their own view roots.
    if owner is None:
        return [r]
    try:
        # Re-key the op's view_map from output positions to the output
        # Variables themselves. Ops without a view_map raise AttributeError.
        vmap = owner.op.view_map
        out_to_input_idxs = dict((owner.outputs[pos], idxs)
                                 for pos, idxs in iteritems(vmap))
    except AttributeError:
        return [r]
    if r not in out_to_input_idxs:
        return [r]
    roots = []
    for idx in out_to_input_idxs[r]:
        roots += view_roots(owner.inputs[idx])
    return roots
def list_of_nodes(inputs, outputs):
    """
    Return the apply nodes of the graph between inputs and outputs.
    """
    def expand(node):
        # Walk to the producers of this node's inputs, stopping at any
        # Apply that directly produces one of the declared graph inputs.
        return [inp.owner for inp in node.inputs
                if inp.owner and
                not any(i in inp.owner.outputs for i in inputs)]
    return stack_search(deque([out.owner for out in outputs]), expand)
|
name = name[:10] + '...' + name[-10:]
|
op.rs
|
/// Generates the `Opcode` enum plus its lookup tables from a single list of
/// `NAME = id, op = operand_count` entries, keeping the constants below in
/// sync by construction.
macro_rules! opcodes {
    { $($variant: ident = $value: expr, op = $operands: expr),+$(,)? } =>
    {
        #[repr(u8)]
        #[derive(Debug, Copy, Clone, PartialEq)]
        pub enum Opcode {
            $($variant = $value),+,
        }
        /// Every opcode, in declaration order.
        pub const OPCODES: &'static [Opcode] = &[$(Opcode::$variant),*];
        /// `(opcode, number of operands)` pairs, in declaration order.
        pub const OPCODES_OPERANDS: &'static [(Opcode, usize)] = &[$((Opcode::$variant , $operands)),*];
        /// `(opcode, stringified "Opcode::VARIANT" path)` pairs.
        pub const OPCODES_STRINGS: &'static [(Opcode, &str)] = &[$((Opcode::$variant , stringify!(Opcode::$variant))),*];
    }
}
// First number is the mnemonic's ID.
// op is the number of operands the mnemonic needs
opcodes! {
    // Arithmetical instructions
    // (ADD/SUB restored: the invocation was truncated and started at MUL = 3,
    // leaving ids 1 and 2 undecodable.)
    ADD = 1, op = 3,
    SUB = 2, op = 3,
    MUL = 3, op = 3,
    MOD = 4, op = 3,
    INC = 5, op = 1,
    DEC = 6, op = 1,
    // Control Flow instructions
    EQ = 7, op = 2,
    NEQ = 8, op = 2,
    GT = 9, op = 2,
    GTE = 10, op = 2,
    LT = 11, op = 2,
    LTE = 12, op = 2,
    HLT = 13, op = 0,
    NOP = 14, op = 0,
    LOAD = 15, op = 3,
    JMP = 16, op = 1,
    // Takes the last modulo's remaining and put it into the specified register
    MODR = 17, op = 1,
    // Put in the register the specified value
    // MOV = 18,
}
/// Map a raw byte back to its `Opcode`, or `None` if no mnemonic uses it.
pub fn decode_opcode(val: u8) -> Option<Opcode> {
    OPCODES.iter().copied().find(|&code| code as u8 == val)
}
/// Look up the opcode named by the first whitespace-separated token of
/// `base`, case-insensitively.
///
/// `OPCODES_STRINGS` stores names produced by `stringify!(Opcode::VARIANT)`,
/// so each entry carries an `Opcode::` path prefix (possibly with spaces
/// around `::`, depending on the compiler's token pretty-printing); we
/// compare against the bare variant name only. The previous implementation
/// tested `base.to_uppercase().contains(name)`, which could never match a
/// bare mnemonic such as "ADD" against "Opcode::ADD", and a substring match
/// would also let "GT" shadow "GTE".
pub fn get_op<'a>(base: String) -> Result<Opcode, &'a str> {
    let mnemonic = match base.split_whitespace().next() {
        Some(token) => token.to_uppercase(),
        None => return Err("No opcode found."),
    };
    for (opcode, name) in OPCODES_STRINGS {
        // Keep only the final path segment, i.e. the variant name itself.
        let bare = name.rsplit("::").next().unwrap_or(name).trim();
        if mnemonic == bare {
            return Ok(*opcode);
        }
    }
    Err("No opcode found.")
}
|
// Arithmetical instructions
ADD = 1, op = 3,
SUB = 2, op = 3,
|
1.cache.js
|
$wnd.life_qbic_portal_portlet_AppWidgetSet.runAsyncCallback1("function L8b(){}\nfunction Gib(a){this.a=a}\nfunction du(a){bu();jh($t,a);fu()}\nfunction M8b(){M8b=Egb;F8b=new L8b}\nfunction N8b(){N8b=Egb;G8b=new Gib((_ib(),new Yib($moduleBase+'333A66D5307B1C8C9935C59A877F1993.cache.eot')))}\nfunction O8b(){O8b=Egb;H8b=new Gib((_ib(),new Yib($moduleBase+'C81F8CA7CA7012F31BDE883DF6C99E61.cache.svg')))}\nfunction P8b(){P8b=Egb;I8b=new Gib((_ib(),new Yib($moduleBase+'D03281BC3FDB49177B485D1DC10A13C4.cache.ttf')))}\nfunction Q8b(){Q8b=Egb;J8b=new Gib((_ib(),new Yib($moduleBase+'51DCB1D3F3D956ACC80F89D3CF0AC5BE.cache.woff')))}\nfunction K8b(a){if(!a.a){a.a=true;bu();du((Iz(),N8b(),'.v-debug-console{background:#fff;opacity:0.9;border:1px solid #000;font-family:sans-serif;}.v-debug-console-caption{background:#000;border-bottom:1px solid grey;color:white;font-weight:bold;}.v-debug-console-content{font-size:x-small;overflow:auto;white-space:'+('pre;}.v-debug-console-content input{font-size:xx-small;}.v-app .invalidlayout,.v-app .invalidlayout *{background:#f99 !important;}@font-face{font-family:\"vdebugfont\";src:'+(Nte+G8b.a.a+\"')\")+';}@font-face{font-family:'+Ote+';src:'+(Q8b(),Nte+J8b.a.a+\"')\"+' '+Pte+'\"woff\"'+')'+','+' '+Nte+(P8b(),I8b.a.a)+\"')\"+' '+Pte+'\"truetype\"'+')'+','+' '+Nte+(O8b(),H8b.a.a)+\"')\"+' '+Pte+'\"svg\"'+')')+';font-weight:'+Bfe+';font-style:'+Bfe+';}.v-debugwindow [data-icon]:before,.v-debugwindow-menu [data-icon]:before{font-family:'+Ote+';content:'+'attr(data-icon)')+';speak:none;font-weight:normal;font-variant:normal;text-transform:none;line-height:1;-webkit-font-smoothing:antialiased;font-style:normal;vertical-align:text-bottom;}.v-debugwindow{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:'+'border-box;opacity:0.8;color:#666;font-family:Arial, Helvetica, Tahoma, Verdana, 
sans-serif;font-size:13px;}.v-debugwindow-handle{position:absolute;bottom:0;background-color:#fff;opacity:0;z-index:1000;}.v-debugwindow-handle-sw{width:7px'+';height:7px;}.v-debugwindow-handle-se{right:0;width:14px;height:14px;}.v-debugwindow:hover{opacity:1;}.v-debugwindow *{font-size:inherit !important;}.v-debugwindow-size0,.v-debugwindow-menu .v-debugwindow-button-size0{font-size:10px;}.v-debugwindow-size1,.v-debugwindow-menu .v-debugwindow-button-size1{font-size:13px;}.v-debugwindow-size2,.v-debugwindow-menu .v-debugwindow-button-size2{font-size:16px;}.v-debugwindow-head{text-align:right;background-color:'+'transparent;}.v-debugwindow-tabs{display:inline-block;}.v-debugwindow-tab,.v-debugwindow-controls>*{width:2em;border:none;margin:0;line-height:1.5em;background-color:#fff;color:#666;}.v-debugwindow-tab{position:relative;top:1px;border-width:1px 0 1px 1px'+';border-style:solid;border-color:#666;border-radius:2px 2px 0 0;}.v-debugwindow-tab-selected{color:#666;background-color:#fff;border-bottom:1px solid #fff;}.v-debugwindow-controls{position:relative;top:1px;display:inline-block;background-color:#fff;border:'+'1px solid #666;border-radius:2px 2px 0 0;}.v-debugwindow-section-head{text-align:left;background-color:#fff;border:1px solid #666;border-bottom:1px solid #999;box-shadow:0 0 7px 0 rgba(55, 55, 55, 0.6);min-height:1.5em;line-height:1.5em;padding-left:5px;}.v-debugwindow-button{border:none'+';background-color:transparent;color:#666;}.v-debugwindow-button:hover{color:#000;text-decoration:underline;}.v-debugwindow-button-active{color:#666;box-shadow:1px 1px 3px 0 inset;}.v-debugwindow-content{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;box-shadow:0 0 7px 0 rgba(55, 55, 55, 0.6);background-color:'+'#fff;border:1px solid #666;border-top:none;}.v-debugwindow-menu{background-color:#fff;padding:4px;border:1px solid #999;border-top:none;border-radius:0 0 5px 5px;box-shadow:0 0 7px 0 rgba(55, 55, 55, 
0.6);}.v-debugwindow-menu-content{min-width:100px;}.v-debugwindow-menu-content .v-debugwindow-button{line-height:22px'+';}.v-debugwindow-menu-content>div>.v-debugwindow-button{width:33%;}.v-debugwindow-row{display:table-row;}.v-debugwindow-row:nth-child(odd){background-color:rgba(0, 61, 255, 0.11);}.v-debugwindow-row>span{display:table-cell;padding:4px;}.v-debugwindow-row.SEVERE{color:#500;background-color:#ffc5c5;}.v-debugwindow-row.WARNING{background-color:#ff9;}.v-debugwindow-row.FINE{color:#737373;}.v-debugwindow-row.FINER{color:gray;}.v-debugwindow-row.FINEST{color:'+'#8c8c8c;}.v-debugwindow-row>span.caption{color:#999;text-align:right;white-space:nowrap;}.v-debugwindow-row>span.value,.v-debugwindow-selector>span.value{width:100%;}.v-debugwindow-selector :hover{background:rgba(255, 32, 32, 0.5);}.v-debugwindow-log{font-family:monospace;}.v-debugwindow-log .v-debugwindow-reset{color:#fff;background-color:#4c92ed;padding:4px;}.v-debugwindow-log .v-debugwindow-time{text-align:right'+';color:#999;}.v-debugwindow-log .v-debugwindow-message{white-space:nowrap;width:100%;}.v-debugwindow-log .v-debugwindow-message:hover{white-space:normal;word-wrap:break-word;}.v-debugwindow-log .v-debugwindow-message em{background-color:#c4e6f8;}.v-debugwindow-hierarchy .v-debugwindow-info{padding:1em;}.v-debugwindow-network .v-debugwindow-row{display:block !important;}.v-debugwindow-network .v-debugwindow-row>span{display:inline;}'));return true}return false}\nvar Nte=\"url('\",Ote='\"vdebugfont\"',Pte='format(';Dgb(535,1,{},Gib);var iI=uSd(h9d,'DataResourcePrototype',535);Dgb(804,1,dce);_.vc=function yIb(){K8b((M8b(),F8b))};var F8b,G8b,H8b,I8b,J8b;Dgb(1034,1,{},L8b);_.a=false;var TQ=uSd('com.vaadin.client.debug.internal.theme','DebugWindowStyles_default_InlineClientBundleGenerator/1',1034);U4d(Zh)(1);\n//# sourceURL=life.qbic.portal.portlet.AppWidgetSet-1.js\n")
|
||
ingress.go
|
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"strings"
"github.com/golang/glog"
"istio.io/pilot/platform"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type ingress struct {
*infra
logs *accessLogs
}
const (
ingressServiceName = "istio-ingress"
)
// String returns the human-readable name of this test suite, used in log output.
func (t *ingress) String() string {
    return "ingress"
}
func (t *ingress) setup() error {
if !t.Ingress {
return nil
}
if platform.ServiceRegistry(t.Registry) != platform.KubernetesRegistry {
return nil
}
t.logs = makeAccessLogs()
// parse and send yamls
if yaml, err := fill("ingress.yaml.tmpl", t.infra); err != nil {
return err
} else if err = t.kubeApply(yaml, t.Namespace); err != nil {
return err
|
return err
}
return nil
}
// run sends a battery of requests through the Istio ingress and verifies
// routing, status propagation and access logging. It is a no-op unless
// ingress is enabled and the service registry is Kubernetes.
func (t *ingress) run() error {
    if !t.Ingress {
        glog.Info("skipping test since ingress is missing")
        return nil
    }
    if platform.ServiceRegistry(t.Registry) != platform.KubernetesRegistry {
        return nil
    }
    // funcs collects named checks that are executed concurrently via parallel().
    funcs := make(map[string]func() status)
    funcs["Ingress status IP"] = t.checkIngressStatus
    funcs["Route rule for /c"] = t.checkRouteRule
    cases := []struct {
        // empty destination to expect 404
        dst string
        url string
        host string
    }{
        {"a", fmt.Sprintf("https://%s.%s:443/http", ingressServiceName, t.IstioNamespace), ""},
        {"b", fmt.Sprintf("https://%s.%s:443/pasta", ingressServiceName, t.IstioNamespace), ""},
        {"a", fmt.Sprintf("http://%s.%s/lucky", ingressServiceName, t.IstioNamespace), ""},
        {"b", fmt.Sprintf("http://%s.%s/lol", ingressServiceName, t.IstioNamespace), ""},
        {"a", fmt.Sprintf("http://%s.%s/foo", ingressServiceName, t.IstioNamespace), "foo.bar.com"},
        {"a", fmt.Sprintf("http://%s.%s/bar", ingressServiceName, t.IstioNamespace), "foo.baz.com"},
        {"a", fmt.Sprintf("grpc://%s.%s:80", ingressServiceName, t.IstioNamespace), "api.company.com"},
        {"a", fmt.Sprintf("grpcs://%s.%s:443", ingressServiceName, t.IstioNamespace), "api.company.com"},
        {"", fmt.Sprintf("http://%s.%s/notfound", ingressServiceName, t.IstioNamespace), ""},
        {"", fmt.Sprintf("http://%s.%s/foo", ingressServiceName, t.IstioNamespace), ""},
    }
    for _, req := range cases {
        name := fmt.Sprintf("Ingress request to %+v", req)
        // The outer closure is invoked immediately so each check captures its
        // own copy of the per-case values rather than the loop variable.
        funcs[name] = (func(dst, url, host string) func() status {
            extra := ""
            if host != "" {
                // Pass an explicit Host header to exercise host-based routing.
                extra = "-key Host -val " + host
            }
            return func() status {
                resp := t.clientRequest("t", url, 1, extra)
                if dst == "" {
                    // No destination configured: the ingress must answer 404.
                    if len(resp.code) > 0 && resp.code[0] == "404" {
                        return nil
                    }
                } else if len(resp.id) > 0 {
                    // The ingress is expected to add an X-Forwarded-For header
                    // that the echo service reflects into the response body.
                    if !strings.Contains(resp.body, "X-Forwarded-For") &&
                        !strings.Contains(resp.body, "x-forwarded-for") {
                        glog.Warningf("Missing X-Forwarded-For in the body: %s", resp.body)
                        return errAgain
                    }
                    id := resp.id[0]
                    // Remember the request id so access logs can be verified below.
                    t.logs.add(dst, id, name)
                    t.logs.add("ingress", id, name)
                    return nil
                }
                // Neither a 404 nor an identified response yet; retry.
                return errAgain
            }
        })(req.dst, req.url, req.host)
    }
    if err := parallel(funcs); err != nil {
        return err
    }
    // Verify that every recorded request id shows up in the access logs.
    if err := t.logs.check(t.infra); err != nil {
        return err
    }
    return nil
}
// checkRouteRule verifies that version splitting is applied to ingress paths
func (t *ingress) checkRouteRule() status {
url := fmt.Sprintf("http://%s.%s/c", ingressServiceName, t.IstioNamespace)
resp := t.clientRequest("t", url, 100, "")
count := counts(resp.version)
glog.V(2).Infof("counts: %v", count)
if count["v1"] >= 95 {
return nil
}
return errAgain
}
// ensure that IPs/hostnames are in the ingress statuses
func (t *ingress) checkIngressStatus() status {
ings, err := client.Extensions().Ingresses(t.Namespace).List(metav1.ListOptions{})
if err != nil {
return err
}
if len(ings.Items) == 0 {
return fmt.Errorf("ingress status failure: no ingress")
}
for _, ing := range ings.Items {
if len(ing.Status.LoadBalancer.Ingress) == 0 {
return errAgain
}
for _, status := range ing.Status.LoadBalancer.Ingress {
if status.IP == "" && status.Hostname == "" {
return errAgain
}
glog.Infof("Ingress Status IP: %s", status.IP)
}
}
return nil
}
// teardown removes the resources this test created: all ingress definitions
// in the namespace, the ingress TLS secret, and any applied configs.
// Errors are logged but not returned, since teardown is best-effort.
func (t *ingress) teardown() {
    if !t.Ingress {
        return
    }
    if err := client.Extensions().Ingresses(t.Namespace).
        DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
        glog.Warning(err)
    }
    if err := client.CoreV1().Secrets(t.Namespace).
        Delete(ingressSecretName, &metav1.DeleteOptions{}); err != nil {
        glog.Warning(err)
    }
    if err := t.deleteAllConfigs(); err != nil {
        glog.Warning(err)
    }
}
|
}
// send route rules for "c" only
if err := t.applyConfig("rule-default-route.yaml.tmpl", nil); err != nil {
|
scikitlearn.py
|
from datetime import datetime
import logging
import os
import time
from typing import Callable, Tuple, Optional, Sequence
import stopit
from sklearn.base import TransformerMixin, is_classifier
from sklearn.model_selection import ShuffleSplit, cross_validate, check_cv
from sklearn.pipeline import Pipeline
from gama.utilities.evaluation_library import Evaluation
from gama.utilities.generic.stopwatch import Stopwatch
import numpy as np
from gama.utilities.metrics import Metric
from gama.genetic_programming.components import Individual, PrimitiveNode, Fitness
log = logging.getLogger(__name__)
def primitive_node_to_sklearn(primitive_node: PrimitiveNode) -> object:
    """ Instantiate the object described by `primitive_node`.

    Each terminal contributes one keyword argument (output name -> value);
    the primitive's `identifier` is the callable that builds the object.
    """
    params = dict()
    for terminal in primitive_node._terminals:
        params[terminal.output] = terminal.value
    return primitive_node._primitive.identifier(**params)
def compile_individual(
    individual: Individual,
    parameter_checks=None,
    preprocessing_steps: Sequence[Tuple[str, TransformerMixin]] = None,
) -> Pipeline:
    """ Compile an Individual into a scikit-learn Pipeline.

    Primitives are numbered in `individual` order, optional preprocessing
    steps are appended, and the whole list is reversed so that the pipeline
    runs preprocessing first and the final primitive last.
    """
    steps = []
    for i, primitive in enumerate(individual.primitives):
        steps.append((str(i), primitive_node_to_sklearn(primitive)))
    if preprocessing_steps:
        steps.extend(reversed(preprocessing_steps))
    return Pipeline(list(reversed(steps)))
def object_is_valid_pipeline(o):
    """ Determines if object behaves like a scikit-learn pipeline.

    Duck-typed check: the object must be non-None and expose `fit`,
    `predict` and `steps`.
    """
    if o is None:
        return False
    return all(hasattr(o, attr) for attr in ("fit", "predict", "steps"))
def
|
(
pipeline, x, y_train, timeout: float, metrics: Tuple[Metric], cv=5, subsample=None,
) -> Tuple:
""" Score `pipeline` with k-fold CV according to `metrics` on (a subsample of) X, y
Returns
-------
Tuple:
prediction: np.ndarray if successful, None if not
scores: tuple with one float per metric, each value is -inf on fail.
estimators: list of fitted pipelines if successful, None if not
error: None if successful, otherwise an Exception
"""
if not object_is_valid_pipeline(pipeline):
raise TypeError(f"Pipeline must not be None and requires fit, predict, steps.")
if not timeout > 0:
raise ValueError(f"`timeout` must be greater than 0, is {timeout}.")
prediction, estimators = None, None
# default score for e.g. timeout or failure
scores = tuple([float("-inf")] * len(metrics))
with stopit.ThreadingTimeout(timeout) as c_mgr:
try:
if isinstance(subsample, int) and subsample < len(y_train):
sampler = ShuffleSplit(n_splits=1, train_size=subsample, random_state=0)
idx, _ = next(sampler.split(x))
x, y_train = x.iloc[idx, :], y_train[idx]
splitter = check_cv(cv, y_train, is_classifier(pipeline))
result = cross_validate(
pipeline,
x,
y_train,
cv=splitter,
return_estimator=True,
scoring=[m.name for m in metrics],
error_score="raise",
)
scores = tuple([np.mean(result[f"test_{m.name}"]) for m in metrics])
estimators = result["estimator"]
for (estimator, (_, test)) in zip(estimators, splitter.split(x, y_train)):
if any([m.requires_probabilities for m in metrics]):
fold_pred = estimator.predict_proba(x.iloc[test, :])
else:
fold_pred = estimator.predict(x.iloc[test, :])
if prediction is None:
if fold_pred.ndim == 2:
prediction = np.empty(shape=(len(y_train), fold_pred.shape[1]))
else:
prediction = np.empty(shape=(len(y_train),))
prediction[test] = fold_pred
# prediction, scores, estimators = cross_val_predict_score(
# pipeline, x, y_train, cv=cv, metrics=metrics
# )
except stopit.TimeoutException:
# This exception is handled by the ThreadingTimeout context manager.
raise
except KeyboardInterrupt:
raise
except Exception as e:
return prediction, scores, estimators, e
if c_mgr.state == c_mgr.INTERRUPTED:
# A TimeoutException was raised, but not by the context manager.
# This indicates that the outer context manager (the ea) timed out.
raise stopit.utils.TimeoutException()
if not c_mgr:
# For now we treat an eval timeout the same way as
# e.g. NaN exceptions and use the default score.
return prediction, scores, estimators, stopit.TimeoutException()
return prediction, tuple(scores), estimators, None
def evaluate_individual(
    individual: Individual,
    evaluate_pipeline: Callable,
    timeout: float = 1e6,
    deadline: Optional[float] = None,
    add_length_to_score: bool = True,
    **kwargs,
) -> Evaluation:
    """ Evaluate the pipeline specified by individual, and record

    Parameters
    ----------
    individual: Individual
        Blueprint for the pipeline to evaluate.
    evaluate_pipeline: Callable
        Function which takes the pipeline and produces validation predictions,
        scores, estimators and errors.
    timeout: float (default=1e6)
        Maximum time in seconds that the evaluation is allowed to take.
        Don't depend on high accuracy.
        A shorter timeout is imposed if `deadline` is in less than `timeout` seconds.
    deadline: float, optional
        A time in seconds since epoch.
        Cut off evaluation at `deadline` even if `timeout` seconds have not yet elapsed.
    add_length_to_score: bool (default=True)
        Add the length of the individual to the score result of the evaluation.
    **kwargs: Dict, optional (default=None)
        Passed to `evaluate_pipeline` function.

    Returns
    -------
    Evaluation
    """
    result = Evaluation(individual, pid=os.getpid())
    result.start_time = datetime.now()
    if deadline is not None:
        # Never run past the global deadline, even if `timeout` would allow it.
        time_to_deadline = deadline - time.time()
        timeout = min(timeout, time_to_deadline)
    # Measure both wall-clock and CPU time spent in the evaluation.
    with Stopwatch() as wall_time, Stopwatch(time.process_time) as process_time:
        evaluation = evaluate_pipeline(individual.pipeline, timeout=timeout, **kwargs)
        result._predictions, result.score, result._estimators, error = evaluation
    if error is not None:
        # Keep a readable representation only; the raw exception is not stored.
        result.error = f"{type(error)} {str(error)}"
    result.duration = wall_time.elapsed_time
    if add_length_to_score:
        # Shorter pipelines are preferred, so length enters the score tuple
        # as a negative component (higher is better).
        result.score = result.score + (-len(individual.primitives),)
    individual.fitness = Fitness(
        result.score,
        result.start_time,
        wall_time.elapsed_time,
        process_time.elapsed_time,
    )
    return result
|
evaluate_pipeline
|
15.4.4.22-5-10.js
|
// Copyright (c) 2012 Ecma International. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
esid: sec-array.prototype.reduceright
es5id: 15.4.4.22-5-10
description: >
Array.prototype.reduceRight - side-effects produced by step 2 when
an exception occurs
---*/
var obj = {
0: 11,
1: 12
};
var accessed = false;
|
get: function() {
accessed = true;
return 0;
},
configurable: true
});
assert.throws(TypeError, function() {
Array.prototype.reduceRight.call(obj, function() {});
});
assert(accessed, 'accessed !== true');
|
Object.defineProperty(obj, "length", {
|
info.go
|
package metrics
import (
"context"
"fmt"
"time"
"go.opencensus.io/metric/metricproducer"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/pkg/metrics"
)
var (
    // InfoViews contains opencensus views for informational metrics about
    // pomerium itself.
    InfoViews = []*view.View{
        ConfigLastReloadView,
        ConfigLastReloadSuccessView,
        IdentityManagerLastRefreshView,
        ConfigDBVersionView,
        ConfigDBErrorsView,
    }
    // configLastReload records the Unix timestamp (seconds) of the last
    // successful config reload.
    configLastReload = stats.Int64(
        metrics.ConfigLastReloadTimestampSeconds,
        "Timestamp of last successful config reload",
        stats.UnitSeconds)
    // configDBVersion records the last processed databroker config version.
    configDBVersion = stats.Int64(
        metrics.ConfigDBVersion,
        metrics.ConfigDBVersionHelp,
        stats.UnitDimensionless,
    )
    // configDBErrors records the number of errors encountered while parsing
    // a databroker config.
    configDBErrors = stats.Int64(
        metrics.ConfigDBErrors,
        metrics.ConfigDBErrorsHelp,
        stats.UnitDimensionless,
    )
    // configLastReloadSuccess is 1 when the most recent reload succeeded,
    // 0 otherwise.
    configLastReloadSuccess = stats.Int64(
        metrics.ConfigLastReloadSuccess,
        "Returns 1 if last reload was successful",
        stats.UnitDimensionless)
    // identityManagerLastRefresh records the Unix timestamp (seconds) of the
    // last directory refresh.
    identityManagerLastRefresh = stats.Int64(
        metrics.IdentityManagerLastRefreshTimestamp,
        "Timestamp of last directory refresh",
        "seconds",
    )
    // ConfigDBVersionView contains last databroker config version that was processed
    ConfigDBVersionView = &view.View{
        Name:        configDBVersion.Name(),
        Description: configDBVersion.Description(),
        Measure:     configDBVersion,
        TagKeys:     []tag.Key{TagKeyService, TagConfigID},
        Aggregation: view.LastValue(),
    }
    // ConfigDBErrorsView contains list of errors encountered while parsing this databroker config
    ConfigDBErrorsView = &view.View{
        Name:        configDBErrors.Name(),
        Description: configDBErrors.Description(),
        Measure:     configDBErrors,
        TagKeys:     []tag.Key{TagKeyService, TagConfigID},
        Aggregation: view.LastValue(),
    }
    // ConfigLastReloadView contains the timestamp the configuration was last
    // reloaded, labeled by service.
    ConfigLastReloadView = &view.View{
        Name:        configLastReload.Name(),
        Description: configLastReload.Description(),
        Measure:     configLastReload,
        TagKeys:     []tag.Key{TagKeyService},
        Aggregation: view.LastValue(),
    }
    // ConfigLastReloadSuccessView contains the result of the last configuration
    // reload, labeled by service.
    ConfigLastReloadSuccessView = &view.View{
        Name:        configLastReloadSuccess.Name(),
        Description: configLastReloadSuccess.Description(),
        Measure:     configLastReloadSuccess,
        TagKeys:     []tag.Key{TagKeyService},
        Aggregation: view.LastValue(),
    }
    // IdentityManagerLastRefreshView contains the timestamp the identity manager
    // was last refreshed, labeled by service.
    IdentityManagerLastRefreshView = &view.View{
        Name:        identityManagerLastRefresh.Name(),
        Description: identityManagerLastRefresh.Description(),
        Measure:     identityManagerLastRefresh,
        Aggregation: view.LastValue(),
    }
)
// RecordIdentityManagerLastRefresh records that the identity manager refreshed users and groups.
// The recorded value is the current Unix timestamp in seconds.
func RecordIdentityManagerLastRefresh() {
    stats.Record(context.Background(), identityManagerLastRefresh.M(time.Now().Unix()))
}
// SetDBConfigInfo records status, databroker version and error count while parsing
// the configuration from a databroker. Both metrics are tagged with the
// service name and config id.
func SetDBConfigInfo(ctx context.Context, service, configID string, version uint64, errCount int64) {
    log.Info(ctx).
        Str("service", service).
        Str("config_id", configID).
        Uint64("version", version).
        Int64("err_count", errCount).
        Msg("set db config info")

    // Record the last processed config version.
    if err := stats.RecordWithTags(
        ctx,
        []tag.Mutator{
            tag.Insert(TagKeyService, service),
            tag.Insert(TagConfigID, configID),
        },
        configDBVersion.M(int64(version)),
    ); err != nil {
        log.Error(ctx).Err(err).Msg("telemetry/metrics: failed to record config version number")
    }

    // Record the parse error count. Use the caller's ctx (previously
    // context.Background()) for consistency with the version recording above,
    // so any context-carried tags are preserved.
    if err := stats.RecordWithTags(
        ctx,
        []tag.Mutator{
            tag.Insert(TagKeyService, service),
            tag.Insert(TagConfigID, configID),
        },
        configDBErrors.M(errCount),
    ); err != nil {
        log.Error(ctx).Err(err).Msg("telemetry/metrics: failed to record config error count")
    }
}
// SetDBConfigRejected records that a certain databroker config version has been rejected
func SetDBConfigRejected(ctx context.Context, service, configID string, version uint64, err error)
|
// SetConfigInfo records the status, checksum and timestamp of a configuration
// reload. You must register InfoViews or the related config views before calling.
// On success it records the reload timestamp and a success flag of 1; on
// failure it records a success flag of 0.
func SetConfigInfo(ctx context.Context, service, configName string, checksum uint64, success bool) {
    if success {
        registry.setConfigChecksum(service, configName, checksum)

        serviceTag := tag.Insert(TagKeyService, service)
        // Record against the caller's ctx (previously context.Background())
        // so context-carried tags and cancellation propagate, matching the
        // logging calls in this function.
        if err := stats.RecordWithTags(
            ctx,
            []tag.Mutator{serviceTag},
            configLastReload.M(time.Now().Unix()),
        ); err != nil {
            log.Error(ctx).Err(err).Msg("telemetry/metrics: failed to record config checksum timestamp")
        }
        if err := stats.RecordWithTags(
            ctx,
            []tag.Mutator{serviceTag},
            configLastReloadSuccess.M(1),
        ); err != nil {
            log.Error(ctx).Err(err).Msg("telemetry/metrics: failed to record config reload")
        }
    } else {
        // Mark the most recent reload as failed.
        stats.Record(ctx, configLastReloadSuccess.M(0))
    }
    log.Info(ctx).
        Str("service", service).
        Str("config", configName).
        Str("checksum", fmt.Sprintf("%x", checksum)).
        Msg("config: updated config")
}
// SetBuildInfo records the pomerium build info. You must call RegisterInfoMetrics to
// have this exported. `service` and `hostname` are forwarded to the metric
// registry as labels.
func SetBuildInfo(service, hostname string) {
    registry.setBuildInfo(service, hostname)
}
// RegisterInfoMetrics registers non-view based metrics registry globally for export.
// It attaches the package-level registry to the opencensus global producer manager.
func RegisterInfoMetrics() {
    metricproducer.GlobalManager().AddProducer(registry.registry)
}
// AddPolicyCountCallback sets the function to call when exporting the
// policy count metric. You must call RegisterInfoMetrics to have this
// exported. The callback `f` is evaluated lazily at export time.
func AddPolicyCountCallback(service string, f func() int64) {
    registry.addPolicyCountCallback(service, f)
}
|
{
log.Warn(ctx).Err(err).Msg("databroker: invalid config detected, ignoring")
SetDBConfigInfo(ctx, service, configID, version, -1)
}
|
main.go
|
package main
import (
"flag"
"log"
"os"
"github.com/jaypadia-frame/forked-mp4ff/mp4"
)
func main() {
inFilePath := flag.String("i", "", "Required: Path to input mp4 file")
outFilePath := flag.String("o", "", "Required: Output filepath (without extension)")
flag.Parse()
if *inFilePath == "" || *outFilePath == "" {
flag.Usage()
return
}
ifd, err := os.Open(*inFilePath)
if err != nil {
log.Fatalln(err)
}
defer ifd.Close()
parsedMp4, err := mp4.DecodeFile(ifd)
if err != nil {
log.Fatal(err)
}
parsedMp4.Init.Moov.Mvex.Trex.TrackID = 3
ofd, err := os.Create(*outFilePath)
if err != nil {
log.Fatal(err)
}
defer ofd.Close()
err = parsedMp4.Encode(ofd)
|
}
|
if err != nil {
log.Fatal(err)
}
|
predicate.go
|
// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"reflect"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
func NewLoadBalancerIPsChangedPredicate(logger logr.Logger) predicate.Predicate {
return predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
if e.Object == nil {
logger.Error(nil, "CreateEvent has no object", "event", e)
return false
}
if _, ok := e.Object.(*corev1.Service); !ok {
return false
}
logger.Info("Creating a service")
return true
},
UpdateFunc: func(e event.UpdateEvent) bool {
if e.MetaOld == nil || e.MetaNew == nil || e.ObjectOld == nil || e.ObjectNew == nil {
logger.Error(nil, "UpdateEvent has no old or new metadata, or no old or new object", "event", e)
return false
}
var oldService, newService *corev1.Service
var ok bool
if oldService, ok = e.ObjectOld.(*corev1.Service); !ok {
return false
}
if newService, ok = e.ObjectNew.(*corev1.Service); !ok {
return false
}
oldIPs, newIPs := getServiceLoadBalancerIPs(oldService), getServiceLoadBalancerIPs(newService)
if len(newIPs) > 0 && e.MetaOld.GetDeletionTimestamp() != e.MetaNew.GetDeletionTimestamp() {
logger.Info("Updating the deletion timestamp of a service with LoadBalancer IPs")
return true
}
if len(newIPs) > 0 && shouldIgnoreService(oldService) != shouldIgnoreService(newService) {
logger.Info("Updating the ignore annotation of a service with LoadBalancer IPs")
return true
}
if !reflect.DeepEqual(oldIPs, newIPs)
|
return false
},
DeleteFunc: func(e event.DeleteEvent) bool {
if e.Object == nil {
logger.Error(nil, "DeleteEvent has no object", "event", e)
return false
}
if _, ok := e.Object.(*corev1.Service); !ok {
return false
}
logger.Info("Deleting a service")
return true
},
GenericFunc: func(e event.GenericEvent) bool {
return false
},
}
}
|
{
logger.Info("Updating service LoadBalancer IPs")
return true
}
|
dist_train_demo.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid.core as core
import math
import os
import sys
import numpy
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 64
PASS_NUM = 1
def loss_net(hidden, label):
    """Attach a 10-way softmax classifier head to `hidden`.

    Returns (prediction, avg_loss, acc): the softmax output, the mean
    cross-entropy loss, and the batch accuracy against `label`.
    """
    softmax_out = fluid.layers.fc(input=hidden, size=10, act='softmax')
    xent = fluid.layers.cross_entropy(input=softmax_out, label=label)
    mean_loss = fluid.layers.mean(xent)
    batch_acc = fluid.layers.accuracy(input=softmax_out, label=label)
    return softmax_out, mean_loss, batch_acc
def conv_net(img, label):
    """Two conv-pool stages followed by the classifier head from loss_net.

    Returns the (prediction, avg_loss, acc) triple produced by loss_net.
    """
    # Stage 1: 20 filters of size 5, 2x2 max-pool, ReLU, then batch norm.
    stage1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    stage1 = fluid.layers.batch_norm(stage1)
    # Stage 2: 50 filters of size 5, 2x2 max-pool, ReLU.
    stage2 = fluid.nets.simple_img_conv_pool(
        input=stage1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    return loss_net(stage2, label)
def
|
(use_cuda, role, endpoints, current_endpoint, trainer_id, trainers):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
prediction, avg_loss, acc = conv_net(img, label)
test_program = fluid.default_main_program().clone(for_test=True)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimizer.minimize(avg_loss)
t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=endpoints, trainers=trainers)
if role == "pserver":
prog = t.get_pserver_program(current_endpoint)
startup = t.get_startup_program(current_endpoint, pserver_program=prog)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)
exe.run(prog)
elif role == "trainer":
prog = t.get_trainer_program()
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
train_reader = paddle.batch(
paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
exe.run(fluid.default_startup_program())
for pass_id in range(PASS_NUM):
for batch_id, data in enumerate(train_reader()):
acc_np, avg_loss_np = exe.run(
prog, feed=feeder.feed(data), fetch_list=[acc, avg_loss])
if (batch_id + 1) % 10 == 0:
print(
'PassID {0:1}, BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.
format(pass_id, batch_id + 1,
float(avg_loss_np.mean()), float(
acc_np.mean())))
if __name__ == '__main__':
    # Expect exactly five CLI arguments (plus the script name):
    # role, endpoints, current_endpoint, trainer_id, trainers.
    if len(sys.argv) != 6:
        print(
            "Usage: python %s role endpoints current_endpoint trainer_id trainers"
            % sys.argv[0])
        exit(0)
    role, endpoints, current_endpoint, trainer_id, trainers = \
        sys.argv[1:]
    # use_cuda is hard-coded to True here; trainer_id/trainers arrive as
    # strings and must be converted to int.
    train(True, role, endpoints, current_endpoint,
          int(trainer_id), int(trainers))
|
train
|
legacy_trie.rs
|
use crate::account::account::Account;
use crate::account::db_state::DBState;
use crate::account::node_ref::NodeRef;
use crate::account::state_node::StateNode;
use crate::common::address::Address;
use crate::consensus::tree_node::TreeNode;
use crate::database::state_db::StateDB;
use crate::database::IDB;
use crate::traits::{Encode, Exception};
use crate::util::hash::hash;
use futures::Future;
use starling::traits::Database;
use std::cmp::min;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::error::Error;
use std::iter::FromIterator;
use std::sync::{Arc, Mutex};
/// A node in the Merkle Patricia Trie
///
/// A trie position holds either a terminal account (`Leaf`) or an interior
/// branch with references to child nodes (`Branch`).
#[derive(Clone, Debug)]
pub enum NodeType {
    /// Contains an [Account](crate::account::account::Account)
    Leaf(Account),
    /// Contains a [StateNode](crate::account::state_node::StateNode)
    Branch(StateNode),
}
/// Hycon Merkle Patricia Trie, a hashed radix tree with bytewise branching
pub struct LegacyTrie<DBType> {
db: StateDB<DBType, (Vec<u8>, DBState)>,
write_queue: Arc<Mutex<Vec<(Vec<u8>, DBState)>>>,
}
impl<DBType> LegacyTrie<DBType>
where
DBType: IDB,
{
/// Creates a trie backed by `db`, starting with an empty write queue.
pub fn new(db: StateDB<DBType, (Vec<u8>, DBState)>) -> Self {
    Self {
        db,
        write_queue: Arc::new(Mutex::new(Vec::new())),
    }
}
/// Gets the specified Accounts from the tree
///
/// Loads the node stored at `root` and walks the trie once per requested
/// address, producing one entry per address (`None` when absent).
/// Errors if `root` is not present in the database.
pub fn get<'a>(
    &self,
    root: &[u8],
    modified_accounts: &Vec<&'a Address>,
) -> Result<Vec<Option<(&'a Address, Account)>>, Box<Error>> {
    let mut accounts = Vec::with_capacity(modified_accounts.len());
    let root_node = self.db.get_node(root)?;
    // Cache of branch nodes already fetched, shared across the lookups.
    let mut node_map: HashMap<Vec<u8>, StateNode> = HashMap::new();
    match root_node {
        Some(node) => {
            // split_keys pairs each address with a starting depth —
            // NOTE(review): its body is not in view; presumably it exploits
            // shared prefixes between consecutive addresses. Confirm.
            let account_split = self.split_keys(modified_accounts)?;
            for (index, address) in account_split {
                let node = self.traverse_nodes(&node, address, &mut node_map, index);
                match node {
                    Ok(account) => {
                        accounts.push(account);
                    }
                    Err(e) => {
                        return Err(e);
                    }
                }
            }
        }
        None => {
            return Err(Box::new(Exception::new("Root Node not found")));
        }
    }
    Ok(accounts)
}
fn split_keys<'a>(
&self,
keys: &Vec<&'a Address>,
) -> Result<Vec<(usize, &'a Address)>, Box<Error>>
|
/// Inserts the specified accounts into the tree
///
/// `keys` and `values` are parallel: `values[i]` is stored under `keys[i]`.
/// `root` selects an existing tree state to build on (`None` starts from an
/// empty root). Returns the hash of the new root. New nodes are staged on
/// `write_queue` and committed in a single batch at the end.
pub fn insert<'a>(
    &mut self,
    root: Option<&[u8]>,
    keys: Vec<&'a Address>,
    values: &[Account],
) -> Result<Vec<u8>, Box<Error>> {
    // encode accounts and insert to db
    if keys.len() != values.len() {
        return Err(Box::new(Exception::new(
            "Keys and values have different lengths",
        )));
    }
    let split_addresses = self.split_keys(&keys)?;
    // node_map: partially built tree nodes keyed by the key prefix they cover.
    let mut node_map: BTreeMap<Vec<u8>, TreeNode> = BTreeMap::new();
    // ref_map: hashes of untouched child nodes that survive into the new tree.
    let mut ref_map: HashSet<Vec<u8>> = HashSet::new();
    // set root - empty or existing state and create base future
    let mut root_node: TreeNode;
    match root {
        Some(root_hash) => {
            if let Some(db_state) = self.db.get_node(root_hash)? {
                if let Some(state_node) = db_state.node {
                    for child in state_node.node_refs.iter() {
                        ref_map.insert(child.1.child.clone());
                    }
                    root_node = TreeNode::new(
                        NodeType::Branch(state_node),
                        Vec::new(),
                        self.write_queue.clone(),
                        0,
                    );
                } else {
                    // The root must be a branch, never a bare account.
                    return Err(Box::new(Exception::new("DB State is not a state node")));
                }
            } else {
                return Err(Box::new(Exception::new("DB State not found")));
            }
        }
        None => {
            // No previous state: start from an empty branch node.
            let state_node = StateNode::new(Vec::new());
            root_node = TreeNode::new(
                NodeType::Branch(state_node),
                Vec::new(),
                self.write_queue.clone(),
                0,
            );
        }
    }
    let mut prev_split = 0;
    // iterate inserts
    for ((split, key), account) in split_addresses.iter().zip(values.iter()) {
        let mut offset = *split;
        let mut current_node: TreeNode;
        let mut prev_offset: usize;
        // Resume from a node built for a previous key sharing this prefix.
        if offset > 0 {
            let n = min(prev_split, offset);
            if let Some(node) = node_map.get(&key[0..n]) {
                current_node = node.clone();
                if current_node.is_leaf() {
                    // A leaf on the shared prefix must become a branch
                    // before it can host additional children.
                    current_node.upgrade_to_branch()?;
                    node_map.insert(key[0..n].to_vec(), current_node.clone());
                    offset = n;
                }
            } else {
                current_node = root_node.clone();
            }
        } else {
            current_node = root_node.clone();
        }
        // set up to traverse states
        let mut db_state: Option<DBState>;
        if let Some(next_node) = current_node.get_next_node_location(key[offset]) {
            let mut early_out = false;
            // Compare the compressed key segment byte by byte; a mismatch at
            // position i means the edge must be split there.
            for (i, loc) in next_node.node_location.iter().enumerate() {
                if loc != &key[offset + i] {
                    early_out = true;
                    let new_account = *account;
                    let node_hash = hash(&new_account.encode().unwrap(), 32);
                    self.db
                        .insert(&node_hash, &DBState::new(Some(new_account), None, 1))?;
                    let node_ref = NodeRef {
                        node_location: key[offset + i..key.len()].to_vec(),
                        child: node_hash,
                    };
                    let mut new_node = next_node.clone();
                    new_node.node_location =
                        next_node.node_location[i..next_node.node_location.len()].to_vec();
                    // New branch holding both the old edge remainder and the
                    // freshly inserted leaf.
                    let state_node = StateNode::new(vec![node_ref, new_node]);
                    let tree_node = TreeNode::new(
                        NodeType::Branch(state_node),
                        next_node.node_location[0..i].to_vec(),
                        self.write_queue.clone(),
                        offset,
                    );
                    node_map.insert(key[0..offset + i].to_vec(), tree_node);
                    prev_split = offset + i;
                    break;
                }
            }
            if early_out {
                continue;
            } else {
                // Full segment matched: descend into the referenced child.
                prev_offset = offset;
                offset = offset + next_node.node_location.len();
                ref_map.remove(&next_node.child);
                db_state = self.db.get_node(&next_node.child)?;
            }
        } else {
            // Early out if branch is empty
            let new_account = *account;
            let tree_node = TreeNode::new(
                NodeType::Leaf(new_account),
                key[offset..key.len()].to_vec(),
                self.write_queue.clone(),
                offset,
            );
            node_map.insert(key[0..offset + 1].to_vec(), tree_node);
            prev_split = offset + 1;
            continue;
        }
        // Walk down stored states until the slot for this key is found or created.
        while let Some(state) = &db_state {
            if let Some(_prev_account) = &state.account {
                // Existing leaf at this position: replace it with the new account.
                let new_account = *account;
                let tree_node = TreeNode::new(
                    NodeType::Leaf(new_account),
                    key[prev_offset..key.len()].to_vec(),
                    self.write_queue.clone(),
                    prev_offset,
                );
                node_map.insert(key[0..prev_offset + 1].to_vec(), tree_node);
                break;
            } else if let Some(node) = &state.node {
                for child in node.node_refs.iter() {
                    ref_map.insert(child.1.child.clone());
                }
                let tree_node = TreeNode::new(
                    NodeType::Branch(node.clone()),
                    key[prev_offset..offset].to_vec(),
                    self.write_queue.clone(),
                    offset - 1,
                );
                node_map.insert(key[0..offset].to_vec(), tree_node);
                if let Some(node_ref) = node.node_refs.get(&key[offset]) {
                    // check key compression
                    let mut early_out = false;
                    for (i, loc) in node_ref.node_location.iter().enumerate() {
                        if loc != &key[offset + i] {
                            // Mismatch inside the compressed edge: split it,
                            // mirroring the edge-split logic above.
                            early_out = true;
                            let new_account = *account;
                            let node_hash = hash(&new_account.encode().unwrap(), 32);
                            self.db.insert(
                                &node_hash,
                                &DBState::new(Some(new_account), None, 1),
                            )?;
                            let new_node_ref = NodeRef {
                                node_location: key[offset + i..key.len()].to_vec(),
                                child: node_hash,
                            };
                            let mut new_node = node_ref.clone();
                            new_node.node_location = node_ref.node_location
                                [i..node_ref.node_location.len()]
                                .to_vec();
                            let state_node = StateNode::new(vec![new_node, new_node_ref]);
                            let tree_node = TreeNode::new(
                                NodeType::Branch(state_node),
                                node_ref.node_location[0..i].to_vec(),
                                self.write_queue.clone(),
                                offset,
                            );
                            node_map.insert(key[0..offset + i].to_vec(), tree_node);
                            break;
                        }
                    }
                    if early_out {
                        break;
                    }
                    if let Some(next_node) = self.db.get_node(&node_ref.child)? {
                        ref_map.remove(&node_ref.child);
                        if next_node.account.is_none() {
                            // Another branch below: keep descending.
                            prev_offset = offset;
                            offset += node_ref.node_location.len();
                            db_state = Some(next_node);
                            continue;
                        } else {
                            // Child is a leaf: overwrite with the new account.
                            let new_account = *account;
                            let tree_node = TreeNode::new(
                                NodeType::Leaf(new_account),
                                key[offset..key.len()].to_vec(),
                                self.write_queue.clone(),
                                offset,
                            );
                            node_map.insert(key[0..offset + 1].to_vec(), tree_node);
                            prev_split = offset;
                            db_state = None;
                            continue;
                        }
                    } else {
                        // Referenced child missing from DB: create a new leaf.
                        let new_account = *account;
                        let tree_node = TreeNode::new(
                            NodeType::Leaf(new_account),
                            key[offset..key.len()].to_vec(),
                            self.write_queue.clone(),
                            prev_offset,
                        );
                        node_map.insert(key[0..prev_offset + 1].to_vec(), tree_node);
                        prev_split = offset;
                        db_state = None;
                        continue;
                    }
                } else {
                    // No child for this byte yet: insert a new leaf here.
                    let new_account = *account;
                    let tree_node = TreeNode::new(
                        NodeType::Leaf(new_account),
                        key[offset..key.len()].to_vec(),
                        self.write_queue.clone(),
                        prev_offset,
                    );
                    node_map.insert(key[0..prev_offset + 1].to_vec(), tree_node);
                    prev_split = offset;
                    db_state = None;
                    continue;
                }
            } else {
                return Err(Box::new(Exception::new(
                    "Unable to find node, corrupted tree",
                )));
                // we got nothing
            }
        }
    }
    // Drain node_map in reverse lexicographic order of key prefixes, chaining
    // each node's future onto its parent so hashing proceeds bottom-up.
    let cln = node_map.clone();
    let mut futures = Vec::from_iter(cln.iter());
    let mut curr_node: Option<(&Vec<u8>, &TreeNode)> = futures.pop();
    while let Some(node) = &curr_node {
        if let Some(removed_node) = node_map.remove(node.0) {
            if removed_node.parent == 0 {
                root_node.add_future(&removed_node);
            } else if let Some(tree_node) = node_map.get_mut(&node.0[0..node.1.parent]) {
                tree_node.add_future(&removed_node);
            }
            curr_node = futures.pop();
        } else {
            return Err(Box::new(Exception::new(
                "Error constructing tree, cannot resolve futures",
            )));
        }
    }
    // Wait for the root hash, flush staged writes, apply ref-count updates
    // (via update_refs — body not in view), and commit in one batch.
    let tree_root = root_node.wait();
    match tree_root {
        Ok(root) => {
            for (key, value) in self.write_queue.lock().unwrap().iter() {
                self.db.insert(&key, &value)?;
            }
            self.write_queue.lock().unwrap().clear();
            self.update_refs(ref_map)?;
            self.db.batch_write()?;
            Ok(root.child)
        }
        Err(_e) => Err(Box::new(Exception::new("Error generating new root"))),
    }
}
    /// Removes a root and all sub nodes that have a Zero reference count
    ///
    /// Walks downward from `root`: a node whose `ref_count` is 1 is deleted
    /// and its children are queued for the same treatment; a node shared with
    /// another root only has its `ref_count` decremented and is left in place
    /// (its subtree is then not descended into). All deletions/updates are
    /// flushed in a single batch at the end.
    pub fn remove(&mut self, root: &[u8]) -> Result<(), Box<Error>> {
        // Children of deleted nodes still awaiting a visit.
        let mut pending_keys: Vec<Vec<u8>> = Vec::new();
        let mut db_state = self.db.get_node(root)?;
        let mut key = root.to_vec();
        while let Some(state) = &db_state {
            if state.ref_count == 1 {
                // Last reference: delete this node and queue its children.
                self.db.remove(&key)?;
                match &state.node {
                    Some(node) => {
                        for node_ref in &node.node_refs {
                            pending_keys.push(node_ref.1.child.clone());
                        }
                    }
                    None => {}
                }
            } else {
                // Shared with another root: drop one reference, keep the node.
                let mut new_state = state.clone();
                new_state.ref_count -= 1;
                self.db.insert(&key, &new_state)?;
            }
            if let Some(next_key) = pending_keys.pop() {
                key = next_key;
                db_state = self.db.get_node(&key)?;
            } else {
                break;
            }
        }
        // Commit all removals and ref-count updates at once.
        self.db.batch_write()?;
        Ok(())
    }
fn traverse_nodes<'a>(
&self,
root: &DBState,
address: &'a Address,
map: &mut HashMap<Vec<u8>, StateNode>,
split: usize,
) -> Result<Option<(&'a Address, Account)>, Box<Error>> {
let mut state: Option<DBState> = Some(root.clone());;
let mut offset = split;
if offset > 0 {
if let Some(node) = map.get(&address[0..offset]) {
if let Some(node_ref) = node.node_refs.get(&address[offset]) {
if let Some(next_node) = self.db.get_node(&node_ref.child)? {
offset += node_ref.node_location.len();
state = Some(next_node);
} else {
return Err(Box::new(Exception::new(&format!(
"Unable to find node {:?}, corrupted tree",
&node_ref.child
))));
}
}
}
}
while let Some(db_state) = &state {
if let Some(account) = &db_state.account {
return Ok(Some((address, *account)));
//we have an account
} else if let Some(node) = &db_state.node {
map.insert(address[0..offset].to_vec(), node.clone());
if let Some(node_ref) = node.node_refs.get(&address[offset]) {
if let Some(next_node) = self.db.get_node(&node_ref.child)? {
offset += node_ref.node_location.len();
state = Some(next_node);
continue;
} else {
// can't find child node in db error
return Err(Box::new(Exception::new(&format!(
"Unable to find node {:?}, corrupted tree",
&node_ref.child
))));
}
} else {
return Ok(None);
//Account does not exist yet
}
} else {
return Ok(None);
// we got nothing
}
}
Ok(None)
}
//Naive Implementation to test key identification logic
fn update_refs(&mut self, nodes: HashSet<Vec<u8>>) -> Result<(), Box<Error>> {
let refs: Vec<Vec<u8>> = nodes.iter().cloned().collect();
for node in refs {
if let Some(mut db_state) = self.db.get_node(&node)? {
db_state.ref_count = db_state.ref_count + 1;
self.db.insert(&node, &db_state)?;
} else {
return Err(Box::new(Exception::new(
"Could not increment reference count",
)));
}
}
Ok(())
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::account::account::Account;
use crate::account::node_ref::NodeRef;
use crate::common::exodus_block::ExodusBlock;
use crate::database::mock::RocksDBMock;
use crate::traits::{Decode, Encode};
use crate::traits::{Transaction, ValidAddress};
use crate::util::hash::hash;
use rand::{thread_rng, Rng};
use std::env;
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
#[test]
fn it_gets_items_from_a_tree_of_depth_1() {
let path = PathBuf::new();
let mut state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let mut accounts: Vec<NodeRef> = Vec::with_capacity(256);
for i in 0..255 {
let db_state = DBState::new(
Some(Account {
balance: i * 100,
nonce: i as u32,
}),
None,
1,
);
let hash = hash(db_state.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&hash, &db_state);
let location = vec![
i as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
let node_ref = NodeRef::new(&location, &hash);
accounts.push(node_ref);
}
let state_node = StateNode::new(accounts);
let state_hash = hash(state_node.encode().unwrap().as_ref(), 32);
let db_state = DBState::new(None, Some(state_node), 1);
let _ = state_db.insert(&state_hash, &db_state);
let _ = state_db.batch_write();
let legacy_trie = LegacyTrie::new(state_db);
let returned_accounts = legacy_trie.get(
&state_hash,
&vec![
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
);
match returned_accounts {
Ok(vec) => {
assert_eq!(vec.len(), 2);
if let Some(account) = &vec[0] {
assert_eq!(account.1.balance, 0);
assert_eq!(account.1.nonce, 0);
} else {
println!("Node not found");
unimplemented!()
}
if let Some(account) = &vec[1] {
assert_eq!(account.1.balance, 1200);
assert_eq!(account.1.nonce, 12);
} else {
println!("Node not found");
unimplemented!()
}
}
Err(e) => {
println!("Error: {:?}", e);
unimplemented!()
}
}
}
#[test]
fn it_gets_an_item_from_a_depth_greater_than_one() {
let path = PathBuf::new();
let mut state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let first_account = DBState::new(
Some(Account {
balance: 100,
nonce: 1,
}),
None,
1,
);
let first_hash = hash(first_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&first_hash, &first_account);
let first_location = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let first_node_ref = NodeRef::new(&first_location, &first_hash);
let second_account = DBState::new(
Some(Account {
balance: 200,
nonce: 2,
}),
None,
1,
);
let second_hash = hash(second_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&second_hash, &second_account);
let second_location = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let second_node_ref = NodeRef::new(&second_location, &second_hash);
let third_account = DBState::new(
Some(Account {
balance: 300,
nonce: 3,
}),
None,
1,
);
let third_hash = hash(third_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&third_hash, &third_account);
let third_location = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let third_node_ref = NodeRef::new(&third_location, &third_hash);
let second_level_refs = vec![first_node_ref, second_node_ref];
let second_state_node = StateNode::new(second_level_refs);
let second_state_node_state = DBState::new(None, Some(second_state_node), 1);
let second_state_node_hash = hash(&second_state_node_state.encode().unwrap(), 32);
let _ = state_db.insert(&second_state_node_hash, &second_state_node_state);
let first_level_node = NodeRef::new(&vec![0], &second_state_node_hash);
let root_node_refs = vec![first_level_node, third_node_ref];
let root_state_node = StateNode::new(root_node_refs);
let root_db_state = DBState::new(None, Some(root_state_node), 1);
let root_hash = hash(&root_db_state.encode().unwrap(), 32);
let _ = state_db.insert(&root_hash, &root_db_state);
let _ = state_db.batch_write();
let tree = LegacyTrie::new(state_db);
let addresses = vec![
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
];
let returned_accounts = tree.get(&root_hash, &addresses);
match returned_accounts {
Ok(vec) => {
assert_eq!(vec.len(), 3);
// check integrity of returned accounts
for i in 0..vec.len() {
match &vec[i] {
Some(account) => {
assert_eq!(account.1.balance as usize, (i + 1) * 100);
assert_eq!(account.1.nonce as usize, i + 1);
}
None => unimplemented!(),
}
}
}
Err(e) => {
println!("Error: {:?}", e);
unimplemented!()
}
}
}
#[test]
fn it_gets_from_a_tree_with_compressed_branches() {
let path = PathBuf::new();
let mut state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let first_account = DBState::new(
Some(Account {
balance: 100,
nonce: 1,
}),
None,
1,
);
let first_hash = hash(first_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&first_hash, &first_account);
let first_location = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let first_node_ref = NodeRef::new(&first_location, &first_hash);
let second_account = DBState::new(
Some(Account {
balance: 200,
nonce: 2,
}),
None,
1,
);
let second_hash = hash(second_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&second_hash, &second_account);
let second_location = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let second_node_ref = NodeRef::new(&second_location, &second_hash);
let third_account = DBState::new(
Some(Account {
balance: 300,
nonce: 3,
}),
None,
1,
);
let third_hash = hash(third_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&third_hash, &third_account);
let third_location = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let third_node_ref = NodeRef::new(&third_location, &third_hash);
let second_level_refs = vec![first_node_ref, second_node_ref];
let second_state_node = StateNode::new(second_level_refs);
let second_state_node_state = DBState::new(None, Some(second_state_node), 1);
let second_state_node_hash = hash(&second_state_node_state.encode().unwrap(), 32);
let _ = state_db.insert(&second_state_node_hash, &second_state_node_state);
let first_level_node = NodeRef::new(&vec![0, 0, 0, 0, 0], &second_state_node_hash);
let root_node_refs = vec![first_level_node, third_node_ref];
let root_state_node = StateNode::new(root_node_refs);
let root_db_state = DBState::new(None, Some(root_state_node), 1);
let root_hash = hash(&root_db_state.encode().unwrap(), 32);
let _ = state_db.insert(&root_hash, &root_db_state);
let _ = state_db.batch_write();
let tree = LegacyTrie::new(state_db);
let addresses = vec![
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
];
let returned_accounts = tree.get(&root_hash, &addresses);
match returned_accounts {
Ok(vec) => {
assert_eq!(vec.len(), 3);
println!("accounts: {:?}", vec);
// check integrity of returned accounts
for i in 0..vec.len() {
match &vec[i] {
Some(account) => {
assert_eq!(account.1.balance as usize, (i + 1) * 100);
assert_eq!(account.1.nonce as usize, i + 1);
}
None => unimplemented!(),
}
}
}
Err(e) => {
println!("Error: {:?}", e);
unimplemented!()
}
}
}
#[test]
fn it_calculates_the_split_points_for_keys() {
let path = PathBuf::new();
let state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let trie = LegacyTrie::new(state_db);
let address_bytes = vec![
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
];
let mut addresses: Vec<&Address> = Vec::new();
for address in address_bytes.iter() {
addresses.push(address);
}
let split_addresses = trie.split_keys(&addresses).unwrap();
assert_eq!(split_addresses.len(), 3);
assert_eq!(split_addresses[0].0, 0);
assert_eq!(split_addresses[1].0, 1);
assert_eq!(split_addresses[2].0, 0);
}
#[test]
fn it_inserts_256_keys_with_different_first_bytes_into_empty_tree_and_retrieves_them() {
let path = PathBuf::new();
let state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let mut tree = LegacyTrie::new(state_db);
let mut accounts = Vec::with_capacity(256);
let mut addresses = Vec::with_capacity(256);
for i in 0..256 {
let account = Account {
balance: i * 100,
nonce: i as u32,
};
let address = Address::from([
i as u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]);
accounts.push(account);
addresses.push(address);
}
let mut add_refs = Vec::with_capacity(256);
for add in addresses.iter() {
add_refs.push(add)
}
let root_hash = tree.insert(None, add_refs.clone(), &accounts).unwrap();
let retrieved_accounts = tree.get(&root_hash, &add_refs).unwrap();
assert_eq!(retrieved_accounts.len(), 256);
for (i, (opt, original_address)) in
retrieved_accounts.iter().zip(addresses.iter()).enumerate()
{
assert!(opt.is_some());
match opt {
Some((add, account)) => {
assert_eq!(add, &original_address);
assert_eq!(account.balance, (i * 100) as u64);
assert_eq!(account.nonce, i as u32);
}
None => {}
}
}
}
#[test]
fn it_inserts_and_retrieves_a_key_from_an_existing_tree() {
let path = PathBuf::new();
let mut state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let first_account = DBState::new(
Some(Account {
balance: 100,
nonce: 1,
}),
None,
1,
);
let first_hash = hash(first_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&first_hash, &first_account);
let first_location = vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let first_node_ref = NodeRef::new(&first_location, &first_hash);
let second_account = DBState::new(
Some(Account {
balance: 200,
nonce: 2,
}),
None,
1,
);
let second_hash = hash(second_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&second_hash, &second_account);
let second_location = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let second_node_ref = NodeRef::new(&second_location, &second_hash);
let third_account = DBState::new(
Some(Account {
balance: 300,
nonce: 3,
}),
None,
1,
);
let third_hash = hash(third_account.encode().unwrap().as_ref(), 32);
let _ = state_db.insert(&third_hash, &third_account);
let third_location = vec![1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let third_node_ref = NodeRef::new(&third_location, &third_hash);
let second_level_refs = vec![first_node_ref, second_node_ref];
let second_state_node = StateNode::new(second_level_refs);
let second_state_node_state = DBState::new(None, Some(second_state_node), 1);
let second_state_node_hash = hash(&second_state_node_state.encode().unwrap(), 32);
let _ = state_db.insert(&second_state_node_hash, &second_state_node_state);
let first_level_node = NodeRef::new(&vec![0], &second_state_node_hash);
let root_node_refs = vec![first_level_node, third_node_ref];
let root_state_node = StateNode::new(root_node_refs);
let root_db_state = DBState::new(None, Some(root_state_node), 1);
let root_hash = hash(&root_db_state.encode().unwrap(), 32);
let _ = state_db.insert(&root_hash, &root_db_state);
let _ = state_db.batch_write();
let mut tree = LegacyTrie::new(state_db);
let account = Account {
balance: 500,
nonce: 2,
};
let address_bytes = vec![
&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
&[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
];
let mut addresses: Vec<&Address> = Vec::new();
for address in address_bytes.iter() {
addresses.push(address);
}
let accounts = vec![account.clone(), account.clone(), account.clone()];
let result = tree.insert(Some(&root_hash), address_bytes.clone(), &accounts);
let new_root = result.unwrap();
assert_ne!(&new_root, &root_hash);
let returned_accounts = tree.get(&new_root, &addresses);
match returned_accounts {
Ok(vec) => {
assert_eq!(vec.len(), 3);
// check integrity of returned accounts
for i in 0..vec.len() {
match &vec[i] {
Some(account) => {
assert_eq!(account.1.balance as usize, 500);
assert_eq!(account.1.nonce as usize, 2);
}
None => unimplemented!(),
}
}
}
Err(e) => {
println!("Error: {:?}", e);
unimplemented!()
}
}
}
#[test]
fn it_inserts_a_node_into_a_compressed_branch() {
let path = PathBuf::new();
let state_db: StateDB<RocksDBMock> = StateDB::new(path, None).unwrap();
let mut tree = LegacyTrie::new(state_db);
let address_bytes = vec![
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
];
let mut addresses: Vec<&Address> = Vec::new();
for address in address_bytes.iter() {
addresses.push(address);
}
addresses.sort();
let mut account_vec = Vec::with_capacity(4);
for i in 1..8 {
let account = Account {
balance: i * 100,
nonce: i as u32,
};
account_vec.push(account);
}
let result = tree.insert(None, addresses.clone(), &account_vec);
let new_root = result.unwrap();
let accounts = tree.get(&new_root, &addresses).unwrap();
assert_eq!(accounts.len(), 7);
for (i, (opt, original_address)) in accounts.iter().zip(addresses.iter()).enumerate() {
assert!(opt.is_some());
match opt {
Some((add, account)) => {
assert_eq!(&add, &original_address);
assert_eq!(account.balance, ((i + 1) * 100) as u64);
assert_eq!(account.nonce, (i + 1) as u32);
}
None => {}
}
}
let new_address =
Address::from_bytes(&[0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let new_account = Account::new(1100, 1);
let vec = vec![new_account];
let new_result = tree
.insert(Some(&new_root), vec![&new_address], &vec)
.unwrap();
addresses.push(&new_address);
addresses.sort();
let accounts = tree.get(&new_result, &addresses).unwrap();
assert_eq!(accounts.len(), 8);
for (i, (opt, original_address)) in accounts.iter().zip(addresses.iter()).enumerate() {
assert!(opt.is_some());
match opt {
Some((add, account)) => {
assert_eq!(&add, &original_address);
if i == 3 {
assert_eq!(account.balance, 1100);
assert_eq!(account.nonce, 1);
}
}
None => {}
}
}
}
#[test]
fn it_matches_typescript_world_state_for_exodus_block() {
let mut path = env::current_dir().unwrap();
path.push("data/exodusBlock.dat");
let mut exodus_file = File::open(path).unwrap();
let mut exodus_buf = Vec::new();
exodus_file.read_to_end(&mut exodus_buf).unwrap();
let exodus = ExodusBlock::decode(&exodus_buf).unwrap();
let mut keypairs: Vec<(Address, Account)> = Vec::with_capacity(12000);
let mut addresses: Vec<&Address> = Vec::with_capacity(12000);
let mut accounts: Vec<Account> = Vec::with_capacity(12000);
match &exodus.txs {
Some(tx_vec) => {
for tx in tx_vec {
let amount: u64 = tx.get_amount();
let nonce: u32;
if let Some(tx_nonce) = tx.get_nonce() {
nonce = tx_nonce;
} else {
break;
}
if let Some(add) = tx.get_to() {
keypairs.push((add, Account::new(amount, nonce)));
} else {
break;
}
}
}
None => {}
}
keypairs.sort_by(|a, b| a.0.cmp(&b.0));
for (key, value) in keypairs.iter() {
addresses.push(&key);
accounts.push(*value);
}
let db_path = PathBuf::new();
let state_db: StateDB<RocksDBMock> = StateDB::new(db_path, None).unwrap();
let mut tree = LegacyTrie::new(state_db);
let root = tree.insert(None, addresses.clone(), &accounts).unwrap();
let expected_root = vec![
202, 69, 158, 107, 102, 235, 159, 245, 39, 221, 20, 207, 134, 180, 208, 199, 131, 45,
190, 90, 112, 243, 240, 108, 135, 97, 169, 165, 102, 78, 15, 252,
];
assert_eq!(root, expected_root);
let retrieved = tree.get(&root, &addresses).unwrap();
for (ret, keypair) in retrieved.iter().zip(keypairs.iter()) {
assert!(ret.is_some());
match ret {
Some((add, account)) => {
assert_eq!(add, &&keypair.0);
assert_eq!(account.balance, keypair.1.balance);
assert_eq!(account.nonce, keypair.1.nonce);
}
None => {}
}
}
}
#[test]
fn it_increments_the_reference_count_for_untraversed_branches_and_prunes() {
let db_path = PathBuf::new();
let state_db: StateDB<RocksDBMock> = StateDB::new(db_path, None).unwrap();
let mut tree = LegacyTrie::new(state_db);
let address_bytes = vec![
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
];
let mut addresses: Vec<&Address> = Vec::new();
for address in address_bytes.iter() {
addresses.push(&address);
}
addresses.sort();
let mut account_vec = Vec::with_capacity(4);
for i in 1..8 {
let account = Account {
balance: i * 100,
nonce: i as u32,
};
account_vec.push(account);
}
let result = tree.insert(None, addresses.clone(), &account_vec);
let new_root = result.unwrap();
let new_address =
Address::from_bytes(&[0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
let new_account = Account::new(1100, 1);
let vec = vec![new_account];
let new_result = tree
.insert(Some(&new_root), vec![&new_address], &vec)
.unwrap();
addresses.push(&new_address);
addresses.sort();
let _ = tree.remove(&new_root);
assert!(tree.db.get_node(&new_root).is_err());
let accounts = tree.get(&new_result, &addresses).unwrap();
assert_eq!(accounts.len(), 8);
for (i, (opt, original_address)) in accounts.iter().zip(addresses.iter()).enumerate() {
assert!(opt.is_some());
match opt {
Some((add, account)) => {
assert_eq!(&add, &original_address);
if i == 3 {
assert_eq!(account.balance, 1100);
assert_eq!(account.nonce, 1);
}
}
None => {}
}
}
}
    /// Applies 100 rounds of random account updates on top of the exodus
    /// state, prunes all superseded roots, and verifies the final root is
    /// still fully readable afterwards.
    #[test]
    fn it_can_prune_a_real_tree_after_numerous_inserts() {
        let db_path = PathBuf::new();
        let state_db: StateDB<RocksDBMock> = StateDB::new(db_path, None).unwrap();
        let mut tree = LegacyTrie::new(state_db);
        let (root, addresses) = initiate_exodus_state::<RocksDBMock>(&mut tree);
        let mut address_refs = Vec::with_capacity(addresses.len());
        for add in addresses.iter() {
            address_refs.push(add);
        }
        let mut rng = thread_rng();
        let mut current_root = root;
        let mut roots = vec![current_root.clone()];
        for _ in 0..100 {
            // Touch between 1 and 99 random accounts each round.
            let num = rng.gen_range(1, 100);
            let mut changed_addresses = select_random_accounts(&mut rng, &addresses, num);
            changed_addresses.sort();
            let changed_accounts = tree.get(&current_root, &changed_addresses).unwrap();
            let mut updated_accounts = Vec::with_capacity(changed_accounts.len());
            for modified in changed_accounts {
                if let Some(modified_account) = modified {
                    let mut new_account = modified_account.1;
                    new_account.balance += 100;
                    new_account.nonce += 1;
                    updated_accounts.push(new_account);
                }
            }
            let new_root = tree
                .insert(
                    Some(&current_root),
                    changed_addresses.clone(),
                    &updated_accounts,
                )
                .unwrap();
            roots.push(new_root.clone());
            current_root = new_root;
        }
        let retrieved = tree.get(&current_root, &address_refs);
        assert!(retrieved.is_ok());
        let last_root = roots.pop();
        assert_eq!(Some(current_root.clone()), last_root);
        assert_eq!(roots.len(), 100);
        // Prune every superseded root, oldest first.
        while roots.len() > 0 {
            let removed = roots.remove(0);
            let _ = tree.remove(&removed);
        }
        // The live root must survive pruning of all of its predecessors.
        let post_prune = tree.get(&current_root, &address_refs);
        assert!(post_prune.is_ok());
    }
#[test]
fn it_can_update_accounts_correctly_in_real_tree() {
let address_bytes = vec![
[
0, 184, 45, 82, 76, 76, 245, 63, 56, 195, 39, 82, 177, 210, 89, 69, 92, 228, 154,
180,
],
[
4, 202, 92, 91, 92, 23, 108, 236, 249, 159, 71, 113, 1, 152, 195, 240, 110, 23,
160, 110,
],
[
9, 118, 136, 113, 189, 135, 84, 215, 3, 184, 12, 46, 239, 216, 52, 53, 222, 227,
195, 220,
],
[
25, 8, 48, 4, 188, 150, 120, 105, 88, 199, 220, 50, 253, 143, 241, 100, 229, 188,
8, 112,
],
[
44, 145, 42, 145, 209, 34, 118, 224, 204, 77, 85, 90, 249, 11, 239, 74, 44, 200,
144, 9,
],
[
56, 196, 213, 249, 206, 148, 250, 50, 161, 52, 165, 157, 98, 191, 230, 56, 52, 107,
18, 139,
],
[
62, 185, 33, 237, 70, 140, 178, 215, 224, 176, 42, 225, 227, 0, 98, 119, 74, 9,
120, 252,
],
[
63, 86, 219, 226, 161, 32, 122, 181, 254, 227, 42, 47, 135, 17, 0, 253, 164, 1, 99,
0,
],
[
63, 183, 251, 131, 137, 192, 126, 94, 45, 127, 39, 169, 31, 162, 233, 122, 239, 52,
103, 115,
],
[
67, 70, 229, 239, 103, 112, 35, 243, 228, 118, 71, 63, 151, 213, 147, 50, 55, 89,
209, 63,
],
[
108, 158, 174, 104, 242, 89, 239, 32, 143, 191, 194, 138, 252, 100, 19, 213, 223,
87, 53, 48,
],
[
129, 165, 238, 87, 79, 108, 189, 29, 42, 81, 80, 232, 120, 171, 25, 118, 64, 126,
213, 146,
],
[
131, 255, 82, 147, 112, 87, 197, 56, 225, 27, 30, 108, 233, 121, 109, 214, 190, 92,
56, 8,
],
[
132, 103, 151, 195, 214, 130, 244, 164, 37, 2, 69, 140, 5, 147, 11, 43, 98, 132,
163, 44,
],
[
136, 197, 141, 75, 34, 168, 185, 128, 21, 147, 51, 42, 91, 216, 77, 68, 216, 80,
20, 236,
],
[
139, 130, 218, 106, 46, 221, 103, 113, 145, 61, 143, 69, 112, 16, 213, 217, 92,
229, 240, 61,
],
[
151, 152, 0, 10, 65, 100, 118, 241, 81, 181, 216, 17, 106, 138, 87, 14, 221, 88,
249, 34,
],
[
158, 74, 97, 253, 98, 14, 44, 65, 80, 115, 183, 31, 95, 204, 163, 71, 247, 173, 44,
33,
],
[
169, 79, 124, 116, 142, 150, 242, 176, 150, 207, 205, 166, 243, 181, 188, 97, 5,
51, 177, 63,
],
[
196, 45, 34, 70, 209, 31, 153, 190, 236, 126, 230, 238, 218, 34, 9, 2, 16, 60, 19,
171,
],
[
201, 73, 63, 7, 121, 164, 32, 70, 54, 107, 114, 67, 180, 5, 200, 249, 30, 46, 28,
43,
],
[
204, 103, 211, 92, 186, 80, 52, 51, 242, 20, 172, 205, 154, 33, 226, 226, 50, 83,
244, 181,
],
[
207, 226, 50, 111, 243, 225, 57, 190, 79, 38, 62, 168, 89, 238, 56, 204, 25, 124,
194, 19,
],
[
251, 89, 230, 192, 154, 85, 91, 111, 230, 209, 27, 205, 83, 35, 238, 48, 233, 4,
136, 168,
],
[
255, 237, 6, 60, 87, 23, 97, 249, 138, 248, 127, 149, 40, 191, 2, 123, 54, 129, 2,
198,
],
];
let mut addresses: Vec<&Address> = Vec::new();
for address in address_bytes.iter() {
addresses.push(&address);
}
addresses.sort();
let mut account_vec = Vec::with_capacity(4);
for i in 1..addresses.len() + 1 {
let account = Account {
balance: i as u64 * 100,
nonce: i as u32,
};
account_vec.push(account);
}
let db_path = PathBuf::new();
let state_db: StateDB<RocksDBMock> = StateDB::new(db_path, None).unwrap();
let mut tree = LegacyTrie::new(state_db);
let (root, _) = initiate_exodus_state(&mut tree);
let new_root = tree.insert(Some(&root), addresses, &account_vec);
println!("Root: {:?}", new_root);
}
// Helper Functions for easier construction of tests
    /// Picks up to `number` addresses from `accounts` at random.
    ///
    /// NOTE(review): indices are drawn with replacement and deduplicated via
    /// the BTreeMap, so the result may contain fewer than `number` addresses;
    /// it is always returned in sorted order.
    fn select_random_accounts<'a>(
        rng: &mut rand::prelude::ThreadRng,
        accounts: &'a Vec<Address>,
        number: usize,
    ) -> Vec<&'a Address> {
        assert!(number <= accounts.len());
        // BTreeMap keyed by address doubles as an ordered set.
        let mut add_set: BTreeMap<&Address, ()> = BTreeMap::new();
        let mut address_vec = Vec::with_capacity(number);
        for _ in 0..number {
            let index = rng.gen_range(0, accounts.len());
            add_set.insert(&accounts[index], ());
        }
        for add in add_set {
            address_vec.push(add.0);
        }
        address_vec
    }
fn initiate_exodus_state<'a, T>(tree: &mut LegacyTrie<T>) -> (Vec<u8>, Vec<Address>)
where
T: IDB,
{
let mut path = env::current_dir().unwrap();
path.push("data/exodusBlock.dat");
let mut exodus_file = File::open(path).unwrap();
let mut exodus_buf = Vec::new();
exodus_file.read_to_end(&mut exodus_buf).unwrap();
let exodus = ExodusBlock::decode(&exodus_buf).unwrap();
let mut keypairs: Box<Vec<(Address, Account)>> = Box::new(Vec::with_capacity(12000));
let mut addresses: Vec<Address> = Vec::with_capacity(12000);
let mut accounts: Vec<Account> = Vec::with_capacity(12000);
match &exodus.txs {
Some(tx_vec) => {
for tx in tx_vec {
let amount: u64 = tx.get_amount();
let nonce: u32;
if let Some(tx_nonce) = tx.get_nonce() {
nonce = tx_nonce;
} else {
break;
}
if let Some(add) = tx.get_to() {
keypairs.push((add, Account::new(amount, nonce)));
} else {
break;
}
}
}
None => {}
}
let mut address_refs = Vec::with_capacity(keypairs.len());
keypairs.sort_by(|a, b| a.0.cmp(&b.0));
for (key, value) in keypairs.iter() {
let k = key.clone();
address_refs.push(key);
addresses.push(k);
accounts.push(*value);
}
addresses.sort();
(
tree.insert(None, address_refs, &accounts).unwrap(),
addresses,
)
}
}
|
{
if keys.is_empty() {
return Err(Box::new(Exception::new("No keys provided")));
}
let mut splits: Vec<(usize, &Address)> = Vec::with_capacity(keys.len());
for i in 0..keys.len() {
if i == 0 {
splits.push((0, &keys[i]))
} else {
for j in 0..19 {
if keys[i - 1][j] != keys[i][j] {
splits.push((j, &keys[i]));
break;
}
}
}
}
Ok(splits)
}
|
getQueryPack.go
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package operationalinsights
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// An Log Analytics QueryPack definition.
// API Version: 2019-09-01-preview.
func LookupQueryPack(ctx *pulumi.Context, args *LookupQueryPackArgs, opts ...pulumi.InvokeOption) (*LookupQueryPackResult, error) {
var rv LookupQueryPackResult
err := ctx.Invoke("azure-native:operationalinsights:getQueryPack", args, &rv, opts...)
if err != nil
|
return &rv, nil
}
// LookupQueryPackArgs holds the input arguments for LookupQueryPack.
type LookupQueryPackArgs struct {
	// The name of the Log Analytics QueryPack resource.
	QueryPackName string `pulumi:"queryPackName"`
	// The name of the resource group. The name is case insensitive.
	ResourceGroupName string `pulumi:"resourceGroupName"`
}
// LookupQueryPackResult describes a Log Analytics QueryPack definition.
type LookupQueryPackResult struct {
	// Azure resource Id
	Id string `pulumi:"id"`
	// Resource location
	Location string `pulumi:"location"`
	// Azure resource name
	Name string `pulumi:"name"`
	// Current state of this QueryPack: whether or not is has been provisioned within the resource group it is defined. Users cannot change this value but are able to read from it. Values will include Succeeded, Deploying, Canceled, and Failed.
	ProvisioningState string `pulumi:"provisioningState"`
	// The unique ID of your application. This field cannot be changed.
	QueryPackId string `pulumi:"queryPackId"`
	// Resource tags
	Tags map[string]string `pulumi:"tags"`
	// Creation Date for the Log Analytics QueryPack, in ISO 8601 format.
	TimeCreated string `pulumi:"timeCreated"`
	// Last modified date of the Log Analytics QueryPack, in ISO 8601 format.
	TimeModified string `pulumi:"timeModified"`
	// Azure resource type
	Type string `pulumi:"type"`
}
|
{
return nil, err
}
|
utils.py
|
from PIL import ImageChops, Image as PILImage
from http.client import HTTPConnection
from time import sleep
from traceback import format_stack, print_exc
def Tint(image, color):
    """Return a copy of ``image`` tinted toward ``color``.

    Blends the image with a solid-color RGB image of the same size at a
    fixed 0.36 blend ratio.
    """
    overlay = PILImage.new('RGB', image.size, color)
    return ImageChops.blend(image, overlay, 0.36)
def GetStatusCode(host, path="/"):
    """ This function retreives the status code of a website by requesting
        HEAD data from the host. This means that it only requests the headers.
        If the host cannot be reached or something else goes wrong, it returns
        None instead.
    """
    try:
        conn = HTTPConnection(host)
        conn.request("HEAD", path)
        return conn.getresponse().status
    except Exception:
        # Best-effort probe: log the failure but report it as "no status".
        print_exc()
        return None
def WaitOK(host, path="/"):
    """Block until ``host``/``path`` answers with HTTP 200, polling every 5s."""
    while True:
        if GetStatusCode(host, path) == 200:
            break
        sleep(5)
| |
user.component.ts
|
import { Component, OnDestroy, OnInit, ViewEncapsulation } from "@angular/core";
import { AngularFireStorage } from "@angular/fire/storage";
import { FormBuilder, FormGroup } from "@angular/forms";
import { DomSanitizer, SafeStyle } from "@angular/platform-browser";
import { Subscription } from "rxjs";
import { concatMap, finalize } from "rxjs/operators";
import { User } from "../../services/auth/auth.service";
import { ToastService } from "../../services/toast/toast.service";
import { UserDatabase, UsersService } from "../../services/users/users.service";
import {
acountUser,
countryCode,
paymentMethods,
statusUser,
transformCountry,
} from "./variablesUser";
@Component({
selector: "app-user",
templateUrl: "./user.component.html",
styleUrls: ["./user.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class
|
implements OnInit, OnDestroy {
localStoreUser: User;
isLoading = false;
error: string = null;
profileForm: FormGroup;
previewUrl: string | ArrayBuffer;
fileInput: File;
items: { Code: string; Name: string }[] = transformCountry(countryCode);
paymentMethods = paymentMethods;
statusUser = statusUser;
acountUser = acountUser
userDatabase: UserDatabase;
filePath: string;
// unsubscribe
userDatabaseSub: Subscription;
usersServiceSub: Subscription;
uploadFileAndUpdateUserSub: Subscription;
updateUserSub: Subscription;
storageSub: Subscription;
//
  constructor(
    private fb: FormBuilder,
    private usersService: UsersService,
    private storage: AngularFireStorage,
    public sanitizer: DomSanitizer,
    public toastService: ToastService
  ) {
    // NOTE(review): assumes "userData" exists in localStorage — JSON.parse
    // yields null otherwise and the .email read below would throw. Confirm an
    // auth guard upstream guarantees this.
    this.localStoreUser = JSON.parse(localStorage.getItem("userData"));
    // Avatar uploads are stored per user, keyed by email.
    this.filePath = `avatars/${this.localStoreUser.email}`;
  }
  ngOnInit() {
    // Rebuild the form whenever the current user record changes, so the
    // controls always mirror the latest database state.
    this.usersServiceSub =
      this.usersService.currentUserDatabaseSubject.subscribe(
        (userDatabase: UserDatabase) => {
          this.userDatabase = userDatabase;
          this.previewUrl = this.userDatabase ? this.userDatabase.avatar : "";
          this.buildForm();
        }
      );
    // Build once immediately in case the subject has not emitted yet.
    this.buildForm();
  }
  // (Re)create the profile form, seeding each control from the loaded user
  // record and falling back to defaults when no record is available yet.
  buildForm() {
    this.profileForm = this.fb.group({
      avatar: [null],
      status: [this.userDatabase?.status ?? statusUser[0]],
      username: [this.userDatabase?.displayName ?? ""],
      country: [this.userDatabase?.country ?? ""],
      paymentMethod: [this.userDatabase?.paymentMethod ?? ""],
    });
  }
onSelectFile(event) {
if (event.target.files && event.target.files[0]) {
this.fileInput = event.target.files[0];
var reader = new FileReader();
reader.readAsDataURL(event.target.files[0]); // read file as data url
reader.onload = (event) => {
this.previewUrl = event.target.result; // called once readAsDataURL is completed
};
}
}
  // Persist the edited profile. If a new avatar file was selected, upload it
  // first and then save; otherwise save the record directly.
  onSubmit(form: FormGroup) {
    // Merge the form values over the existing record so untouched
    // fields are preserved.
    const newUser: UserDatabase = {
      ...this.userDatabase,
      displayName: form.value.username,
      country: form.value.country,
      paymentMethod: form.value.paymentMethod,
      status: form.value.status,
    };
    if (this.fileInput) {
      this.uploadFileAndUpdateUserSub = this.uploadFileAndUpdateUser(
        this.fileInput,
        newUser
      );
    } else {
      this.updateUserSub = this.updateUser(newUser);
    }
  }
  // Upload the avatar to Cloud Storage; once the upload stream settles,
  // resolve its download URL, write it into the user record and persist the
  // record. Returns the outer upload subscription.
  uploadFileAndUpdateUser(file: File, user: UserDatabase) {
    return this.storage
      .upload(this.filePath, file)
      .snapshotChanges()
      .pipe(
        // finalize fires when the upload observable completes (or errors).
        finalize(() => {
          this.storageSub = this.storage
            .ref(this.filePath)
            .getDownloadURL()
            .pipe(
              concatMap((downloadURL: any) => {
                // Attach the fresh avatar URL before saving the profile.
                user.avatar = downloadURL;
                return this.usersService.updateUser(
                  this.userDatabase.databaseId,
                  user
                );
              })
            )
            .subscribe((data: UserDatabase) => {
              this.showToast(
                "Save Profile Finish",
                "bg-success",
                "toast-bottom-right"
              );
              // Broadcast the updated record and resync the form.
              this.usersService.currentUserDatabaseSubject.next(data);
              this.buildForm();
            });
        })
      )
      .subscribe();
  }
updateUser(user: UserDatabase) {
return this.usersService
.updateUser(this.userDatabase.databaseId, user)
.subscribe((data: UserDatabase) => {
this.showToast(
"Save Profile Finish",
"bg-success",
"toast-bottom-right"
);
this.usersService.currentUserDatabaseSubject.next(data);
this.buildForm();
});
}
showToast(mess: string, style: string, position: string) {
this.toastService.show(mess, {
classname: `${style} text-light ${position}`,
});
}
onReset() {
this.buildForm();
this.previewUrl = this.userDatabase.avatar;
}
style(data: string): SafeStyle {
return this.sanitizer.bypassSecurityTrustStyle(data);
}
  ngOnDestroy() {
    // Tear down every subscription that may have been started; the `&&`
    // guards cover subscriptions that were never created.
    this.usersServiceSub && this.usersServiceSub.unsubscribe();
    this.uploadFileAndUpdateUserSub &&
      this.uploadFileAndUpdateUserSub.unsubscribe();
    this.updateUserSub && this.updateUserSub.unsubscribe();
    this.storageSub && this.storageSub.unsubscribe();
  }
}
|
UserComponent
|
ClassAndModuleThatMergeWithModulesExportedStaticFunctionUsingClassPrivateStatics_es5.1.normal.js
|
import _class_call_check from "@swc/helpers/lib/_class_call_check.js";
// ES5 lowering of a TypeScript class: `clodule` is the constructor function
// and the static `sfn` is attached directly to it. The constructor name was
// lost to a garbled span and is restored here (it is referenced by
// `_class_call_check(this, clodule)` and the namespace merge below).
var clodule = /*#__PURE__*/ function() {
    "use strict";
    function clodule() {
        _class_call_check(this, clodule);
    }
    clodule.sfn = function sfn(id) {
        return 42;
    };
    return clodule;
}();
(function(clodule1) {
var fn = function fn(x, y) {
return clodule.sfn("a");
};
clodule1.fn = fn;
})(clodule || (clodule = {}));
|
clodule
|
MetricsService.ts
|
import type {
ActionServiceBase,
ExtractActionTypeDefinitions,
PickActionFunctions,
} from "$common/ServiceBase";
import { getActionMethods } from "$common/ServiceBase";
import type { RootConfig } from "$common/config/RootConfig";
import type { MetricsEventFactory } from "$common/metrics-service/MetricsEventFactory";
import type { ProductHealthEventFactory } from "$common/metrics-service/ProductHealthEventFactory";
import type { RillIntakeClient } from "$common/metrics-service/RillIntakeClient";
import type { DataModelerStateService } from "$common/data-modeler-state-service/DataModelerStateService";
import type {
CommonFields,
MetricsEvent,
} from "$common/metrics-service/MetricsTypes";
import {
EntityType,
StateType,
} from "$common/data-modeler-state-service/entity-state-service/EntityStateService";
/**
* We have DataModelerStateService as the 1st arg to have a structure for PickActionFunctions
*/
export type MetricsEventFactoryClasses = PickActionFunctions<
CommonFields,
ProductHealthEventFactory
>;
export type MetricsActionDefinition = ExtractActionTypeDefinitions<
CommonFields,
MetricsEventFactoryClasses
>;
export class MetricsService
implements ActionServiceBase<MetricsActionDefinition>
{
private actionsMap: {
[Action in keyof MetricsActionDefinition]?: MetricsEventFactoryClasses;
} = {};
  public constructor(
    private readonly config: RootConfig,
    private readonly dataModelerStateService: DataModelerStateService,
    private readonly rillIntakeClient: RillIntakeClient,
    private readonly metricsEventFactories: Array<MetricsEventFactory>
  ) {
    // Index every factory's action methods by action name so dispatch()
    // can look up the owning factory in O(1).
    metricsEventFactories.forEach((actions) => {
      getActionMethods(actions).forEach((action) => {
        this.actionsMap[action] = actions;
      });
    });
  }
  /**
   * Look up the factory that owns `action`, build the metrics event with the
   * common fields prepended, and fire it at the Rill intake endpoint.
   * No-op when telemetry is disabled or the action is unknown.
   */
  public async dispatch<Action extends keyof MetricsActionDefinition>(
    action: Action,
    args: MetricsActionDefinition[Action]
  ): Promise<any> {
    // Respect the local telemetry opt-out.
    if (!this.config.local.sendTelemetryData) return;
    if (!this.actionsMap[action]?.[action]) {
      console.log(`${action} not found`);
      return;
    }
    const actionsInstance = this.actionsMap[action];
    // Invoke the factory method with common fields first, then the
    // caller-supplied arguments.
    const event: MetricsEvent = await actionsInstance[action].call(
      actionsInstance,
      this.getCommonFields(),
      ...args
    );
    await this.rillIntakeClient.fireEvent(event);
  }
private getCommonFields(): CommonFields {
const applicationState = this.dataModelerStateService.getApplicationState();
return {
app_name: this.config.metrics.appName,
install_id: this.config.local.installId,
build_id: this.config.local.version ?? "",
version: this.config.local.version ?? "",
project_id: applicationState.projectId,
|
entity_type: applicationState.activeEntity?.type ?? "",
entity_id: applicationState.activeEntity?.id ?? "",
};
}
}
| |
commands.py
|
from __future__ import print_function
from oauth2client.client import OAuth2WebServerFlow
import gmusicapi
from mopidy import commands
class GMusicCommand(commands.Command):
    """Top-level `gmusic` CLI command; groups the gmusic subcommands."""
    def __init__(self):
        super(GMusicCommand, self).__init__()
        # Currently the only subcommand: interactive OAuth login.
        self.add_child('login', LoginCommand())
class LoginCommand(commands.Command):
|
    def run(self, args, config):
        """Walk the user through Google Music's OAuth2 flow on the console.

        Prints an authorization URL, exchanges the code the user pastes
        back for credentials, and prints the resulting refresh_token for
        the user to add to their mopidy config.
        """
        # Reuse the OAuth client settings bundled with gmusicapi.
        oauth_info = gmusicapi.Mobileclient._session_class.oauth
        flow = OAuth2WebServerFlow(**oauth_info._asdict())
        print()
        print('Go to the following URL to get an initial auth code, then ' +
              'provide it below: ' + flow.step1_get_authorize_url())
        print()
        try:
            initial_code = raw_input('code: ')
        except NameError:
            # Python 3
            initial_code = input('code: ')
        credentials = flow.step2_exchange(initial_code)
        refresh_token = credentials.refresh_token
        print('\nPlease update your config to include the following:')
        print()
        print('[gmusic]')
        print('refresh_token =', refresh_token)
        print()
|
|
index.gql_query.ts
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import gql from 'graphql-tag';
// GraphQL query for authentication events; a garbled span inside
// lastSuccess.host is restored to `id`/`name`, mirroring lastFailure.host.
export const authenticationsQuery = gql`
  query GetAuthenticationsQuery(
    $sourceId: ID!
    $timerange: TimerangeInput!
    $pagination: PaginationInput!
    $filterQuery: String
    $defaultIndex: [String!]!
    $inspect: Boolean!
  ) {
    source(id: $sourceId) {
      id
      Authentications(
        timerange: $timerange
        pagination: $pagination
        filterQuery: $filterQuery
        defaultIndex: $defaultIndex
      ) {
        totalCount
        edges {
          node {
            _id
            failures
            successes
            user {
              name
            }
            lastSuccess {
              timestamp
              source {
                ip
              }
              host {
                id
                name
              }
            }
            lastFailure {
              timestamp
              source {
                ip
              }
              host {
                id
                name
              }
            }
          }
          cursor {
            value
          }
        }
        pageInfo {
          endCursor {
            value
          }
          hasNextPage
        }
        inspect @include(if: $inspect) {
          dsl
          response
        }
      }
    }
  }
`;
|
id
name
|
util.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert `fee` lies in the expected range for a `tx_size`-byte tx."""
    target_fee = round(tx_size * fee_per_kB / 1000, 8)
    # Reject any fee below the exact target.
    if fee < target_fee:
        raise AssertionError("Fee of %s ASC too low! (Should be %s ASC)" % (str(fee), str(target_fee)))
    # The wallet's size estimate may be at most 2 bytes off, so allow
    # fees up to the rate applied to tx_size + 2.
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s ASC too high! (Should be %s ASC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to thing1."""
    values = (thing1, thing2) + args
    if any(thing1 != other for other in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in values))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    # Shorthand for assert_raises_message with no message-substring check.
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises `exc`; if `message` is not None it
    must also appear in the exception's error['message'].

    JSONRPCException is intercepted first: RPC failures must be tested
    with assert_raises_rpc_error() instead.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        # A different exception type is a test failure, not a pass.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Assert that `fun` fails with a CalledProcessError whose return code is
    `returncode` and whose output contains the substring `output`.

    Args:
        returncode (int): the expected process return code.
        output (string): [a substring of] the expected process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.

    Raises AssertionError when no CalledProcessError is raised or when the
    return code / output do not match.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as err:
        if err.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % err.returncode)
        if output not in err.output:
            raise AssertionError("Expected substring not found:" + err.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.
    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.
    Args:
        code (int), optional: the error code returned by the RPC call (defined
        in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
        RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # try_rpc returns True only when a JSONRPCException was raised (and its
    # code/message matched); any mismatch raises inside try_rpc itself.
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.
    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring not found:" + e.error['message'])
        return True
    except Exception as e:
        # Any other exception type is a test failure, not an RPC error.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Assert that `string` can be parsed as a hexadecimal integer."""
    try:
        int(string, 16)
    except Exception as err:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err))
def assert_is_hash_string(string, length=64):
    """Assert `string` looks like a lowercase hex hash of `length` chars.

    Pass a falsy `length` to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
|
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Fail fast if the json library loses precision on 8-decimal BTC values."""
    value = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(value))
    # The final 3-satoshi digit must survive the float round trip.
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by a hex string."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Encode a bytes-like value as a lowercase ASCII hex string."""
    return bytes(byte_str).hex()
def hash256(byte_str):
    """Return SHA256(SHA256(byte_str)) with the byte order reversed
    (the display convention for block/tx hashes)."""
    inner = hashlib.sha256(byte_str).digest()
    outer = hashlib.sha256(inner).digest()
    return outer[::-1]
def hex_str_to_bytes(hex_str):
    """Decode a strict ASCII hex string into raw bytes."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """UTF-8 encode `string` and return its base64 form as ASCII text."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def satoshi_round(amount):
    """Truncate `amount` down to 8 decimal places (1 satoshi) as a Decimal."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll `predicate` every 50 ms until it returns True.

    Gives up after `attempts` tries or `timeout` seconds (a 60 s timeout is
    applied when neither bound is given) and raises AssertionError showing
    the predicate's source.  If `lock` is supplied it is held while the
    predicate is evaluated.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    attempt = 0
    time_end = time.time() + timeout
    while attempt < attempts and time.time() < time_end:
        if lock:
            with lock:
                if predicate():
                    return
        else:
            if predicate():
                return
        attempt += 1
        time.sleep(0.05)
    # Print the cause of the timeout
    predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
    logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
    if attempt >= attempts:
        raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
    elif time.time() >= time_end:
        raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    # Must be initialized with a unique integer for each process
    # (p2p_port/rpc_port mix it into the port numbers so parallel test
    # runs do not collide).
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the deterministic P2P listen port for node number `n`."""
    assert(n <= MAX_NODES)
    # Offset by the per-process PortSeed so concurrent runs get disjoint ports.
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """Return the deterministic RPC port for node number `n` (one PORT_RANGE above p2p)."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
    """Build the authenticated RPC URL for node `i`.

    Credentials come from the node's config/cookie in `datadir`; `rpchost`
    may override the default host, optionally as "host:port".
    """
    rpc_u, rpc_p = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "anotherscryptcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
    """Return the data directory path for node `n` under `dirname`."""
    return os.path.join(dirname, "node{}".format(n))
def append_config(datadir, options):
    """Append each line in `options` to `datadir`'s anotherscryptcoin.conf."""
    conf_path = os.path.join(datadir, "anotherscryptcoin.conf")
    with open(conf_path, 'a', encoding='utf8') as conf_file:
        for option in options:
            conf_file.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "anotherscryptcoin.conf")):
with open(os.path.join(datadir, "anotherscryptcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
    """Delete a leftover RPC auth cookie from `datadir`'s regtest dir, if present."""
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
        logger.debug("Deleting leftover cookie file")
        os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
    """Return `node`'s BIP9 softfork status entry for deployment `key`."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the mock time of every node in `nodes` to timestamp `t`."""
    for rpc in nodes:
        rpc.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Connect `from_connection` to node `node_num` and wait for the handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    # Connect in both directions so each side has an outbound peer.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until every connection reports the same best block hash.

    At least one node in `rpc_connections` must already be synced to the
    latest stable tip, otherwise this can return before all nodes settle.
    """
    deadline = time.time() + timeout
    while time.time() <= deadline:
        best_hash = [conn.getbestblockhash() for conn in rpc_connections]
        # All synced when every entry equals the first one.
        if best_hash.count(best_hash[0]) == len(rpc_connections):
            return
        time.sleep(wait)
    raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until every connection reports an identical set of mempool txids.
    Optionally flushes each node's validation interface queue afterwards.
    """
    deadline = time.time() + timeout
    while time.time() <= deadline:
        pool = [set(conn.getrawmempool()) for conn in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for conn in rpc_connections:
                    conn.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
    """
    Return the index of the output of `txid` whose value equals `amount`.
    Raises RuntimeError when no such output exists.
    """
    txdata = node.getrawtransaction(txid, 1, blockhash)
    for index, output in enumerate(txdata["vout"]):
        if output["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 oversized OP_RETURN txouts used to bloat transactions."""
    # scriptPubKey: OP_RETURN OP_PUSHDATA2 <512 bytes of 0x01> (516 bytes total)
    script_pubkey = "6a4d0200" + "01" * 512
    # One txout = 8-byte zero value + compact-size script length (fd 0402 = 516)
    # + the script itself.
    txout = "0000000000000000" + "fd0402" + script_pubkey
    # Leading "81" is the output count; the caller splices these in before
    # its own change output.
    return "81" + txout * 128
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
    """
    Locate the vout index of the given transaction sending to the
    given address. Raises runtime error exception if not found.
    """
    tx = node.getrawtransaction(txid, True)
    for index, vout in enumerate(tx["vout"]):
        if addr in vout["scriptPubKey"]["addresses"]:
            return index
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
|
num_matched = 0
for item in object_array:
|
managedidentities-gen.go
|
// Copyright 2021 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package managedidentities provides access to the Managed Service for Microsoft Active Directory API.
//
// For product documentation, see: https://cloud.google.com/managed-microsoft-ad/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/managedidentities/v1"
// ...
// ctx := context.Background()
// managedidentitiesService, err := managedidentities.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// managedidentitiesService, err := managedidentities.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// managedidentitiesService, err := managedidentities.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package managedidentities // import "google.golang.org/api/managedidentities/v1"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "managedidentities:v1"
const apiName = "managedidentities"
const apiVersion = "v1"
const basePath = "https://managedidentities.googleapis.com/"
const mtlsBasePath = "https://managedidentities.mtls.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// See, edit, configure, and delete your Google Cloud data and see the
// email address for your Google Account.
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.Projects = NewProjectsService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Projects *ProjectsService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewProjectsService(s *Service) *ProjectsService {
rs := &ProjectsService{s: s}
rs.Locations = NewProjectsLocationsService(s)
return rs
}
// ProjectsService provides access to project-level resources.
type ProjectsService struct {
	s *Service

	Locations *ProjectsLocationsService
}
// NewProjectsLocationsService builds the ProjectsLocationsService and
// its nested Global service.
func NewProjectsLocationsService(s *Service) *ProjectsLocationsService {
	return &ProjectsLocationsService{
		s:      s,
		Global: NewProjectsLocationsGlobalService(s),
	}
}
// ProjectsLocationsService provides access to location-level resources.
type ProjectsLocationsService struct {
	s *Service

	Global *ProjectsLocationsGlobalService
}
// NewProjectsLocationsGlobalService builds the global-location service
// together with its Domains, Operations, and Peerings sub-services.
func NewProjectsLocationsGlobalService(s *Service) *ProjectsLocationsGlobalService {
	return &ProjectsLocationsGlobalService{
		s:          s,
		Domains:    NewProjectsLocationsGlobalDomainsService(s),
		Operations: NewProjectsLocationsGlobalOperationsService(s),
		Peerings:   NewProjectsLocationsGlobalPeeringsService(s),
	}
}
// ProjectsLocationsGlobalService provides access to resources under the
// "global" location.
type ProjectsLocationsGlobalService struct {
	s *Service

	Domains *ProjectsLocationsGlobalDomainsService

	Operations *ProjectsLocationsGlobalOperationsService

	Peerings *ProjectsLocationsGlobalPeeringsService
}
// NewProjectsLocationsGlobalDomainsService builds the domains service
// and its nested SqlIntegrations service.
func NewProjectsLocationsGlobalDomainsService(s *Service) *ProjectsLocationsGlobalDomainsService {
	return &ProjectsLocationsGlobalDomainsService{
		s:               s,
		SqlIntegrations: NewProjectsLocationsGlobalDomainsSqlIntegrationsService(s),
	}
}
// ProjectsLocationsGlobalDomainsService provides access to managed AD
// domain resources.
type ProjectsLocationsGlobalDomainsService struct {
	s *Service

	SqlIntegrations *ProjectsLocationsGlobalDomainsSqlIntegrationsService
}
func
|
(s *Service) *ProjectsLocationsGlobalDomainsSqlIntegrationsService {
rs := &ProjectsLocationsGlobalDomainsSqlIntegrationsService{s: s}
return rs
}
// ProjectsLocationsGlobalDomainsSqlIntegrationsService provides access
// to SQL integrations under a domain.
type ProjectsLocationsGlobalDomainsSqlIntegrationsService struct {
	s *Service
}
// NewProjectsLocationsGlobalOperationsService returns a new operations
// service bound to s.
func NewProjectsLocationsGlobalOperationsService(s *Service) *ProjectsLocationsGlobalOperationsService {
	return &ProjectsLocationsGlobalOperationsService{s: s}
}
// ProjectsLocationsGlobalOperationsService provides access to
// long-running operation resources.
type ProjectsLocationsGlobalOperationsService struct {
	s *Service
}
// NewProjectsLocationsGlobalPeeringsService returns a new peerings
// service bound to s.
func NewProjectsLocationsGlobalPeeringsService(s *Service) *ProjectsLocationsGlobalPeeringsService {
	return &ProjectsLocationsGlobalPeeringsService{s: s}
}
// ProjectsLocationsGlobalPeeringsService provides access to peering
// resources.
type ProjectsLocationsGlobalPeeringsService struct {
	s *Service
}
// AttachTrustRequest: Request message for AttachTrust
type AttachTrustRequest struct {
	// Trust: Required. The domain trust resource.
	Trust *Trust `json:"trust,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Trust") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Trust") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *AttachTrustRequest) MarshalJSON() ([]byte, error) {
	type NoMethod AttachTrustRequest
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Binding: Associates `members`, or principals, with a `role`.
type Binding struct {
	// Condition: The condition that is associated with this binding. If the
	// condition evaluates to `true`, then this binding applies to the
	// current request. If the condition evaluates to `false`, then this
	// binding does not apply to the current request. However, a different
	// role binding might grant the same role to one or more of the
	// principals in this binding. To learn which resources support
	// conditions in their IAM policies, see the IAM documentation
	// (https://cloud.google.com/iam/help/conditions/resource-policies).
	Condition *Expr `json:"condition,omitempty"`

	// Members: Specifies the principals requesting access for a Cloud
	// Platform resource. `members` can have the following values: *
	// `allUsers`: A special identifier that represents anyone who is on the
	// internet; with or without a Google account. *
	// `allAuthenticatedUsers`: A special identifier that represents anyone
	// who is authenticated with a Google account or a service account. *
	// `user:{emailid}`: An email address that represents a specific Google
	// account. For example, `[email protected]` . *
	// `serviceAccount:{emailid}`: An email address that represents a
	// service account. For example,
	// `[email protected]`. * `group:{emailid}`: An
	// email address that represents a Google group. For example,
	// `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An
	// email address (plus unique identifier) representing a user that has
	// been recently deleted. For example,
	// `[email protected]?uid=123456789012345678901`. If the user is
	// recovered, this value reverts to `user:{emailid}` and the recovered
	// user retains the role in the binding. *
	// `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
	// (plus unique identifier) representing a service account that has been
	// recently deleted. For example,
	// `[email protected]?uid=123456789012345678901`.
	// If the service account is undeleted, this value reverts to
	// `serviceAccount:{emailid}` and the undeleted service account retains
	// the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`:
	// An email address (plus unique identifier) representing a Google group
	// that has been recently deleted. For example,
	// `[email protected]?uid=123456789012345678901`. If the group is
	// recovered, this value reverts to `group:{emailid}` and the recovered
	// group retains the role in the binding. * `domain:{domain}`: The G
	// Suite domain (primary) that represents all the users of that domain.
	// For example, `google.com` or `example.com`.
	Members []string `json:"members,omitempty"`

	// Role: Role that is assigned to the list of `members`, or principals.
	// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
	Role string `json:"role,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Condition") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Condition") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *Binding) MarshalJSON() ([]byte, error) {
	type NoMethod Binding
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// CancelOperationRequest: The request message for
// Operations.CancelOperation.
type CancelOperationRequest struct {
	// This request intentionally carries no fields.
}
// Certificate: Certificate used to configure LDAPS.
type Certificate struct {
	// ExpireTime: The certificate expire time.
	ExpireTime string `json:"expireTime,omitempty"`

	// IssuingCertificate: The issuer of this certificate.
	// Note: self-referential, forming a chain up to the root issuer.
	IssuingCertificate *Certificate `json:"issuingCertificate,omitempty"`

	// Subject: The certificate subject.
	Subject string `json:"subject,omitempty"`

	// SubjectAlternativeName: The additional hostnames for the domain.
	SubjectAlternativeName []string `json:"subjectAlternativeName,omitempty"`

	// Thumbprint: The certificate thumbprint which uniquely identifies the
	// certificate.
	Thumbprint string `json:"thumbprint,omitempty"`

	// ForceSendFields is a list of field names (e.g. "ExpireTime") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "ExpireTime") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *Certificate) MarshalJSON() ([]byte, error) {
	type NoMethod Certificate
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// DailyCycle: Time window specified for daily operations.
type DailyCycle struct {
	// Duration: Output only. Duration of the time window, set by service
	// producer.
	Duration string `json:"duration,omitempty"`

	// StartTime: Time within the day to start the operations.
	StartTime *TimeOfDay `json:"startTime,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Duration") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Duration") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *DailyCycle) MarshalJSON() ([]byte, error) {
	type NoMethod DailyCycle
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Date: Represents a whole or partial calendar date, such as a
// birthday. The time of day and time zone are either specified
// elsewhere or are insignificant. The date is relative to the Gregorian
// Calendar. This can represent one of the following: * A full date,
// with non-zero year, month, and day values * A month and day value,
// with a zero year, such as an anniversary * A year on its own, with
// zero month and day values * A year and month value, with a zero day,
// such as a credit card expiration date Related types are
// google.type.TimeOfDay and `google.protobuf.Timestamp`.
type Date struct {
	// Day: Day of a month. Must be from 1 to 31 and valid for the year and
	// month, or 0 to specify a year by itself or a year and month where the
	// day isn't significant.
	Day int64 `json:"day,omitempty"`

	// Month: Month of a year. Must be from 1 to 12, or 0 to specify a year
	// without a month and day.
	Month int64 `json:"month,omitempty"`

	// Year: Year of the date. Must be from 1 to 9999, or 0 to specify a
	// date without a year.
	Year int64 `json:"year,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Day") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Day") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *Date) MarshalJSON() ([]byte, error) {
	type NoMethod Date
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// DenyMaintenancePeriod: DenyMaintenancePeriod definition. Maintenance
// is forbidden within the deny period. The start_date must be less than
// the end_date.
type DenyMaintenancePeriod struct {
	// EndDate: Deny period end date. This can be: * A full date, with
	// non-zero year, month and day values. * A month and day value, with a
	// zero year. Allows recurring deny periods each year. Date matching
	// this period will have to be before the end.
	EndDate *Date `json:"endDate,omitempty"`

	// StartDate: Deny period start date. This can be: * A full date, with
	// non-zero year, month and day values. * A month and day value, with a
	// zero year. Allows recurring deny periods each year. Date matching
	// this period will have to be the same or after the start.
	StartDate *Date `json:"startDate,omitempty"`

	// Time: Time in UTC when the Blackout period starts on start_date and
	// ends on end_date. This can be: * Full time. * All zeros for 00:00:00
	// UTC
	Time *TimeOfDay `json:"time,omitempty"`

	// ForceSendFields is a list of field names (e.g. "EndDate") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "EndDate") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *DenyMaintenancePeriod) MarshalJSON() ([]byte, error) {
	type NoMethod DenyMaintenancePeriod
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// DetachTrustRequest: Request message for DetachTrust
type DetachTrustRequest struct {
	// Trust: Required. The domain trust resource to removed.
	Trust *Trust `json:"trust,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Trust") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Trust") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *DetachTrustRequest) MarshalJSON() ([]byte, error) {
	type NoMethod DetachTrustRequest
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Domain: Represents a managed Microsoft Active Directory domain. If
// the domain is being changed, it will be placed into the UPDATING
// state, which indicates that the resource is being reconciled. At this
// point, Get will reflect an intermediate state.
type Domain struct {
	// Admin: Optional. The name of delegated administrator account used to
	// perform Active Directory operations. If not specified, `setupadmin`
	// will be used.
	Admin string `json:"admin,omitempty"`

	// AuditLogsEnabled: Optional. Configuration for audit logs. True if
	// audit logs are enabled, else false. Default is audit logs disabled.
	AuditLogsEnabled bool `json:"auditLogsEnabled,omitempty"`

	// AuthorizedNetworks: Optional. The full names of the Google Compute
	// Engine networks (/compute/docs/networks-and-firewalls#networks) the
	// domain instance is connected to. Networks can be added using
	// UpdateDomain. The domain is only available on networks listed in
	// `authorized_networks`. If CIDR subnets overlap between networks,
	// domain creation will fail.
	AuthorizedNetworks []string `json:"authorizedNetworks,omitempty"`

	// CreateTime: Output only. The time the instance was created.
	CreateTime string `json:"createTime,omitempty"`

	// Fqdn: Output only. The fully-qualified domain name of the exposed
	// domain used by clients to connect to the service. Similar to what
	// would be chosen for an Active Directory set up on an internal
	// network.
	Fqdn string `json:"fqdn,omitempty"`

	// Labels: Optional. Resource labels that can contain user-provided
	// metadata.
	Labels map[string]string `json:"labels,omitempty"`

	// Locations: Required. Locations where domain needs to be provisioned.
	// regions e.g. us-west1 or us-east4 Service supports up to 4 locations
	// at once. Each location will use a /26 block.
	Locations []string `json:"locations,omitempty"`

	// Name: Required. The unique name of the domain using the form:
	// `projects/{project_id}/locations/global/domains/{domain_name}`.
	Name string `json:"name,omitempty"`

	// ReservedIpRange: Required. The CIDR range of internal addresses that
	// are reserved for this domain. Reserved networks must be /24 or
	// larger. Ranges must be unique and non-overlapping with existing
	// subnets in [Domain].[authorized_networks].
	ReservedIpRange string `json:"reservedIpRange,omitempty"`

	// State: Output only. The current state of this domain.
	//
	// Possible values:
	//   "STATE_UNSPECIFIED" - Not set.
	//   "CREATING" - The domain is being created.
	//   "READY" - The domain has been created and is fully usable.
	//   "UPDATING" - The domain's configuration is being updated.
	//   "DELETING" - The domain is being deleted.
	//   "REPAIRING" - The domain is being repaired and may be unusable.
	// Details can be found in the `status_message` field.
	//   "PERFORMING_MAINTENANCE" - The domain is undergoing maintenance.
	//   "UNAVAILABLE" - The domain is not serving requests.
	State string `json:"state,omitempty"`

	// StatusMessage: Output only. Additional information about the current
	// status of this domain, if available.
	StatusMessage string `json:"statusMessage,omitempty"`

	// Trusts: Output only. The current trusts associated with the domain.
	Trusts []*Trust `json:"trusts,omitempty"`

	// UpdateTime: Output only. The last update time.
	UpdateTime string `json:"updateTime,omitempty"`

	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`

	// ForceSendFields is a list of field names (e.g. "Admin") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Admin") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *Domain) MarshalJSON() ([]byte, error) {
	type NoMethod Domain
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Empty: A generic empty message that you can re-use to avoid defining
// duplicated empty messages in your APIs. A typical example is to use
// it as the request or the response type of an API method. For
// instance: service Foo { rpc Bar(google.protobuf.Empty) returns
// (google.protobuf.Empty); } The JSON representation for `Empty` is
// empty JSON object `{}`.
type Empty struct {
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
}
// Expr: Represents a textual expression in the Common Expression
// Language (CEL) syntax. CEL is a C-like expression language. The
// syntax and semantics of CEL are documented at
// https://github.com/google/cel-spec. Example (Comparison): title:
// "Summary size limit" description: "Determines if a summary is less
// than 100 chars" expression: "document.summary.size() < 100" Example
// (Equality): title: "Requestor is owner" description: "Determines if
// requestor is the document owner" expression: "document.owner ==
// request.auth.claims.email" Example (Logic): title: "Public documents"
// description: "Determine whether the document should be publicly
// visible" expression: "document.type != 'private' && document.type !=
// 'internal'" Example (Data Manipulation): title: "Notification string"
// description: "Create a notification string with a timestamp."
// expression: "'New message received at ' +
// string(document.create_time)" The exact variables and functions that
// may be referenced within an expression are determined by the service
// that evaluates it. See the service documentation for additional
// information.
type Expr struct {
	// Description: Optional. Description of the expression. This is a
	// longer text which describes the expression, e.g. when hovered over it
	// in a UI.
	Description string `json:"description,omitempty"`

	// Expression: Textual representation of an expression in Common
	// Expression Language syntax.
	Expression string `json:"expression,omitempty"`

	// Location: Optional. String indicating the location of the expression
	// for error reporting, e.g. a file name and a position in the file.
	Location string `json:"location,omitempty"`

	// Title: Optional. Title for the expression, i.e. a short string
	// describing its purpose. This can be used e.g. in UIs which allow to
	// enter the expression.
	Title string `json:"title,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Description") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Description") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *Expr) MarshalJSON() ([]byte, error) {
	type NoMethod Expr
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudManagedidentitiesV1OpMetadata: Represents the metadata of
// the long-running operation.
type GoogleCloudManagedidentitiesV1OpMetadata struct {
	// ApiVersion: Output only. API version used to start the operation.
	ApiVersion string `json:"apiVersion,omitempty"`

	// CreateTime: Output only. The time the operation was created.
	CreateTime string `json:"createTime,omitempty"`

	// EndTime: Output only. The time the operation finished running.
	EndTime string `json:"endTime,omitempty"`

	// RequestedCancellation: Output only. Identifies whether the user has
	// requested cancellation of the operation. Operations that have
	// successfully been cancelled have Operation.error value with a
	// google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
	RequestedCancellation bool `json:"requestedCancellation,omitempty"`

	// Target: Output only. Server-defined resource path for the target of
	// the operation.
	Target string `json:"target,omitempty"`

	// Verb: Output only. Name of the verb executed by the operation.
	Verb string `json:"verb,omitempty"`

	// ForceSendFields is a list of field names (e.g. "ApiVersion") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "ApiVersion") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *GoogleCloudManagedidentitiesV1OpMetadata) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudManagedidentitiesV1OpMetadata
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudManagedidentitiesV1alpha1OpMetadata: Represents the
// metadata of the long-running operation.
type GoogleCloudManagedidentitiesV1alpha1OpMetadata struct {
	// ApiVersion: Output only. API version used to start the operation.
	ApiVersion string `json:"apiVersion,omitempty"`

	// CreateTime: Output only. The time the operation was created.
	CreateTime string `json:"createTime,omitempty"`

	// EndTime: Output only. The time the operation finished running.
	EndTime string `json:"endTime,omitempty"`

	// RequestedCancellation: Output only. Identifies whether the user has
	// requested cancellation of the operation. Operations that have
	// successfully been cancelled have Operation.error value with a
	// google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
	RequestedCancellation bool `json:"requestedCancellation,omitempty"`

	// Target: Output only. Server-defined resource path for the target of
	// the operation.
	Target string `json:"target,omitempty"`

	// Verb: Output only. Name of the verb executed by the operation.
	Verb string `json:"verb,omitempty"`

	// ForceSendFields is a list of field names (e.g. "ApiVersion") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "ApiVersion") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *GoogleCloudManagedidentitiesV1alpha1OpMetadata) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudManagedidentitiesV1alpha1OpMetadata
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudManagedidentitiesV1beta1OpMetadata: Represents the
// metadata of the long-running operation.
type GoogleCloudManagedidentitiesV1beta1OpMetadata struct {
	// ApiVersion: Output only. API version used to start the operation.
	ApiVersion string `json:"apiVersion,omitempty"`

	// CreateTime: Output only. The time the operation was created.
	CreateTime string `json:"createTime,omitempty"`

	// EndTime: Output only. The time the operation finished running.
	EndTime string `json:"endTime,omitempty"`

	// RequestedCancellation: Output only. Identifies whether the user has
	// requested cancellation of the operation. Operations that have
	// successfully been cancelled have Operation.error value with a
	// google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
	RequestedCancellation bool `json:"requestedCancellation,omitempty"`

	// Target: Output only. Server-defined resource path for the target of
	// the operation.
	Target string `json:"target,omitempty"`

	// Verb: Output only. Name of the verb executed by the operation.
	Verb string `json:"verb,omitempty"`

	// ForceSendFields is a list of field names (e.g. "ApiVersion") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "ApiVersion") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler, honoring ForceSendFields and
// NullFields via gensupport.
func (s *GoogleCloudManagedidentitiesV1beta1OpMetadata) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudManagedidentitiesV1beta1OpMetadata
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1Instance: A SaaS
// Accelerator (SLM) service instance: its identity, lifecycle state,
// maintenance scheduling data, SLO metadata, and the software versions
// deployed to it.
type GoogleCloudSaasacceleratorManagementProvidersV1Instance struct {
	// ConsumerDefinedName: consumer_defined_name is the name that is set by
	// the consumer. On the other hand Name field represents system-assigned
	// id of an instance so consumers are not necessarily aware of it.
	// consumer_defined_name is used for notification/UI purposes for
	// consumer to recognize their instances.
	ConsumerDefinedName string `json:"consumerDefinedName,omitempty"`
	// CreateTime: Output only. Timestamp when the resource was created.
	CreateTime string `json:"createTime,omitempty"`
	// Labels: Optional. Resource labels to represent user provided
	// metadata. Each label is a key-value pair, where both the key and the
	// value are arbitrary strings provided by the user.
	Labels map[string]string `json:"labels,omitempty"`
	// MaintenancePolicyNames: Deprecated. The MaintenancePolicies that have
	// been attached to the instance. The key must be of the type name of
	// the oneof policy name defined in MaintenancePolicy, and the
	// referenced policy must define the same policy type. For complete
	// details of MaintenancePolicy, please refer to go/cloud-saas-mw-ug.
	MaintenancePolicyNames map[string]string `json:"maintenancePolicyNames,omitempty"`
	// MaintenanceSchedules: The MaintenanceSchedule contains the scheduling
	// information of published maintenance schedule with same key as
	// software_versions.
	MaintenanceSchedules map[string]GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule `json:"maintenanceSchedules,omitempty"`
	// MaintenanceSettings: Optional. The MaintenanceSettings associated
	// with instance.
	MaintenanceSettings *GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings `json:"maintenanceSettings,omitempty"`
	// Name: Unique name of the resource. It uses the form:
	// `projects/{project_id|project_number}/locations/{location_id}/instance
	// s/{instance_id}` Note: Either project_id or project_number can be
	// used, but keep it consistent with other APIs (e.g. RescheduleUpdate)
	Name string `json:"name,omitempty"`
	// ProducerMetadata: Output only. Custom string attributes used
	// primarily to expose producer-specific information in monitoring
	// dashboards. See go/get-instance-metadata.
	ProducerMetadata map[string]string `json:"producerMetadata,omitempty"`
	// ProvisionedResources: Output only. The list of data plane resources
	// provisioned for this instance, e.g. compute VMs. See
	// go/get-instance-metadata.
	ProvisionedResources []*GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource `json:"provisionedResources,omitempty"`
	// SlmInstanceTemplate: Link to the SLM instance template. Only
	// populated when updating SLM instances via SSA's Actuation service
	// adaptor. Service producers with custom control plane (e.g. Cloud SQL)
	// doesn't need to populate this field. Instead they should use
	// software_versions.
	SlmInstanceTemplate string `json:"slmInstanceTemplate,omitempty"`
	// SloMetadata: Output only. SLO metadata for instance classification in
	// the Standardized dataplane SLO platform. See
	// go/cloud-ssa-standard-slo for feature description.
	SloMetadata *GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata `json:"sloMetadata,omitempty"`
	// SoftwareVersions: Software versions that are used to deploy this
	// instance. This can be mutated by rollout services.
	SoftwareVersions map[string]string `json:"softwareVersions,omitempty"`
	// State: Output only. Current lifecycle state of the resource (e.g. if
	// it's being created or ready to use).
	//
	// Possible values:
	//   "STATE_UNSPECIFIED" - Unspecified state.
	//   "CREATING" - Instance is being created.
	//   "READY" - Instance has been created and is ready to use.
	//   "UPDATING" - Instance is being updated.
	//   "REPAIRING" - Instance is unhealthy and under repair.
	//   "DELETING" - Instance is being deleted.
	//   "ERROR" - Instance encountered an error and is in indeterministic
	// state.
	State string `json:"state,omitempty"`
	// TenantProjectId: Output only. ID of the associated GCP tenant
	// project. See go/get-instance-metadata.
	TenantProjectId string `json:"tenantProjectId,omitempty"`
	// UpdateTime: Output only. Timestamp when the resource was last
	// modified.
	UpdateTime string `json:"updateTime,omitempty"`
	// ForceSendFields is a list of field names (e.g. "ConsumerDefinedName")
	// to unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "ConsumerDefinedName") to
	// include in API requests with the JSON null value. By default, fields
	// with empty values are omitted from API requests. However, any field
	// with an empty value appearing in NullFields will be sent to the
	// server as null. It is an error if a field in this list has a
	// non-empty value. This may be used to include null fields in Patch
	// requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1Instance) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1Instance
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule:
// Maintenance schedule which is exposed to customer and potentially end
// user, indicating published upcoming future maintenance schedule.
type GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule struct {
	// CanReschedule: This field is deprecated, and will be always set to
	// true since reschedule can happen multiple times now. This field
	// should not be removed until all service producers remove this for
	// their customers.
	CanReschedule bool `json:"canReschedule,omitempty"`
	// EndTime: The scheduled end time for the maintenance.
	EndTime string `json:"endTime,omitempty"`
	// RolloutManagementPolicy: The rollout management policy this
	// maintenance schedule is associated with. When doing reschedule update
	// request, the reschedule should be against this given policy.
	RolloutManagementPolicy string `json:"rolloutManagementPolicy,omitempty"`
	// ScheduleDeadlineTime: schedule_deadline_time is the time deadline any
	// schedule start time cannot go beyond, including reschedule. It's
	// normally the initial schedule start time plus maintenance window
	// length (1 day or 1 week). Maintenance cannot be scheduled to start
	// beyond this deadline.
	ScheduleDeadlineTime string `json:"scheduleDeadlineTime,omitempty"`
	// StartTime: The scheduled start time for the maintenance.
	StartTime string `json:"startTime,omitempty"`
	// ForceSendFields is a list of field names (e.g. "CanReschedule") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "CanReschedule") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings:
// Maintenance settings associated with instance. Allows service
// producers and end users to assign settings that control maintenance
// on this instance.
type GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings struct {
	// Exclude: Optional. Exclude instance from maintenance. When true,
	// rollout service will not attempt maintenance on the instance. Rollout
	// service will include the instance in reported rollout progress as not
	// attempted.
	Exclude bool `json:"exclude,omitempty"`
	// IsRollback: Optional. If the update call is triggered from rollback,
	// set the value as true.
	IsRollback bool `json:"isRollback,omitempty"`
	// MaintenancePolicies: Optional. The MaintenancePolicies that have been
	// attached to the instance. The key must be of the type name of the
	// oneof policy name defined in MaintenancePolicy, and the embedded
	// policy must define the same policy type. For complete details of
	// MaintenancePolicy, please refer to go/cloud-saas-mw-ug. If only the
	// name is needed (like in the deprecated
	// Instance.maintenance_policy_names field) then only populate
	// MaintenancePolicy.name.
	MaintenancePolicies map[string]MaintenancePolicy `json:"maintenancePolicies,omitempty"`
	// ForceSendFields is a list of field names (e.g. "Exclude") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Exclude") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata: Node
// information for custom per-node SLO implementations. SSA does not
// support per-node SLO, but producers can populate per-node information
// in SloMetadata for custom precomputations. SSA Eligibility Exporter
// will emit per-node metrics based on this information.
type GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata struct {
	// Location: The location of the node, if different from instance
	// location.
	Location string `json:"location,omitempty"`
	// NodeId: The id of the node. This should be equal to
	// SaasInstanceNode.node_id.
	NodeId string `json:"nodeId,omitempty"`
	// PerSliEligibility: If present, this will override eligibility for the
	// node coming from instance or exclusions for specified SLIs.
	PerSliEligibility *GoogleCloudSaasacceleratorManagementProvidersV1PerSliSloEligibility `json:"perSliEligibility,omitempty"`
	// ForceSendFields is a list of field names (e.g. "Location") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Location") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1PerSliSloEligibility:
// PerSliSloEligibility is a mapping from an SLI name to eligibility.
type GoogleCloudSaasacceleratorManagementProvidersV1PerSliSloEligibility struct {
	// Eligibilities: An entry in the eligibilities map specifies an
	// eligibility for a particular SLI for the given instance. The SLI key
	// in the name must be a valid SLI name specified in the Eligibility
	// Exporter binary flags, otherwise an error will be emitted by
	// Eligibility Exporter and the oncaller will be alerted. If an SLI has
	// been defined in the binary flags but the eligibilities map does not
	// contain it, the corresponding SLI time series will not be emitted by
	// the Eligibility Exporter. This ensures a smooth rollout and
	// compatibility between the data produced by different versions of the
	// Eligibility Exporters. If eligibilities map contains a key for an SLI
	// which has not been declared in the binary flags, there will be an
	// error message emitted in the Eligibility Exporter log and the metric
	// for the SLI in question will not be emitted.
	Eligibilities map[string]GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility `json:"eligibilities,omitempty"`
	// ForceSendFields is a list of field names (e.g. "Eligibilities") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Eligibilities") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1PerSliSloEligibility) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1PerSliSloEligibility
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource:
// Describes provisioned dataplane resources.
type GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource struct {
	// ResourceType: Type of the resource. This can be either a GCP resource
	// or a custom one (e.g. another cloud provider's VM). For GCP compute
	// resources use singular form of the names listed in GCP compute API
	// documentation
	// (https://cloud.google.com/compute/docs/reference/rest/v1/), prefixed
	// with 'compute-', for example: 'compute-instance', 'compute-disk',
	// 'compute-autoscaler'.
	ResourceType string `json:"resourceType,omitempty"`
	// ResourceUrl: URL identifying the resource, e.g.
	// "https://www.googleapis.com/compute/v1/projects/...".
	ResourceUrl string `json:"resourceUrl,omitempty"`
	// ForceSendFields is a list of field names (e.g. "ResourceType") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "ResourceType") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility:
// SloEligibility is a tuple containing eligibility value: true if an
// instance is eligible for SLO calculation or false if it should be
// excluded from all SLO-related calculations, along with a user-defined
// reason.
type GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility struct {
	// Eligible: Whether an instance is eligible or ineligible.
	Eligible bool `json:"eligible,omitempty"`
	// Reason: User-defined reason for the current value of instance
	// eligibility. Usually, this can be directly mapped to the internal
	// state. An empty reason is allowed.
	Reason string `json:"reason,omitempty"`
	// ForceSendFields is a list of field names (e.g. "Eligible") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Eligible") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata:
// SloMetadata contains resources required for proper SLO classification
// of the instance.
type GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata struct {
	// Nodes: Optional. List of nodes. Some producers need to use per-node
	// metadata to calculate SLO. This field allows such producers to
	// publish per-node SLO metadata, which will be consumed by SSA
	// Eligibility Exporter and published in the form of per node metric to
	// Monarch.
	Nodes []*GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata `json:"nodes,omitempty"`
	// PerSliEligibility: Optional. Multiple per-instance SLI eligibilities
	// which apply for individual SLIs.
	PerSliEligibility *GoogleCloudSaasacceleratorManagementProvidersV1PerSliSloEligibility `json:"perSliEligibility,omitempty"`
	// Tier: Name of the SLO tier the Instance belongs to. This name will be
	// expected to match the tiers specified in the service SLO
	// configuration. Field is mandatory and must not be empty.
	Tier string `json:"tier,omitempty"`
	// ForceSendFields is a list of field names (e.g. "Nodes") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Nodes") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata) MarshalJSON() ([]byte, error) {
	type NoMethod GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// LDAPSSettings: LDAPSSettings represents the ldaps settings for domain
// resource. LDAP is the Lightweight Directory Access Protocol, defined
// in https://tools.ietf.org/html/rfc4511. The settings object
// configures LDAP over SSL/TLS, whether it is over port 636 or the
// StartTLS operation. If LDAPSSettings is being changed, it will be
// placed into the UPDATING state, which indicates that the resource is
// being reconciled. At this point, Get will reflect an intermediate
// state.
type LDAPSSettings struct {
	// Certificate: Output only. The certificate used to configure LDAPS.
	// Certificates can be chained with a maximum length of 15.
	Certificate *Certificate `json:"certificate,omitempty"`
	// CertificatePassword: Input only. The password used to encrypt the
	// uploaded PFX certificate.
	CertificatePassword string `json:"certificatePassword,omitempty"`
	// CertificatePfx: Input only. The uploaded PKCS12-formatted certificate
	// to configure LDAPS with. It will enable the domain controllers in
	// this domain to accept LDAPS connections (either LDAP over SSL/TLS or
	// the StartTLS operation). A valid certificate chain must form a valid
	// x.509 certificate chain (or be comprised of a single self-signed
	// certificate). It must be encrypted with either: 1) PBES2 + PBKDF2 +
	// AES256 encryption and SHA256 PRF; or 2)
	// pbeWithSHA1And3-KeyTripleDES-CBC Private key must be included for the
	// leaf / single self-signed certificate. Note: For a fqdn
	// your-example-domain.com, the wildcard fqdn is
	// *.your-example-domain.com. Specifically the leaf certificate must
	// have: - Either a blank subject or a subject with CN matching the
	// wildcard fqdn. - Exactly two SANs - the fqdn and wildcard fqdn. -
	// Encipherment and digital key signature key usages. - Server
	// authentication extended key usage (OID=1.3.6.1.5.5.7.3.1) - Private
	// key must be in one of the following formats: RSA, ECDSA, ED25519. -
	// Private key must have appropriate key length: 2048 for RSA, 256 for
	// ECDSA - Signature algorithm of the leaf certificate cannot be MD2,
	// MD5 or SHA1.
	CertificatePfx string `json:"certificatePfx,omitempty"`
	// Name: The resource name of the LDAPS settings. Uses the form:
	// `projects/{project}/locations/{location}/domains/{domain}`.
	Name string `json:"name,omitempty"`
	// State: Output only. The current state of this LDAPS settings.
	//
	// Possible values:
	//   "STATE_UNSPECIFIED" - Not Set
	//   "UPDATING" - The LDAPS setting is being updated.
	//   "ACTIVE" - The LDAPS setting is ready.
	//   "FAILED" - The LDAPS setting is not applied correctly.
	State string `json:"state,omitempty"`
	// UpdateTime: Output only. Last update time.
	UpdateTime string `json:"updateTime,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "Certificate") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Certificate") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *LDAPSSettings) MarshalJSON() ([]byte, error) {
	type NoMethod LDAPSSettings
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ListDomainsResponse: Response message for ListDomains.
type ListDomainsResponse struct {
	// Domains: A list of Managed Identities Service domains in the project.
	Domains []*Domain `json:"domains,omitempty"`
	// NextPageToken: A token to retrieve the next page of results, or empty
	// if there are no more results in the list.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// Unreachable: A list of locations that could not be reached.
	Unreachable []string `json:"unreachable,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "Domains") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Domains") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *ListDomainsResponse) MarshalJSON() ([]byte, error) {
	type NoMethod ListDomainsResponse
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ListLocationsResponse: The response message for
// Locations.ListLocations.
type ListLocationsResponse struct {
	// Locations: A list of locations that match the specified filter in
	// the request.
	Locations []*Location `json:"locations,omitempty"`
	// NextPageToken: The standard List next-page token.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "Locations") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "Locations") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) {
	type NoMethod ListLocationsResponse
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ListOperationsResponse: The response message for
// Operations.ListOperations.
type ListOperationsResponse struct {
	// NextPageToken: The standard List next-page token.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// Operations: A list of operations that match the specified filter in
	// the request.
	Operations []*Operation `json:"operations,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) {
	type NoMethod ListOperationsResponse
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ListPeeringsResponse: ListPeeringsResponse is the response message
// for ListPeerings method.
type ListPeeringsResponse struct {
	// NextPageToken: Token to retrieve the next page of results, or empty
	// if there are no more results in the list.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// Peerings: A list of Managed Identities Service Peerings in the
	// project.
	Peerings []*Peering `json:"peerings,omitempty"`
	// Unreachable: A list of locations that could not be reached.
	Unreachable []string `json:"unreachable,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *ListPeeringsResponse) MarshalJSON() ([]byte, error) {
	type NoMethod ListPeeringsResponse
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ListSqlIntegrationsResponse: ListSqlIntegrationsResponse is the
// response message for the ListSqlIntegrations method.
type ListSqlIntegrationsResponse struct {
	// NextPageToken: Token to retrieve the next page of results, or empty
	// if there are no more results in the list.
	NextPageToken string `json:"nextPageToken,omitempty"`
	// SqlIntegrations: A list of SQLIntegrations of a domain.
	SqlIntegrations []*SqlIntegration `json:"sqlIntegrations,omitempty"`
	// Unreachable: A list of locations that could not be reached.
	Unreachable []string `json:"unreachable,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "NextPageToken") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "NextPageToken") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
// MarshalJSON implements json.Marshaler. Converting *s to the local,
// method-less NoMethod alias keeps the json package from re-entering this
// method, while gensupport applies the ForceSendFields/NullFields semantics.
func (s *ListSqlIntegrationsResponse) MarshalJSON() ([]byte, error) {
	type NoMethod ListSqlIntegrationsResponse
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Location: A resource that represents a Google Cloud Platform
// location.
type Location struct {
	// DisplayName: The friendly name for this location, typically a nearby
	// city name. For example, "Tokyo".
	DisplayName string `json:"displayName,omitempty"`
	// Labels: Cross-service attributes for the location. For example
	// {"cloud.googleapis.com/region": "us-east1"}
	Labels map[string]string `json:"labels,omitempty"`
	// LocationId: The canonical id for this location. For example:
	// "us-east1".
	LocationId string `json:"locationId,omitempty"`
	// Metadata: Service-specific metadata. For example the available
	// capacity at the given location.
	Metadata googleapi.RawMessage `json:"metadata,omitempty"`
	// Name: Resource name for the location, which may vary between
	// implementations. For example:
	// "projects/example-project/locations/us-east1"
	Name string `json:"name,omitempty"`
	// ServerResponse contains the HTTP response code and headers from the
	// server.
	googleapi.ServerResponse `json:"-"`
	// ForceSendFields is a list of field names (e.g. "DisplayName") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`
	// NullFields is a list of field names (e.g. "DisplayName") to include
	// in API requests with the JSON null value. By default, fields with
	// empty values are omitted from API requests. However, any field with
	// an empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}
func (s *Location) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod Location
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// MaintenancePolicy: Defines policies to service maintenance events.
type MaintenancePolicy struct {
// CreateTime: Output only. The time when the resource was created.
CreateTime string `json:"createTime,omitempty"`
// Description: Optional. Description of what this policy is for.
// Create/Update methods return INVALID_ARGUMENT if the length is
// greater than 512.
Description string `json:"description,omitempty"`
// Labels: Optional. Resource labels to represent user provided
// metadata. Each label is a key-value pair, where both the key and the
// value are arbitrary strings provided by the user.
Labels map[string]string `json:"labels,omitempty"`
// Name: Required. MaintenancePolicy name using the form:
// `projects/{project_id}/locations/{location_id}/maintenancePolicies/{maintenance_policy_id}`
// where {project_id} refers to a GCP consumer
// project ID, {location_id} refers to a GCP region/zone,
// {maintenance_policy_id} must be 1-63 characters long and match the
// regular expression `[a-z0-9]([-a-z0-9]*[a-z0-9])?`.
Name string `json:"name,omitempty"`
// State: Optional. The state of the policy.
//
// Possible values:
// "STATE_UNSPECIFIED" - Unspecified state.
// "READY" - Resource is ready to be used.
// "DELETING" - Resource is being deleted. It can no longer be
// attached to instances.
State string `json:"state,omitempty"`
// UpdatePolicy: Maintenance policy applicable to instance update.
UpdatePolicy *UpdatePolicy `json:"updatePolicy,omitempty"`
// UpdateTime: Output only. The time when the resource was updated.
UpdateTime string `json:"updateTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "CreateTime") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CreateTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MaintenancePolicy) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod MaintenancePolicy
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// MaintenanceWindow: MaintenanceWindow definition. Configures the
// recurring window (a daily or a weekly cycle) in which maintenance
// may run.
type MaintenanceWindow struct {
// DailyCycle: Daily cycle.
DailyCycle *DailyCycle `json:"dailyCycle,omitempty"`
// WeeklyCycle: Weekly cycle.
WeeklyCycle *WeeklyCycle `json:"weeklyCycle,omitempty"`
// ForceSendFields is a list of field names (e.g. "DailyCycle") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DailyCycle") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MaintenanceWindow) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod MaintenanceWindow
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Operation: This resource represents a long-running operation that is
// the result of a network API call.
type Operation struct {
// Done: If the value is `false`, it means the operation is still in
// progress. If `true`, the operation is completed, and either `error`
// or `response` is available.
Done bool `json:"done,omitempty"`
// Error: The error result of the operation in case of failure or
// cancellation.
Error *Status `json:"error,omitempty"`
// Metadata: Service-specific metadata associated with the operation. It
// typically contains progress information and common metadata such as
// create time. Some services might not provide such metadata. Any
// method that returns a long-running operation should document the
// metadata type, if any.
Metadata googleapi.RawMessage `json:"metadata,omitempty"`
// Name: The server-assigned name, which is only unique within the same
// service that originally returns it. If you use the default HTTP
// mapping, the `name` should be a resource name ending with
// `operations/{unique_id}`.
Name string `json:"name,omitempty"`
// Response: The normal response of the operation in case of success. If
// the original method returns no data on success, such as `Delete`, the
// response is `google.protobuf.Empty`. If the original method is
// standard `Get`/`Create`/`Update`, the response should be the
// resource. For other methods, the response should have the type
// `XxxResponse`, where `Xxx` is the original method name. For example,
// if the original method name is `TakeSnapshot()`, the inferred
// response type is `TakeSnapshotResponse`.
Response googleapi.RawMessage `json:"response,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server. (Not part of the JSON payload; note the `json:"-"` tag.)
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Done") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Done") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Operation) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod Operation
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// OperationMetadata: Represents the metadata of the long-running
// operation. All API fields below are output only and are set by the
// service rather than by the caller.
type OperationMetadata struct {
// ApiVersion: Output only. API version used to start the operation.
ApiVersion string `json:"apiVersion,omitempty"`
// CancelRequested: Output only. Identifies whether the user has
// requested cancellation of the operation. Operations that have been
// cancelled successfully have Operation.error value with a
// google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
CancelRequested bool `json:"cancelRequested,omitempty"`
// CreateTime: Output only. The time the operation was created.
CreateTime string `json:"createTime,omitempty"`
// EndTime: Output only. The time the operation finished running.
EndTime string `json:"endTime,omitempty"`
// StatusDetail: Output only. Human-readable status of the operation, if
// any.
StatusDetail string `json:"statusDetail,omitempty"`
// Target: Output only. Server-defined resource path for the target of
// the operation.
Target string `json:"target,omitempty"`
// Verb: Output only. Name of the verb executed by the operation.
Verb string `json:"verb,omitempty"`
// ForceSendFields is a list of field names (e.g. "ApiVersion") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ApiVersion") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OperationMetadata) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod OperationMetadata
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Peering: Represents a Managed Service for Microsoft Active Directory
// Peering.
type Peering struct {
// AuthorizedNetwork: Required. The full names of the Google Compute
// Engine networks (/compute/docs/networks-and-firewalls#networks) to
// which the instance is connected. Caller needs to make sure that CIDR
// subnets do not overlap between networks, else peering creation will
// fail.
AuthorizedNetwork string `json:"authorizedNetwork,omitempty"`
// CreateTime: Output only. The time the instance was created.
CreateTime string `json:"createTime,omitempty"`
// DomainResource: Required. Full domain resource path for the Managed
// AD Domain involved in peering. The resource path should be in the
// form: `projects/{project_id}/locations/global/domains/{domain_name}`
DomainResource string `json:"domainResource,omitempty"`
// Labels: Optional. Resource labels to represent user-provided
// metadata.
Labels map[string]string `json:"labels,omitempty"`
// Name: Output only. Unique name of the peering in this scope including
// projects and location using the form:
// `projects/{project_id}/locations/global/peerings/{peering_id}`.
Name string `json:"name,omitempty"`
// State: Output only. The current state of this Peering.
//
// Possible values:
// "STATE_UNSPECIFIED" - Not set.
// "CREATING" - Peering is being created.
// "CONNECTED" - Peering is connected.
// "DISCONNECTED" - Peering is disconnected.
// "DELETING" - Peering is being deleted.
State string `json:"state,omitempty"`
// StatusMessage: Output only. Additional information about the current
// status of this peering, if available.
StatusMessage string `json:"statusMessage,omitempty"`
// UpdateTime: Output only. Last update time.
UpdateTime string `json:"updateTime,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server. (Not part of the JSON payload; note the `json:"-"` tag.)
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AuthorizedNetwork")
// to unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AuthorizedNetwork") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Peering) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod Peering
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Policy: An Identity and Access Management (IAM) policy, which
// specifies access controls for Google Cloud resources. A `Policy` is a
// collection of `bindings`. A `binding` binds one or more `members`, or
// principals, to a single `role`. Principals can be user accounts,
// service accounts, Google groups, and domains (such as G Suite). A
// `role` is a named list of permissions; each `role` can be an IAM
// predefined role or a user-created custom role. For some types of
// Google Cloud resources, a `binding` can also specify a `condition`,
// which is a logical expression that allows access to a resource only
// if the expression evaluates to `true`. A condition can add
// constraints based on attributes of the request, the resource, or
// both. To learn which resources support conditions in their IAM
// policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
// **JSON example:** { "bindings": [ { "role":
// "roles/resourcemanager.organizationAdmin", "members": [
// "user:[email protected]", "group:[email protected]",
// "domain:google.com",
// "serviceAccount:[email protected]" ] }, {
// "role": "roles/resourcemanager.organizationViewer", "members": [
// "user:[email protected]" ], "condition": { "title": "expirable access",
// "description": "Does not grant access after Sep 2020", "expression":
// "request.time < timestamp('2020-10-01T00:00:00.000Z')" } } ],
// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: -
// members: - user:[email protected] - group:[email protected] -
// domain:google.com -
// serviceAccount:[email protected] role:
// roles/resourcemanager.organizationAdmin - members: -
// user:[email protected] role: roles/resourcemanager.organizationViewer
// condition: title: expirable access description: Does not grant access
// after Sep 2020 expression: request.time <
// timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3
// For a description of IAM and its features, see the IAM documentation
// (https://cloud.google.com/iam/docs/).
type Policy struct {
// Bindings: Associates a list of `members`, or principals, with a
// `role`. Optionally, may specify a `condition` that determines how and
// when the `bindings` are applied. Each of the `bindings` must contain
// at least one principal. The `bindings` in a `Policy` can refer to up
// to 1,500 principals; up to 250 of these principals can be Google
// groups. Each occurrence of a principal counts towards these limits.
// For example, if the `bindings` grant 50 different roles to
// `user:[email protected]`, and not to any other principal, then you
// can add another 1,450 principals to the `bindings` in the `Policy`.
Bindings []*Binding `json:"bindings,omitempty"`
// Etag: `etag` is used for optimistic concurrency control as a way to
// help prevent simultaneous updates of a policy from overwriting each
// other. It is strongly suggested that systems make use of the `etag`
// in the read-modify-write cycle to perform policy updates in order to
// avoid race conditions: An `etag` is returned in the response to
// `getIamPolicy`, and systems are expected to put that etag in the
// request to `setIamPolicy` to ensure that their change will be applied
// to the same version of the policy. **Important:** If you use IAM
// Conditions, you must include the `etag` field whenever you call
// `setIamPolicy`. If you omit this field, then IAM allows you to
// overwrite a version `3` policy with a version `1` policy, and all of
// the conditions in the version `3` policy are lost.
Etag string `json:"etag,omitempty"`
// Version: Specifies the format of the policy. Valid values are `0`,
// `1`, and `3`. Requests that specify an invalid value are rejected.
// Any operation that affects conditional role bindings must specify
// version `3`. This requirement applies to the following operations: *
// Getting a policy that includes a conditional role binding * Adding a
// conditional role binding to a policy * Changing a conditional role
// binding in a policy * Removing any role binding, with or without a
// condition, from a policy that includes conditions **Important:** If
// you use IAM Conditions, you must include the `etag` field whenever
// you call `setIamPolicy`. If you omit this field, then IAM allows you
// to overwrite a version `3` policy with a version `1` policy, and all
// of the conditions in the version `3` policy are lost. If a policy
// does not include any conditions, operations on that policy may
// specify any valid version or leave the field unset. To learn which
// resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
Version int64 `json:"version,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server. (Not part of the JSON payload; note the `json:"-"` tag.)
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Bindings") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Bindings") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Policy) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod Policy
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ReconfigureTrustRequest: Request message for ReconfigureTrust
type ReconfigureTrustRequest struct {
// TargetDnsIpAddresses: Required. The target DNS server IP addresses to
// resolve the remote domain involved in the trust.
TargetDnsIpAddresses []string `json:"targetDnsIpAddresses,omitempty"`
// TargetDomainName: Required. The fully-qualified target domain name
// which will be in trust with the current domain.
TargetDomainName string `json:"targetDomainName,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "TargetDnsIpAddresses") to unconditionally include in API requests.
// By default, fields with empty or default values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "TargetDnsIpAddresses") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ReconfigureTrustRequest) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod ReconfigureTrustRequest
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// ResetAdminPasswordRequest: Request message for ResetAdminPassword.
// The message intentionally has no fields; the target domain is
// presumably identified by the request URL — confirm against the
// service definition.
type ResetAdminPasswordRequest struct {
}
// ResetAdminPasswordResponse: Response message for ResetAdminPassword
type ResetAdminPasswordResponse struct {
// Password: A random password. See admin for more information.
Password string `json:"password,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server. (Not part of the JSON payload; note the `json:"-"` tag.)
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Password") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Password") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ResetAdminPasswordResponse) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod ResetAdminPasswordResponse
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Schedule: Configure the schedule.
type Schedule struct {
// Day: Allows defining a schedule that runs on the specified day of
// the week.
//
// Possible values:
// "DAY_OF_WEEK_UNSPECIFIED" - The day of the week is unspecified.
// "MONDAY" - Monday
// "TUESDAY" - Tuesday
// "WEDNESDAY" - Wednesday
// "THURSDAY" - Thursday
// "FRIDAY" - Friday
// "SATURDAY" - Saturday
// "SUNDAY" - Sunday
Day string `json:"day,omitempty"`
// Duration: Output only. Duration of the time window, set by service
// producer.
Duration string `json:"duration,omitempty"`
// StartTime: Time within the window to start the operations.
StartTime *TimeOfDay `json:"startTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "Day") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Day") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Schedule) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod Schedule
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// SetIamPolicyRequest: Request message for `SetIamPolicy` method.
type SetIamPolicyRequest struct {
// Policy: REQUIRED: The complete policy to be applied to the
// `resource`. The size of the policy is limited to a few 10s of KB. An
// empty policy is a valid policy but certain Cloud Platform services
// (such as Projects) might reject it.
Policy *Policy `json:"policy,omitempty"`
// ForceSendFields is a list of field names (e.g. "Policy") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Policy") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod SetIamPolicyRequest
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// SqlIntegration: Represents the SQL instance integrated with Managed
// AD.
type SqlIntegration struct {
// CreateTime: Output only. The time the SQL integration was created.
CreateTime string `json:"createTime,omitempty"`
// Name: The unique name of the SQL integration in the form of
// `projects/{project_id}/locations/global/domains/{domain_name}/sqlIntegrations/{sql_integration}`
Name string `json:"name,omitempty"`
// SqlInstance: The full resource name of an integrated SQL instance
SqlInstance string `json:"sqlInstance,omitempty"`
// State: Output only. The current state of the SQL integration.
//
// Possible values:
// "STATE_UNSPECIFIED" - Not Set
// "CREATING" - The SQL integration is being created.
// "DELETING" - The SQL integration is being deleted.
// "READY" - The SQL integration is ready.
State string `json:"state,omitempty"`
// UpdateTime: Output only. The time the SQL integration was updated.
UpdateTime string `json:"updateTime,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server. (Not part of the JSON payload; note the `json:"-"` tag.)
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "CreateTime") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CreateTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SqlIntegration) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod SqlIntegration
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors). This struct mirrors
// the google.rpc.Status proto referenced in the field docs below.
type Status struct {
// Code: The status code, which should be an enum value of
// google.rpc.Code.
Code int64 `json:"code,omitempty"`
// Details: A list of messages that carry the error details. There is a
// common set of message types for APIs to use.
Details []googleapi.RawMessage `json:"details,omitempty"`
// Message: A developer-facing error message, which should be in
// English. Any user-facing error message should be localized and sent
// in the google.rpc.Status.details field, or localized by the client.
Message string `json:"message,omitempty"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Status) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod Status
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// TestIamPermissionsRequest: Request message for `TestIamPermissions`
// method.
type TestIamPermissionsRequest struct {
// Permissions: The set of permissions to check for the `resource`.
// Permissions with wildcards (such as '*' or 'storage.*') are not
// allowed. For more information, see the IAM Overview
// (https://cloud.google.com/iam/docs/overview#permissions).
Permissions []string `json:"permissions,omitempty"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) {
	// Alias strips the MarshalJSON method to avoid infinite recursion.
	type NoMethod TestIamPermissionsRequest
	return gensupport.MarshalJSON(NoMethod(*s), s.ForceSendFields, s.NullFields)
}
// TestIamPermissionsResponse: Response message for `TestIamPermissions`
// method.
type TestIamPermissionsResponse struct {
// Permissions: A subset of `TestIamPermissionsRequest.permissions` that
// the caller is allowed.
Permissions []string `json:"permissions,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server. (Not part of the JSON payload; note the `json:"-"` tag.)
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
// non-pointer, non-interface field appearing in ForceSendFields will be
// sent to the server regardless of whether the field is empty or not.
// This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TimeOfDay: Represents a time of day. The date and time zone are
// either not significant or are specified elsewhere. An API may choose
// to allow leap seconds. Related types are google.type.Date and
// `google.protobuf.Timestamp`.
type TimeOfDay struct {
	// Hours: Hours of day in 24 hour format. Should be from 0 to 23. An API
	// may choose to allow the value "24:00:00" for scenarios like business
	// closing time.
	Hours int64 `json:"hours,omitempty"`

	// Minutes: Minutes of hour of day. Must be from 0 to 59.
	Minutes int64 `json:"minutes,omitempty"`

	// Nanos: Fractions of seconds in nanoseconds. Must be from 0 to
	// 999,999,999.
	Nanos int64 `json:"nanos,omitempty"`

	// Seconds: Seconds of minutes of the time. Must normally be from 0 to
	// 59. An API may allow the value 60 if it allows leap-seconds.
	Seconds int64 `json:"seconds,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Hours") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Hours") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler, applying the ForceSendFields
// and NullFields overrides declared on the struct.
func (s *TimeOfDay) MarshalJSON() ([]byte, error) {
	// The NoMethod alias drops this MarshalJSON method so the call below
	// does not recurse infinitely.
	type NoMethod TimeOfDay
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Trust: Represents a relationship between two domains. This allows a
// controller in one domain to authenticate a user in another domain. If
// the trust is being changed, it will be placed into the UPDATING
// state, which indicates that the resource is being reconciled. At this
// point, Get will reflect an intermediate state.
type Trust struct {
	// CreateTime: Output only. The time the instance was created.
	CreateTime string `json:"createTime,omitempty"`

	// LastTrustHeartbeatTime: Output only. The last heartbeat time when the
	// trust was known to be connected.
	LastTrustHeartbeatTime string `json:"lastTrustHeartbeatTime,omitempty"`

	// SelectiveAuthentication: Optional. The trust authentication type,
	// which decides whether the trusted side has forest/domain wide access
	// or selective access to an approved set of resources.
	SelectiveAuthentication bool `json:"selectiveAuthentication,omitempty"`

	// State: Output only. The current state of the trust.
	//
	// Possible values:
	//   "STATE_UNSPECIFIED" - Not set.
	//   "CREATING" - The domain trust is being created.
	//   "UPDATING" - The domain trust is being updated.
	//   "DELETING" - The domain trust is being deleted.
	//   "CONNECTED" - The domain trust is connected.
	//   "DISCONNECTED" - The domain trust is disconnected.
	State string `json:"state,omitempty"`

	// StateDescription: Output only. Additional information about the
	// current state of the trust, if available.
	StateDescription string `json:"stateDescription,omitempty"`

	// TargetDnsIpAddresses: Required. The target DNS server IP addresses
	// which can resolve the remote domain involved in the trust.
	TargetDnsIpAddresses []string `json:"targetDnsIpAddresses,omitempty"`

	// TargetDomainName: Required. The fully qualified target domain name
	// which will be in trust with the current domain.
	TargetDomainName string `json:"targetDomainName,omitempty"`

	// TrustDirection: Required. The trust direction, which decides if the
	// current domain is trusted, trusting, or both.
	//
	// Possible values:
	//   "TRUST_DIRECTION_UNSPECIFIED" - Not set.
	//   "INBOUND" - The inbound direction represents the trusting side.
	//   "OUTBOUND" - The outbound direction represents the trusted side.
	//   "BIDIRECTIONAL" - The bidirectional direction represents the
	// trusted / trusting side.
	TrustDirection string `json:"trustDirection,omitempty"`

	// TrustHandshakeSecret: Required. The trust secret used for the
	// handshake with the target domain. This will not be stored.
	TrustHandshakeSecret string `json:"trustHandshakeSecret,omitempty"`

	// TrustType: Required. The type of trust represented by the trust
	// resource.
	//
	// Possible values:
	//   "TRUST_TYPE_UNSPECIFIED" - Not set.
	//   "FOREST" - The forest trust.
	//   "EXTERNAL" - The external domain trust.
	TrustType string `json:"trustType,omitempty"`

	// UpdateTime: Output only. The last update time.
	UpdateTime string `json:"updateTime,omitempty"`

	// ForceSendFields is a list of field names (e.g. "CreateTime") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "CreateTime") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler, applying the ForceSendFields
// and NullFields overrides declared on the struct.
func (s *Trust) MarshalJSON() ([]byte, error) {
	// The NoMethod alias drops this MarshalJSON method so the call below
	// does not recurse infinitely.
	type NoMethod Trust
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// UpdatePolicy: Maintenance policy applicable to instance updates.
type UpdatePolicy struct {
	// Channel: Optional. Relative scheduling channel applied to resource.
	//
	// Possible values:
	//   "UPDATE_CHANNEL_UNSPECIFIED" - Unspecified channel.
	//   "EARLIER" - Early channel within a customer project.
	//   "LATER" - Later channel within a customer project.
	Channel string `json:"channel,omitempty"`

	// DenyMaintenancePeriods: Deny Maintenance Period that is applied to
	// resource to indicate when maintenance is forbidden. User can specify
	// zero or more non-overlapping deny periods. Maximum number of
	// deny_maintenance_periods expected is one.
	DenyMaintenancePeriods []*DenyMaintenancePeriod `json:"denyMaintenancePeriods,omitempty"`

	// Window: Optional. Maintenance window that is applied to resources
	// covered by this policy.
	Window *MaintenanceWindow `json:"window,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Channel") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Channel") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler, applying the ForceSendFields
// and NullFields overrides declared on the struct.
func (s *UpdatePolicy) MarshalJSON() ([]byte, error) {
	// The NoMethod alias drops this MarshalJSON method so the call below
	// does not recurse infinitely.
	type NoMethod UpdatePolicy
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ValidateTrustRequest: Request message for ValidateTrust
type ValidateTrustRequest struct {
	// Trust: Required. The domain trust to validate trust state for.
	Trust *Trust `json:"trust,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Trust") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Trust") to include in API
	// requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler, applying the ForceSendFields
// and NullFields overrides declared on the struct.
func (s *ValidateTrustRequest) MarshalJSON() ([]byte, error) {
	// The NoMethod alias drops this MarshalJSON method so the call below
	// does not recurse infinitely.
	type NoMethod ValidateTrustRequest
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// WeeklyCycle: Time window specified for weekly operations.
type WeeklyCycle struct {
	// Schedule: User can specify multiple windows in a week. Minimum of 1
	// window.
	Schedule []*Schedule `json:"schedule,omitempty"`

	// ForceSendFields is a list of field names (e.g. "Schedule") to
	// unconditionally include in API requests. By default, fields with
	// empty or default values are omitted from API requests. However, any
	// non-pointer, non-interface field appearing in ForceSendFields will be
	// sent to the server regardless of whether the field is empty or not.
	// This may be used to include empty fields in Patch requests.
	ForceSendFields []string `json:"-"`

	// NullFields is a list of field names (e.g. "Schedule") to include in
	// API requests with the JSON null value. By default, fields with empty
	// values are omitted from API requests. However, any field with an
	// empty value appearing in NullFields will be sent to the server as
	// null. It is an error if a field in this list has a non-empty value.
	// This may be used to include null fields in Patch requests.
	NullFields []string `json:"-"`
}

// MarshalJSON implements json.Marshaler, applying the ForceSendFields
// and NullFields overrides declared on the struct.
func (s *WeeklyCycle) MarshalJSON() ([]byte, error) {
	// The NoMethod alias drops this MarshalJSON method so the call below
	// does not recurse infinitely.
	type NoMethod WeeklyCycle
	raw := NoMethod(*s)
	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "managedidentities.projects.locations.get":

// ProjectsLocationsGetCall accumulates the parameters for a
// "managedidentities.projects.locations.get" request. Build it with
// ProjectsLocationsService.Get and execute it with Do.
type ProjectsLocationsGetCall struct {
	s            *Service             // parent API client
	name         string               // resource name, expanded into the URL path
	urlParams_   gensupport.URLParams // query parameters
	ifNoneMatch_ string               // optional ETag for conditional GET
	ctx_         context.Context      // optional context for the request
	header_      http.Header          // extra request headers
}
// Get: Gets information about a location.
//
// - name: Resource name for the location.
func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall {
	call := &ProjectsLocationsGetCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
	return call
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// IfNoneMatch sets an optional ETag. When set, the request carries an
// If-None-Match header and fails with http.StatusNotModified if the
// object's ETag matches the given value, which is useful for fetching
// updates only after the object has changed since the last request.
// Use googleapi.IsNotModified to check whether the error returned by
// Do is the result of If-None-Match.
func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context used by this call's Do method; a pending
// HTTP request is aborted if the provided context is canceled.
func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The header map is created lazily on
// first access.
func (c *ProjectsLocationsGetCall) Header() http.Header {
	if h := c.header_; h != nil {
		return h
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest assembles and sends the HTTP GET request for this call,
// applying caller headers, the optional If-None-Match ETag, and the
// configured URL parameters.
func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Copy caller-supplied headers first; the Set calls below then take
	// precedence for User-Agent and If-None-Match.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Expand the {+name} template in the path with the resource name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.get" call.
// Exactly one of *Location or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Location.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error; checked before
	// err because the transport may return both a response and an error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Location{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Machine-readable method description; kept in sync by the generator.
	// {
	//   "description": "Gets information about a location.",
	//   "flatPath": "v1/projects/{projectsId}/locations/{locationsId}",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Resource name for the location.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}",
	//   "response": {
	//     "$ref": "Location"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.list":

// ProjectsLocationsListCall accumulates the parameters for a
// "managedidentities.projects.locations.list" request. Build it with
// ProjectsLocationsService.List and execute it with Do (or Pages).
type ProjectsLocationsListCall struct {
	s            *Service             // parent API client
	name         string               // parent resource name, expanded into the URL path
	urlParams_   gensupport.URLParams // query parameters (filter, pageSize, pageToken, ...)
	ifNoneMatch_ string               // optional ETag for conditional GET
	ctx_         context.Context      // optional context for the request
	header_      http.Header          // extra request headers
}
// List: Lists information about the supported locations for this
// service.
//
// - name: The resource that owns the locations collection, if
//   applicable.
func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall {
	call := &ProjectsLocationsListCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
	return call
}
// Filter sets the optional parameter "filter": A filter to narrow down
// results to a preferred subset. The filtering language accepts strings
// like "displayName=tokyo", and is documented in more detail in AIP-160
// (https://google.aip.dev/160).
func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall {
	const key = "filter"
	c.urlParams_.Set(key, filter)
	return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return. If not set, the service selects a default.
func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall {
	size := fmt.Sprint(pageSize)
	c.urlParams_.Set("pageSize", size)
	return c
}
// PageToken sets the optional parameter "pageToken": A page token
// received from the `next_page_token` field in the response. Send that
// page token to receive the subsequent page.
func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall {
	const key = "pageToken"
	c.urlParams_.Set(key, pageToken)
	return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// IfNoneMatch sets an optional ETag. When set, the request carries an
// If-None-Match header and fails with http.StatusNotModified if the
// object's ETag matches the given value, which is useful for fetching
// updates only after the object has changed since the last request.
// Use googleapi.IsNotModified to check whether the error returned by
// Do is the result of If-None-Match.
func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context used by this call's Do method; a pending
// HTTP request is aborted if the provided context is canceled.
func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The header map is created lazily on
// first access.
func (c *ProjectsLocationsListCall) Header() http.Header {
	if h := c.header_; h != nil {
		return h
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest assembles and sends the HTTP GET request for this call,
// applying caller headers, the optional If-None-Match ETag, and the
// configured URL parameters.
func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Copy caller-supplied headers first; the Set calls below then take
	// precedence for User-Agent and If-None-Match.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/locations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Expand the {+name} template in the path with the parent resource name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.list" call.
// Exactly one of *ListLocationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLocationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error; checked before
	// err because the transport may return both a response and an error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ListLocationsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Machine-readable method description; kept in sync by the generator.
	// {
	//   "description": "Lists information about the supported locations for this service.",
	//   "flatPath": "v1/projects/{projectsId}/locations",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.list",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like \"displayName=tokyo\", and is documented in more detail in [AIP-160](https://google.aip.dev/160).",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "name": {
	//       "description": "The resource that owns the locations collection, if applicable.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "The maximum number of results to return. If not set, the service selects a default.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}/locations",
	//   "response": {
	//     "$ref": "ListLocationsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error {
	c.ctx_ = ctx
	// The deferred call captures the CURRENT page token now and restores
	// it when Pages returns, so the call can be re-executed from the
	// original starting point.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty NextPageToken signals the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
// method id "managedidentities.projects.locations.global.domains.attachTrust":

// ProjectsLocationsGlobalDomainsAttachTrustCall accumulates the
// parameters for a
// "managedidentities.projects.locations.global.domains.attachTrust"
// request. Build it with ProjectsLocationsGlobalDomainsService.AttachTrust
// and execute it with Do.
type ProjectsLocationsGlobalDomainsAttachTrustCall struct {
	s                  *Service             // parent API client
	name               string               // domain resource name, expanded into the URL path
	attachtrustrequest *AttachTrustRequest  // JSON request body
	urlParams_         gensupport.URLParams // query parameters
	ctx_               context.Context      // optional context for the request
	header_            http.Header          // extra request headers
}
// AttachTrust: Adds an AD trust to a domain.
//
// - name: The resource domain name, project name and location using the
//   form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) AttachTrust(name string, attachtrustrequest *AttachTrustRequest) *ProjectsLocationsGlobalDomainsAttachTrustCall {
	call := &ProjectsLocationsGlobalDomainsAttachTrustCall{
		s:                  r.s,
		urlParams_:         make(gensupport.URLParams),
		name:               name,
		attachtrustrequest: attachtrustrequest,
	}
	return call
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsAttachTrustCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsAttachTrustCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// Context sets the context used by this call's Do method; a pending
// HTTP request is aborted if the provided context is canceled.
func (c *ProjectsLocationsGlobalDomainsAttachTrustCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsAttachTrustCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The header map is created lazily on
// first access.
func (c *ProjectsLocationsGlobalDomainsAttachTrustCall) Header() http.Header {
	if h := c.header_; h != nil {
		return h
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest assembles and sends the HTTP POST request for this call,
// marshaling the AttachTrustRequest body as JSON and applying the
// configured headers and URL parameters.
func (c *ProjectsLocationsGlobalDomainsAttachTrustCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Copy caller-supplied headers first; the Set calls below then take
	// precedence for User-Agent and Content-Type.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.attachtrustrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:attachTrust")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Expand the {+name} template in the path with the domain resource name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.attachTrust" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsAttachTrustCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error; checked before
	// err because the transport may return both a response and an error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Machine-readable method description; kept in sync by the generator.
	// {
	//   "description": "Adds an AD trust to a domain.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:attachTrust",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.attachTrust",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}:attachTrust",
	//   "request": {
	//     "$ref": "AttachTrustRequest"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.create":

// ProjectsLocationsGlobalDomainsCreateCall accumulates the parameters
// for a "managedidentities.projects.locations.global.domains.create"
// request. Build it with ProjectsLocationsGlobalDomainsService.Create
// and execute it with Do.
type ProjectsLocationsGlobalDomainsCreateCall struct {
	s          *Service             // parent API client
	parent     string               // parent resource name, expanded into the URL path
	domain     *Domain              // JSON request body
	urlParams_ gensupport.URLParams // query parameters (domainName, ...)
	ctx_       context.Context      // optional context for the request
	header_    http.Header          // extra request headers
}
// Create: Creates a Microsoft AD domain.
//
// - parent: The resource project name and location using the form:
//   `projects/{project_id}/locations/global`.
func (r *ProjectsLocationsGlobalDomainsService) Create(parent string, domain *Domain) *ProjectsLocationsGlobalDomainsCreateCall {
	call := &ProjectsLocationsGlobalDomainsCreateCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		parent:     parent,
		domain:     domain,
	}
	return call
}
// DomainName sets the optional parameter "domainName": Required. The
// fully qualified domain name, e.g. mydomain.myorganization.com, with
// the following restrictions: * Must contain only lowercase letters,
// numbers, periods and hyphens. * Must start with a letter. * Must
// contain between 2-64 characters. * Must end with a number or a
// letter. * Must not start with period. * First segment length
// (mydomain for example above) shouldn't exceed 15 chars. * The last
// segment cannot be fully numeric. * Must be unique within the customer
// project.
func (c *ProjectsLocationsGlobalDomainsCreateCall) DomainName(domainName string) *ProjectsLocationsGlobalDomainsCreateCall {
	const key = "domainName"
	c.urlParams_.Set(key, domainName)
	return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsCreateCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// Context sets the context used by this call's Do method; a pending
// HTTP request is aborted if the provided context is canceled.
func (c *ProjectsLocationsGlobalDomainsCreateCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsCreateCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request. The header map is created lazily on
// first access.
func (c *ProjectsLocationsGlobalDomainsCreateCall) Header() http.Header {
	if h := c.header_; h != nil {
		return h
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest assembles and sends the HTTP POST request for this call,
// marshaling the Domain body as JSON and applying the configured
// headers and URL parameters.
func (c *ProjectsLocationsGlobalDomainsCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Copy caller-supplied headers first; the Set calls below then take
	// precedence for User-Agent and Content-Type.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.domain)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/domains")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Expand the {+parent} template in the path with the parent resource name.
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.create" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error; checked before
	// err because the transport may return both a response and an error.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Machine-readable method description; kept in sync by the generator.
	// {
	//   "description": "Creates a Microsoft AD domain.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.create",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "domainName": {
	//       "description": "Required. The fully qualified domain name. e.g. mydomain.myorganization.com, with the following restrictions: * Must contain only lowercase letters, numbers, periods and hyphens. * Must start with a letter. * Must contain between 2-64 characters. * Must end with a number or a letter. * Must not start with period. * First segment length (mydomain for example above) shouldn't exceed 15 chars. * The last segment cannot be fully numeric. * Must be unique within the customer project.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The resource project name and location using the form: `projects/{project_id}/locations/global`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+parent}/domains",
	//   "request": {
	//     "$ref": "Domain"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.delete":

// ProjectsLocationsGlobalDomainsDeleteCall holds the state of a
// domains.delete call while it is configured and executed.
type ProjectsLocationsGlobalDomainsDeleteCall struct {
	s          *Service
	name       string               // domain resource name (URL path parameter)
	urlParams_ gensupport.URLParams // URL query parameters
	ctx_       context.Context      // optional per-call context set via Context()
	header_    http.Header          // extra HTTP headers, lazily created by Header()
}
// Delete: Deletes a domain.
//
// - name: The domain resource name using the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) Delete(name string) *ProjectsLocationsGlobalDomainsDeleteCall {
	return &ProjectsLocationsGlobalDomainsDeleteCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsDeleteCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// Context sets the context used by this call's Do method; any pending
// HTTP request is aborted if the supplied context is canceled.
func (c *ProjectsLocationsGlobalDomainsDeleteCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsDeleteCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that the caller may modify to add HTTP
// headers to the request. The map is created lazily on first use.
func (c *ProjectsLocationsGlobalDomainsDeleteCall) Header() http.Header {
	if c.header_ != nil {
		return c.header_
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest builds and sends the HTTP DELETE request for this call.
func (c *ProjectsLocationsGlobalDomainsDeleteCall) doRequest(alt string) (*http.Response, error) {
	hdr := make(http.Header)
	hdr.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdr[key] = vals
	}
	hdr.Set("User-Agent", c.s.userAgent())
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", u, nil)
	if err != nil {
		return nil, err
	}
	req.Header = hdr
	googleapi.Expand(req.URL, map[string]string{"name": c.name})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.delete" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 response is translated into a *googleapi.Error before err is
	// examined, because res can be non-nil even when err is non-nil;
	// callers detect this case with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a domain.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}",
	//   "httpMethod": "DELETE",
	//   "id": "managedidentities.projects.locations.global.domains.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.detachTrust":

// ProjectsLocationsGlobalDomainsDetachTrustCall holds the state of a
// domains.detachTrust call while it is configured and executed.
type ProjectsLocationsGlobalDomainsDetachTrustCall struct {
	s                  *Service
	name               string              // domain resource name (URL path parameter)
	detachtrustrequest *DetachTrustRequest // JSON request body
	urlParams_         gensupport.URLParams
	ctx_               context.Context // optional per-call context set via Context()
	header_            http.Header     // extra HTTP headers, lazily created by Header()
}
// DetachTrust: Removes an AD trust.
//
// - name: The resource domain name, project name, and location using
//   the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) DetachTrust(name string, detachtrustrequest *DetachTrustRequest) *ProjectsLocationsGlobalDomainsDetachTrustCall {
	return &ProjectsLocationsGlobalDomainsDetachTrustCall{
		s:                  r.s,
		urlParams_:         make(gensupport.URLParams),
		name:               name,
		detachtrustrequest: detachtrustrequest,
	}
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsDetachTrustCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsDetachTrustCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// Context sets the context used by this call's Do method; any pending
// HTTP request is aborted if the supplied context is canceled.
func (c *ProjectsLocationsGlobalDomainsDetachTrustCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsDetachTrustCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that the caller may modify to add HTTP
// headers to the request. The map is created lazily on first use.
func (c *ProjectsLocationsGlobalDomainsDetachTrustCall) Header() http.Header {
	if c.header_ != nil {
		return c.header_
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest serializes the request body and sends the HTTP POST request
// for this call.
func (c *ProjectsLocationsGlobalDomainsDetachTrustCall) doRequest(alt string) (*http.Response, error) {
	hdr := make(http.Header)
	hdr.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdr[key] = vals
	}
	hdr.Set("User-Agent", c.s.userAgent())
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.detachtrustrequest)
	if err != nil {
		return nil, err
	}
	hdr.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:detachTrust") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", u, body)
	if err != nil {
		return nil, err
	}
	req.Header = hdr
	googleapi.Expand(req.URL, map[string]string{"name": c.name})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.detachTrust" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsDetachTrustCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 response is translated into a *googleapi.Error before err is
	// examined, because res can be non-nil even when err is non-nil;
	// callers detect this case with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Removes an AD trust.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:detachTrust",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.detachTrust",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}:detachTrust",
	//   "request": {
	//     "$ref": "DetachTrustRequest"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.get":

// ProjectsLocationsGlobalDomainsGetCall holds the state of a
// domains.get call while it is configured and executed.
type ProjectsLocationsGlobalDomainsGetCall struct {
	s            *Service
	name         string               // domain resource name (URL path parameter)
	urlParams_   gensupport.URLParams // URL query parameters
	ifNoneMatch_ string               // optional ETag for conditional GET
	ctx_         context.Context      // optional per-call context set via Context()
	header_      http.Header          // extra HTTP headers, lazily created by Header()
}
// Get: Gets information about a domain.
//
// - name: The domain resource name using the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) Get(name string) *ProjectsLocationsGlobalDomainsGetCall {
	return &ProjectsLocationsGlobalDomainsGetCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsGetCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalDomainsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalDomainsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context used by this call's Do method; any pending
// HTTP request is aborted if the supplied context is canceled.
func (c *ProjectsLocationsGlobalDomainsGetCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsGetCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that the caller may modify to add HTTP
// headers to the request. The map is created lazily on first use.
func (c *ProjectsLocationsGlobalDomainsGetCall) Header() http.Header {
	if c.header_ != nil {
		return c.header_
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest builds and sends the HTTP GET request for this call,
// including the If-None-Match header when an ETag was supplied.
func (c *ProjectsLocationsGlobalDomainsGetCall) doRequest(alt string) (*http.Response, error) {
	hdr := make(http.Header)
	hdr.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdr[key] = vals
	}
	hdr.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		hdr.Set("If-None-Match", c.ifNoneMatch_)
	}
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	req.Header = hdr
	googleapi.Expand(req.URL, map[string]string{"name": c.name})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.get" call.
// Exactly one of *Domain or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Domain.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalDomainsGetCall) Do(opts ...googleapi.CallOption) (*Domain, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 response is translated into a *googleapi.Error before err is
	// examined, because res can be non-nil even when err is non-nil;
	// callers detect this case with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Domain{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets information about a domain.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.global.domains.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}",
	//   "response": {
	//     "$ref": "Domain"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.getIamPolicy":

// ProjectsLocationsGlobalDomainsGetIamPolicyCall holds the state of a
// domains.getIamPolicy call while it is configured and executed.
type ProjectsLocationsGlobalDomainsGetIamPolicyCall struct {
	s            *Service
	resource     string               // resource whose policy is requested (URL path parameter)
	urlParams_   gensupport.URLParams // URL query parameters
	ifNoneMatch_ string               // optional ETag for conditional GET
	ctx_         context.Context      // optional per-call context set via Context()
	header_      http.Header          // extra HTTP headers, lazily created by Header()
}
// GetIamPolicy: Gets the access control policy for a resource. Returns
// an empty policy if the resource exists and does not have a policy
// set.
//
// - resource: REQUIRED: The resource for which the policy is being
//   requested. See the operation documentation for the appropriate
//   value for this field.
func (r *ProjectsLocationsGlobalDomainsService) GetIamPolicy(resource string) *ProjectsLocationsGlobalDomainsGetIamPolicyCall {
	return &ProjectsLocationsGlobalDomainsGetIamPolicyCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		resource:   resource,
	}
}
// OptionsRequestedPolicyVersion sets the optional parameter
// "options.requestedPolicyVersion": the maximum policy version that
// will be used to format the policy. Valid values are 0, 1, and 3;
// requests specifying an invalid value will be rejected. Requests for
// policies with any conditional role bindings must specify version 3,
// while policies with no conditional role bindings may specify any
// valid value or leave the field unset. The response may use the
// version you specified or a lower one — for example, requesting
// version 3 for a policy without conditional role bindings yields a
// version 1 response. To learn which resources support conditions in
// their IAM policies, see the IAM documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsGlobalDomainsGetIamPolicyCall {
	version := fmt.Sprint(optionsRequestedPolicyVersion)
	c.urlParams_.Set("options.requestedPolicyVersion", version)
	return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsGetIamPolicyCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalDomainsGetIamPolicyCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context used by this call's Do method; any pending
// HTTP request is aborted if the supplied context is canceled.
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsGetIamPolicyCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that the caller may modify to add HTTP
// headers to the request. The map is created lazily on first use.
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) Header() http.Header {
	if c.header_ != nil {
		return c.header_
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest builds and sends the HTTP GET request for this call,
// including the If-None-Match header when an ETag was supplied.
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
	hdr := make(http.Header)
	hdr.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdr[key] = vals
	}
	hdr.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		hdr.Set("If-None-Match", c.ifNoneMatch_)
	}
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	req.Header = hdr
	googleapi.Expand(req.URL, map[string]string{"resource": c.resource})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalDomainsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 response is translated into a *googleapi.Error before err is
	// examined, because res can be non-nil even when err is non-nil;
	// callers detect this case with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Policy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:getIamPolicy",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.global.domains.getIamPolicy",
	//   "parameterOrder": [
	//     "resource"
	//   ],
	//   "parameters": {
	//     "options.requestedPolicyVersion": {
	//       "description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "resource": {
	//       "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+resource}:getIamPolicy",
	//   "response": {
	//     "$ref": "Policy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.getLdapssettings":

// ProjectsLocationsGlobalDomainsGetLdapssettingsCall holds the state of
// a domains.getLdapssettings call while it is configured and executed.
type ProjectsLocationsGlobalDomainsGetLdapssettingsCall struct {
	s            *Service
	name         string               // domain resource name (URL path parameter)
	urlParams_   gensupport.URLParams // URL query parameters
	ifNoneMatch_ string               // optional ETag for conditional GET
	ctx_         context.Context      // optional per-call context set via Context()
	header_      http.Header          // extra HTTP headers, lazily created by Header()
}
// GetLdapssettings: Gets the domain ldaps settings.
//
// - name: The domain resource name using the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) GetLdapssettings(name string) *ProjectsLocationsGlobalDomainsGetLdapssettingsCall {
	return &ProjectsLocationsGlobalDomainsGetLdapssettingsCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsGetLdapssettingsCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsGetLdapssettingsCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalDomainsGetLdapssettingsCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalDomainsGetLdapssettingsCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context used by this call's Do method; any pending
// HTTP request is aborted if the supplied context is canceled.
func (c *ProjectsLocationsGlobalDomainsGetLdapssettingsCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsGetLdapssettingsCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that the caller may modify to add HTTP
// headers to the request. The map is created lazily on first use.
func (c *ProjectsLocationsGlobalDomainsGetLdapssettingsCall) Header() http.Header {
	if c.header_ != nil {
		return c.header_
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest builds and sends the HTTP GET request for this call,
// including the If-None-Match header when an ETag was supplied.
func (c *ProjectsLocationsGlobalDomainsGetLdapssettingsCall) doRequest(alt string) (*http.Response, error) {
	hdr := make(http.Header)
	hdr.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdr[key] = vals
	}
	hdr.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		hdr.Set("If-None-Match", c.ifNoneMatch_)
	}
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/ldapssettings") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	req.Header = hdr
	googleapi.Expand(req.URL, map[string]string{"name": c.name})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.getLdapssettings" call.
// Exactly one of *LDAPSSettings or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *LDAPSSettings.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsGetLdapssettingsCall) Do(opts ...googleapi.CallOption) (*LDAPSSettings, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 response is translated into a *googleapi.Error before err is
	// examined, because res can be non-nil even when err is non-nil;
	// callers detect this case with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &LDAPSSettings{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets the domain ldaps settings.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}/ldapssettings",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.global.domains.getLdapssettings",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}/ldapssettings",
	//   "response": {
	//     "$ref": "LDAPSSettings"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.list":

// ProjectsLocationsGlobalDomainsListCall holds the state of a
// domains.list call while it is configured and executed.
type ProjectsLocationsGlobalDomainsListCall struct {
	s            *Service
	parent       string               // parent location resource name (URL path parameter)
	urlParams_   gensupport.URLParams // URL query parameters (filter, orderBy, paging, ...)
	ifNoneMatch_ string               // optional ETag for conditional GET
	ctx_         context.Context      // optional per-call context set via Context()
	header_      http.Header          // extra HTTP headers, lazily created by Header()
}
// List: Lists domains in a project.
//
// - parent: The resource name of the domain location using the form:
//   `projects/{project_id}/locations/global`.
func (r *ProjectsLocationsGlobalDomainsService) List(parent string) *ProjectsLocationsGlobalDomainsListCall {
	return &ProjectsLocationsGlobalDomainsListCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		parent:     parent,
	}
}
// Filter sets the optional parameter "filter": A filter specifying
// constraints of a list operation. For example,
// `Domain.fqdn="mydomain.myorginization"`.
func (c *ProjectsLocationsGlobalDomainsListCall) Filter(filter string) *ProjectsLocationsGlobalDomainsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}
// OrderBy sets the optional parameter "orderBy": Specifies the ordering
// of results. See Sorting order
// (https://cloud.google.com/apis/design/design_patterns#sorting_order)
// for more information.
func (c *ProjectsLocationsGlobalDomainsListCall) OrderBy(orderBy string) *ProjectsLocationsGlobalDomainsListCall {
	c.urlParams_.Set("orderBy", orderBy)
	return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of items to return. If not specified, a default value of 1000 will be
// used. Regardless of the page_size value, the response may include a
// partial list; callers should rely on a response's next_page_token to
// determine whether additional results remain.
func (c *ProjectsLocationsGlobalDomainsListCall) PageSize(pageSize int64) *ProjectsLocationsGlobalDomainsListCall {
	size := fmt.Sprint(pageSize)
	c.urlParams_.Set("pageSize", size)
	return c
}
// PageToken sets the optional parameter "pageToken": The
// `next_page_token` value returned from a previous ListDomainsRequest
// request, if any.
func (c *ProjectsLocationsGlobalDomainsListCall) PageToken(pageToken string) *ProjectsLocationsGlobalDomainsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsListCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalDomainsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalDomainsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context used by this call's Do method; any pending
// HTTP request is aborted if the supplied context is canceled.
func (c *ProjectsLocationsGlobalDomainsListCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsListCall {
	c.ctx_ = ctx
	return c
}
// Header returns an http.Header that the caller may modify to add HTTP
// headers to the request. The map is created lazily on first use.
func (c *ProjectsLocationsGlobalDomainsListCall) Header() http.Header {
	if c.header_ != nil {
		return c.header_
	}
	c.header_ = make(http.Header)
	return c.header_
}
// doRequest builds and sends the HTTP GET request for this call,
// including the If-None-Match header when an ETag was supplied.
func (c *ProjectsLocationsGlobalDomainsListCall) doRequest(alt string) (*http.Response, error) {
	hdr := make(http.Header)
	hdr.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdr[key] = vals
	}
	hdr.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		hdr.Set("If-None-Match", c.ifNoneMatch_)
	}
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/domains") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	req.Header = hdr
	googleapi.Expand(req.URL, map[string]string{"parent": c.parent})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.list" call.
// Exactly one of *ListDomainsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListDomainsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsListCall) Do(opts ...googleapi.CallOption) (*ListDomainsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 response is translated into a *googleapi.Error before err is
	// examined, because res can be non-nil even when err is non-nil;
	// callers detect this case with googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ListDomainsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists domains in a project.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.global.domains.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "Optional. A filter specifying constraints of a list operation. For example, `Domain.fqdn=\"mydomain.myorginization\"`.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "orderBy": {
	//       "description": "Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "Optional. The maximum number of items to return. If not specified, a default value of 1000 will be used. Regardless of the page_size value, the response may include a partial list. Callers should rely on a response's next_page_token to determine if there are additional results to list.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "Optional. The `next_page_token` value returned from a previous ListDomainsRequest request, if any.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The resource name of the domain location using the form: `projects/{project_id}/locations/global`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+parent}/domains",
	//   "response": {
	//     "$ref": "ListDomainsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsGlobalDomainsListCall) Pages(ctx context.Context, f func(*ListDomainsResponse) error) error {
	c.ctx_ = ctx
	// The deferred call restores whatever page token was set before iteration,
	// so the call value can be reused from its original starting point.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty NextPageToken marks the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
// method id "managedidentities.projects.locations.global.domains.patch":

// ProjectsLocationsGlobalDomainsPatchCall holds the state of a pending
// Domains.Patch request until Do is invoked.
type ProjectsLocationsGlobalDomainsPatchCall struct {
	s          *Service
	name       string
	domain     *Domain
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Patch: Updates the metadata and configuration of a domain.
//
// - name: The unique name of the domain using the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) Patch(name string, domain *Domain) *ProjectsLocationsGlobalDomainsPatchCall {
	c := &ProjectsLocationsGlobalDomainsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.domain = domain
	return c
}

// UpdateMask sets the optional parameter "updateMask": Required. Mask
// of fields to update. At least one path must be supplied in this
// field. The elements of the repeated paths field may only include
// fields from Domain: * `labels` * `locations` * `authorized_networks`
// * `audit_logs_enabled`
func (c *ProjectsLocationsGlobalDomainsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsGlobalDomainsPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGlobalDomainsPatchCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalDomainsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and issues the HTTP request for this call; alt selects
// the response encoding (Do always passes "json").
func (c *ProjectsLocationsGlobalDomainsPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Caller-supplied headers are merged next; User-Agent is then forced to
	// the client's value regardless of what the caller set.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.domain)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.patch" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// gensupport.DecodeResponse is handed a pointer to the result pointer.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method, preserved by the generator:
	// {
	//   "description": "Updates the metadata and configuration of a domain.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}",
	//   "httpMethod": "PATCH",
	//   "id": "managedidentities.projects.locations.global.domains.patch",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The unique name of the domain using the form: `projects/{project_id}/locations/global/domains/{domain_name}`.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Required. Mask of fields to update. At least one path must be supplied in this field. The elements of the repeated paths field may only include fields from Domain: * `labels` * `locations` * `authorized_networks` * `audit_logs_enabled`",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}",
	//   "request": {
	//     "$ref": "Domain"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.reconfigureTrust":

// ProjectsLocationsGlobalDomainsReconfigureTrustCall holds the state of a
// pending Domains.ReconfigureTrust request until Do is invoked.
type ProjectsLocationsGlobalDomainsReconfigureTrustCall struct {
	s                       *Service
	name                    string
	reconfiguretrustrequest *ReconfigureTrustRequest
	urlParams_              gensupport.URLParams
	ctx_                    context.Context
	header_                 http.Header
}

// ReconfigureTrust: Updates the DNS conditional forwarder.
//
// - name: The resource domain name, project name and location using the
//   form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) ReconfigureTrust(name string, reconfiguretrustrequest *ReconfigureTrustRequest) *ProjectsLocationsGlobalDomainsReconfigureTrustCall {
	c := &ProjectsLocationsGlobalDomainsReconfigureTrustCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.reconfiguretrustrequest = reconfiguretrustrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsReconfigureTrustCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsReconfigureTrustCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGlobalDomainsReconfigureTrustCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsReconfigureTrustCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalDomainsReconfigureTrustCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and issues the HTTP request for this call; alt selects
// the response encoding (Do always passes "json").
func (c *ProjectsLocationsGlobalDomainsReconfigureTrustCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Caller-supplied headers are merged next; User-Agent is then forced to
	// the client's value regardless of what the caller set.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.reconfiguretrustrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:reconfigureTrust")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.reconfigureTrust" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsReconfigureTrustCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// gensupport.DecodeResponse is handed a pointer to the result pointer.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method, preserved by the generator:
	// {
	//   "description": "Updates the DNS conditional forwarder.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:reconfigureTrust",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.reconfigureTrust",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}:reconfigureTrust",
	//   "request": {
	//     "$ref": "ReconfigureTrustRequest"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.resetAdminPassword":

// ProjectsLocationsGlobalDomainsResetAdminPasswordCall holds the state of a
// pending Domains.ResetAdminPassword request until Do is invoked.
type ProjectsLocationsGlobalDomainsResetAdminPasswordCall struct {
	s                         *Service
	name                      string
	resetadminpasswordrequest *ResetAdminPasswordRequest
	urlParams_                gensupport.URLParams
	ctx_                      context.Context
	header_                   http.Header
}

// ResetAdminPassword: Resets a domain's administrator password.
//
// - name: The domain resource name using the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) ResetAdminPassword(name string, resetadminpasswordrequest *ResetAdminPasswordRequest) *ProjectsLocationsGlobalDomainsResetAdminPasswordCall {
	c := &ProjectsLocationsGlobalDomainsResetAdminPasswordCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.resetadminpasswordrequest = resetadminpasswordrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsResetAdminPasswordCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsResetAdminPasswordCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGlobalDomainsResetAdminPasswordCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsResetAdminPasswordCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalDomainsResetAdminPasswordCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and issues the HTTP request for this call; alt selects
// the response encoding (Do always passes "json").
func (c *ProjectsLocationsGlobalDomainsResetAdminPasswordCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Caller-supplied headers are merged next; User-Agent is then forced to
	// the client's value regardless of what the caller set.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.resetadminpasswordrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:resetAdminPassword")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.resetAdminPassword" call.
// Exactly one of *ResetAdminPasswordResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ResetAdminPasswordResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsResetAdminPasswordCall) Do(opts ...googleapi.CallOption) (*ResetAdminPasswordResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ResetAdminPasswordResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// gensupport.DecodeResponse is handed a pointer to the result pointer.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method, preserved by the generator:
	// {
	//   "description": "Resets a domain's administrator password.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:resetAdminPassword",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.resetAdminPassword",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}:resetAdminPassword",
	//   "request": {
	//     "$ref": "ResetAdminPasswordRequest"
	//   },
	//   "response": {
	//     "$ref": "ResetAdminPasswordResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.setIamPolicy":

// ProjectsLocationsGlobalDomainsSetIamPolicyCall holds the state of a
// pending Domains.SetIamPolicy request until Do is invoked.
type ProjectsLocationsGlobalDomainsSetIamPolicyCall struct {
	s                   *Service
	resource            string
	setiampolicyrequest *SetIamPolicyRequest
	urlParams_          gensupport.URLParams
	ctx_                context.Context
	header_             http.Header
}

// SetIamPolicy: Sets the access control policy on the specified
// resource. Replaces any existing policy. Can return `NOT_FOUND`,
// `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
//
// - resource: REQUIRED: The resource for which the policy is being
//   specified. See the operation documentation for the appropriate
//   value for this field.
func (r *ProjectsLocationsGlobalDomainsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsGlobalDomainsSetIamPolicyCall {
	c := &ProjectsLocationsGlobalDomainsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.resource = resource
	c.setiampolicyrequest = setiampolicyrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsSetIamPolicyCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGlobalDomainsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsSetIamPolicyCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalDomainsSetIamPolicyCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and issues the HTTP request for this call; alt selects
// the response encoding (Do always passes "json").
func (c *ProjectsLocationsGlobalDomainsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Caller-supplied headers are merged next; User-Agent is then forced to
	// the client's value regardless of what the caller set.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"resource": c.resource,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalDomainsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Policy{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// gensupport.DecodeResponse is handed a pointer to the result pointer.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method, preserved by the generator:
	// {
	//   "description": "Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:setIamPolicy",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.setIamPolicy",
	//   "parameterOrder": [
	//     "resource"
	//   ],
	//   "parameters": {
	//     "resource": {
	//       "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+resource}:setIamPolicy",
	//   "request": {
	//     "$ref": "SetIamPolicyRequest"
	//   },
	//   "response": {
	//     "$ref": "Policy"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.testIamPermissions":

// ProjectsLocationsGlobalDomainsTestIamPermissionsCall holds the state of a
// pending Domains.TestIamPermissions request until Do is invoked.
type ProjectsLocationsGlobalDomainsTestIamPermissionsCall struct {
	s                         *Service
	resource                  string
	testiampermissionsrequest *TestIamPermissionsRequest
	urlParams_                gensupport.URLParams
	ctx_                      context.Context
	header_                   http.Header
}

// TestIamPermissions: Returns permissions that a caller has on the
// specified resource. If the resource does not exist, this will return
// an empty set of permissions, not a `NOT_FOUND` error. Note: This
// operation is designed to be used for building permission-aware UIs
// and command-line tools, not for authorization checking. This
// operation may "fail open" without warning.
//
// - resource: REQUIRED: The resource for which the policy detail is
//   being requested. See the operation documentation for the
//   appropriate value for this field.
func (r *ProjectsLocationsGlobalDomainsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsGlobalDomainsTestIamPermissionsCall {
	c := &ProjectsLocationsGlobalDomainsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.resource = resource
	c.testiampermissionsrequest = testiampermissionsrequest
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsTestIamPermissionsCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGlobalDomainsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsTestIamPermissionsCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalDomainsTestIamPermissionsCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and issues the HTTP request for this call; alt selects
// the response encoding (Do always passes "json").
func (c *ProjectsLocationsGlobalDomainsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Caller-supplied headers are merged next; User-Agent is then forced to
	// the client's value regardless of what the caller set.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"resource": c.resource,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &TestIamPermissionsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// gensupport.DecodeResponse is handed a pointer to the result pointer.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method, preserved by the generator:
	// {
	//   "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:testIamPermissions",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.testIamPermissions",
	//   "parameterOrder": [
	//     "resource"
	//   ],
	//   "parameters": {
	//     "resource": {
	//       "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+resource}:testIamPermissions",
	//   "request": {
	//     "$ref": "TestIamPermissionsRequest"
	//   },
	//   "response": {
	//     "$ref": "TestIamPermissionsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.updateLdapssettings":

// ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall holds the state of a
// pending Domains.UpdateLdapssettings request until Do is invoked.
type ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall struct {
	s             *Service
	name          string
	ldapssettings *LDAPSSettings
	urlParams_    gensupport.URLParams
	ctx_          context.Context
	header_       http.Header
}

// UpdateLdapssettings: Patches a single ldaps settings.
//
// - name: The resource name of the LDAPS settings. Uses the form:
//   `projects/{project}/locations/{location}/domains/{domain}`.
func (r *ProjectsLocationsGlobalDomainsService) UpdateLdapssettings(name string, ldapssettings *LDAPSSettings) *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall {
	c := &ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	c.ldapssettings = ldapssettings
	return c
}

// UpdateMask sets the optional parameter "updateMask": Required. Mask
// of fields to update. At least one path must be supplied in this
// field. For the `FieldMask` definition, see
// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
func (c *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall) UpdateMask(updateMask string) *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds and issues the HTTP request for this call; alt selects
// the response encoding (Do always passes "json").
func (c *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Caller-supplied headers are merged next; User-Agent is then forced to
	// the client's value regardless of what the caller set.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.ldapssettings)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/ldapssettings")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "managedidentities.projects.locations.global.domains.updateLdapssettings" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsUpdateLdapssettingsCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers, so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	// gensupport.DecodeResponse is handed a pointer to the result pointer.
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery-document metadata for this method, preserved by the generator:
	// {
	//   "description": "Patches a single ldaps settings.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}/ldapssettings",
	//   "httpMethod": "PATCH",
	//   "id": "managedidentities.projects.locations.global.domains.updateLdapssettings",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The resource name of the LDAPS settings. Uses the form: `projects/{project}/locations/{location}/domains/{domain}`.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "updateMask": {
	//       "description": "Required. Mask of fields to update. At least one path must be supplied in this field. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask",
	//       "format": "google-fieldmask",
	//       "location": "query",
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}/ldapssettings",
	//   "request": {
	//     "$ref": "LDAPSSettings"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.validateTrust":
// ProjectsLocationsGlobalDomainsValidateTrustCall holds the state for a
// validateTrust RPC. Construct it via
// ProjectsLocationsGlobalDomainsService.ValidateTrust and run it with Do.
type ProjectsLocationsGlobalDomainsValidateTrustCall struct {
	s                    *Service
	name                 string                // domain resource name, expanded into the URL path
	validatetrustrequest *ValidateTrustRequest // JSON-encoded request body
	urlParams_           gensupport.URLParams
	ctx_                 context.Context
	header_              http.Header
}
// ValidateTrust: Validates a trust state, that the target domain is
// reachable, and that the target domain is able to accept incoming
// trust requests.
//
// - name: The resource domain name, project name, and location using
//   the form:
//   `projects/{project_id}/locations/global/domains/{domain_name}`.
func (r *ProjectsLocationsGlobalDomainsService) ValidateTrust(name string, validatetrustrequest *ValidateTrustRequest) *ProjectsLocationsGlobalDomainsValidateTrustCall {
	return &ProjectsLocationsGlobalDomainsValidateTrustCall{
		s:                    r.s,
		urlParams_:           make(gensupport.URLParams),
		name:                 name,
		validatetrustrequest: validatetrustrequest,
	}
}
// Fields selects which fields to include in a partial response. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details. Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsValidateTrustCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsValidateTrustCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsValidateTrustCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsValidateTrustCall {
	c.ctx_ = ctx
	return c
}
// Header returns the http.Header map that will be merged into the
// request; callers may add custom headers to it. The map is created
// lazily on first use.
func (c *ProjectsLocationsGlobalDomainsValidateTrustCall) Header() http.Header {
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest builds and sends the HTTP request for this call, returning
// the raw *http.Response. It is invoked by Do and not intended for
// direct use.
func (c *ProjectsLocationsGlobalDomainsValidateTrustCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Apply caller-supplied headers (set via Header()) first.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// Encode the request message as JSON without a data wrapper.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.validatetrustrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:validateTrust")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path parameter with the resource name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.validateTrust" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsValidateTrustCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Attach the raw response metadata to the decoded result.
	ret := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Validates a trust state, that the target domain is reachable, and that the target domain is able to accept incoming trust requests.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}:validateTrust",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.domains.validateTrust",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}:validateTrust",
	//   "request": {
	//     "$ref": "ValidateTrustRequest"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.sqlIntegrations.get":
// ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall holds the state
// for a sqlIntegrations.get RPC. Construct it via
// ProjectsLocationsGlobalDomainsSqlIntegrationsService.Get and run it with Do.
type ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall struct {
	s            *Service
	name         string // SQLIntegration resource name, expanded into the URL path
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string // optional ETag sent as If-None-Match
	ctx_         context.Context
	header_      http.Header
}
// Get: Gets details of a single sqlIntegration.
//
// - name: SQLIntegration resource name using the form:
//   `projects/{project_id}/locations/global/domains/{domain}/sqlIntegrat
//   ions/{name}`.
func (r *ProjectsLocationsGlobalDomainsSqlIntegrationsService) Get(name string) *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall {
	return &ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
}
// Fields selects which fields to include in a partial response. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details. Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall {
	c.ctx_ = ctx
	return c
}
// Header returns the http.Header map that will be merged into the
// request; callers may add custom headers to it. The map is created
// lazily on first use.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall) Header() http.Header {
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest builds and sends the HTTP GET request for this call,
// returning the raw *http.Response. It is invoked by Do and not
// intended for direct use.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Apply caller-supplied headers (set via Header()) first.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Conditional GET: only fetch if the resource's ETag differs.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path parameter with the resource name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.sqlIntegrations.get" call.
// Exactly one of *SqlIntegration or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *SqlIntegration.ServerResponse.Header or (if a response was returned
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsGetCall) Do(opts ...googleapi.CallOption) (*SqlIntegration, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Attach the raw response metadata to the decoded result.
	ret := &SqlIntegration{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Gets details of a single sqlIntegration.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}/sqlIntegrations/{sqlIntegrationsId}",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.global.domains.sqlIntegrations.get",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "Required. SQLIntegration resource name using the form: `projects/{project_id}/locations/global/domains/{domain}/sqlIntegrations/{name}`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+/sqlIntegrations/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}",
	//   "response": {
	//     "$ref": "SqlIntegration"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.domains.sqlIntegrations.list":
// ProjectsLocationsGlobalDomainsSqlIntegrationsListCall holds the state
// for a sqlIntegrations.list RPC. Construct it via
// ProjectsLocationsGlobalDomainsSqlIntegrationsService.List and run it
// with Do or Pages.
type ProjectsLocationsGlobalDomainsSqlIntegrationsListCall struct {
	s            *Service
	parent       string // parent domain resource name, expanded into the URL path
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string // optional ETag sent as If-None-Match
	ctx_         context.Context
	header_      http.Header
}
// List: Lists SqlIntegrations in a given domain.
//
// - parent: The resource name of the SqlIntegrations using the form:
//   `projects/{project_id}/locations/global/domains/*`.
func (r *ProjectsLocationsGlobalDomainsSqlIntegrationsService) List(parent string) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	return &ProjectsLocationsGlobalDomainsSqlIntegrationsListCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		parent:     parent,
	}
}
// Filter sets the optional parameter "filter": Filter specifying
// constraints of a list operation. For example,
// `SqlIntegration.name="sql"`.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) Filter(filter string) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	c.urlParams_.Set("filter", filter)
	return c
}
// OrderBy sets the optional parameter "orderBy": Specifies the ordering
// of results following syntax at
// https://cloud.google.com/apis/design/design_patterns#sorting_order.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) OrderBy(orderBy string) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	c.urlParams_.Set("orderBy", orderBy)
	return c
}
// PageSize sets the optional parameter "pageSize": the maximum number
// of items to return. If not specified, the service uses a default of
// 1000. Regardless of the page_size value, the response may contain a
// partial list; callers should rely only on the response's
// next_page_token to determine whether more instances remain.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) PageSize(pageSize int64) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	c.urlParams_.Set("pageSize", fmt.Sprintf("%d", pageSize))
	return c
}
// PageToken sets the optional parameter "pageToken": The
// next_page_token value returned from a previous List request, if any.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) PageToken(pageToken string) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
// Fields selects which fields to include in a partial response. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details. Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) Context(ctx context.Context) *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall {
	c.ctx_ = ctx
	return c
}
// Header returns the http.Header map that will be merged into the
// request; callers may add custom headers to it. The map is created
// lazily on first use.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) Header() http.Header {
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest builds and sends the HTTP GET request for this call,
// returning the raw *http.Response. It is invoked by Do and not
// intended for direct use.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Apply caller-supplied headers (set via Header()) first.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Conditional GET: only fetch if the resource's ETag differs.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/sqlIntegrations")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+parent} path parameter with the parent resource name.
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.domains.sqlIntegrations.list" call.
// Exactly one of *ListSqlIntegrationsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ListSqlIntegrationsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) Do(opts ...googleapi.CallOption) (*ListSqlIntegrationsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Attach the raw response metadata to the decoded result.
	ret := &ListSqlIntegrationsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Lists SqlIntegrations in a given domain.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/domains/{domainsId}/sqlIntegrations",
	//   "httpMethod": "GET",
	//   "id": "managedidentities.projects.locations.global.domains.sqlIntegrations.list",
	//   "parameterOrder": [
	//     "parent"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "Optional. Filter specifying constraints of a list operation. For example, `SqlIntegration.name=\"sql\"`.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "orderBy": {
	//       "description": "Optional. Specifies the ordering of results following syntax at https://cloud.google.com/apis/design/design_patterns#sorting_order.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "pageSize": {
	//       "description": "Optional. The maximum number of items to return. If not specified, a default value of 1000 will be used by the service. Regardless of the page_size value, the response may include a partial list and a caller should only rely on response's next_page_token to determine if there are more instances left to be queried.",
	//       "format": "int32",
	//       "location": "query",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "Optional. The next_page_token value returned from a previous List request, if any.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "parent": {
	//       "description": "Required. The resource name of the SqlIntegrations using the form: `projects/{project_id}/locations/global/domains/*`",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/domains/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+parent}/sqlIntegrations",
	//   "response": {
	//     "$ref": "ListSqlIntegrationsResponse"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsGlobalDomainsSqlIntegrationsListCall) Pages(ctx context.Context, f func(*ListSqlIntegrationsResponse) error) error {
	c.ctx_ = ctx
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty next_page_token means the last page has been reached.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
// method id "managedidentities.projects.locations.global.operations.cancel":
// ProjectsLocationsGlobalOperationsCancelCall holds the state for an
// operations.cancel RPC. Construct it via
// ProjectsLocationsGlobalOperationsService.Cancel and run it with Do.
type ProjectsLocationsGlobalOperationsCancelCall struct {
	s                      *Service
	name                   string                  // operation resource name, expanded into the URL path
	canceloperationrequest *CancelOperationRequest // JSON-encoded request body
	urlParams_             gensupport.URLParams
	ctx_                   context.Context
	header_                http.Header
}
// Cancel: Starts asynchronous cancellation on a long-running operation.
// The server makes a best effort to cancel the operation, but success
// is not guaranteed. If the server doesn't support this method, it
// returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use
// Operations.GetOperation or other methods to check whether the
// cancellation succeeded or whether the operation completed despite
// cancellation. On successful cancellation, the operation is not
// deleted; instead, it becomes an operation with an Operation.error
// value with a google.rpc.Status.code of 1, corresponding to
// `Code.CANCELLED`.
//
// - name: The name of the operation resource to be cancelled.
func (r *ProjectsLocationsGlobalOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsGlobalOperationsCancelCall {
	return &ProjectsLocationsGlobalOperationsCancelCall{
		s:                      r.s,
		urlParams_:             make(gensupport.URLParams),
		name:                   name,
		canceloperationrequest: canceloperationrequest,
	}
}
// Fields selects which fields to include in a partial response. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details. Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsCancelCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsCancelCall {
	c.ctx_ = ctx
	return c
}
// Header returns the http.Header map that will be merged into the
// request; callers may add custom headers to it. The map is created
// lazily on first use.
func (c *ProjectsLocationsGlobalOperationsCancelCall) Header() http.Header {
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest builds and sends the HTTP request for this call, returning
// the raw *http.Response. It is invoked by Do and not intended for
// direct use.
func (c *ProjectsLocationsGlobalOperationsCancelCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Apply caller-supplied headers (set via Header()) first.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	// Encode the request message as JSON without a data wrapper.
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path parameter with the operation name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.operations.cancel" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Attach the raw response metadata to the decoded result.
	ret := &Empty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/operations/{operationsId}:cancel",
	//   "httpMethod": "POST",
	//   "id": "managedidentities.projects.locations.global.operations.cancel",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource to be cancelled.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/operations/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}:cancel",
	//   "request": {
	//     "$ref": "CancelOperationRequest"
	//   },
	//   "response": {
	//     "$ref": "Empty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.operations.delete":
// ProjectsLocationsGlobalOperationsDeleteCall holds the state for an
// operations.delete RPC. Construct it via
// ProjectsLocationsGlobalOperationsService.Delete and run it with Do.
type ProjectsLocationsGlobalOperationsDeleteCall struct {
	s          *Service
	name       string // operation resource name, expanded into the URL path
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}
// Delete: Deletes a long-running operation. This method indicates that
// the client is no longer interested in the operation result. It does
// not cancel the operation. If the server doesn't support this method,
// it returns `google.rpc.Code.UNIMPLEMENTED`.
//
// - name: The name of the operation resource to be deleted.
func (r *ProjectsLocationsGlobalOperationsService) Delete(name string) *ProjectsLocationsGlobalOperationsDeleteCall {
	return &ProjectsLocationsGlobalOperationsDeleteCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
}
// Fields selects which fields to include in a partial response. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details. Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsDeleteCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalOperationsDeleteCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsDeleteCall {
	c.ctx_ = ctx
	return c
}
// Header returns the http.Header map that will be merged into the
// request; callers may add custom headers to it. The map is created
// lazily on first use.
func (c *ProjectsLocationsGlobalOperationsDeleteCall) Header() http.Header {
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest builds and sends the HTTP DELETE request for this call,
// returning the raw *http.Response. It is invoked by Do and not
// intended for direct use.
func (c *ProjectsLocationsGlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Apply caller-supplied headers (set via Header()) first.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("DELETE", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path parameter with the operation name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.operations.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 Not Modified is surfaced as a *googleapi.Error carrying the
	// response headers so callers can detect it via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	// Attach the raw response metadata to the decoded result.
	ret := &Empty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.",
	//   "flatPath": "v1/projects/{projectsId}/locations/global/operations/{operationsId}",
	//   "httpMethod": "DELETE",
	//   "id": "managedidentities.projects.locations.global.operations.delete",
	//   "parameterOrder": [
	//     "name"
	//   ],
	//   "parameters": {
	//     "name": {
	//       "description": "The name of the operation resource to be deleted.",
	//       "location": "path",
	//       "pattern": "^projects/[^/]+/locations/global/operations/[^/]+$",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "v1/{+name}",
	//   "response": {
	//     "$ref": "Empty"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/cloud-platform"
	//   ]
	// }
}
// method id "managedidentities.projects.locations.global.operations.get":
// ProjectsLocationsGlobalOperationsGetCall holds the state for an
// operations.get RPC. Construct it via
// ProjectsLocationsGlobalOperationsService.Get and run it with Do.
type ProjectsLocationsGlobalOperationsGetCall struct {
	s            *Service
	name         string // operation resource name, expanded into the URL path
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string // optional ETag sent as If-None-Match
	ctx_         context.Context
	header_      http.Header
}
// Get: Gets the latest state of a long-running operation. Clients can
// use this method to poll the operation result at intervals as
// recommended by the API service.
//
// - name: The name of the operation resource.
func (r *ProjectsLocationsGlobalOperationsService) Get(name string) *ProjectsLocationsGlobalOperationsGetCall {
	return &ProjectsLocationsGlobalOperationsGetCall{
		s:          r.s,
		urlParams_: make(gensupport.URLParams),
		name:       name,
	}
}
// Fields selects which fields to include in a partial response. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details. Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsGetCall {
	fields := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", fields)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalOperationsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// Returns c so calls can be chained.
func (c *ProjectsLocationsGlobalOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsGetCall {
	c.ctx_ = ctx
	return c
}
// Header returns the http.Header map that will be merged into the
// request; callers may add custom headers to it. The map is created
// lazily on first use.
func (c *ProjectsLocationsGlobalOperationsGetCall) Header() http.Header {
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest builds and sends the HTTP GET request for this call,
// returning the raw *http.Response. It is invoked by Do and not
// intended for direct use.
func (c *ProjectsLocationsGlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	// Apply caller-supplied headers (set via Header()) first.
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	// Conditional GET: only fetch if the resource's ETag differs.
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	// Substitute the {+name} path parameter with the operation name.
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.operations.get" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
// A 304 (possible when IfNoneMatch was set) is surfaced as a
// *googleapi.Error instead of a decoded response.
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// The commented JSON below is this method's entry from the API
// discovery document, kept by the generator for reference.
// {
//   "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
//   "flatPath": "v1/projects/{projectsId}/locations/global/operations/{operationsId}",
//   "httpMethod": "GET",
//   "id": "managedidentities.projects.locations.global.operations.get",
//   "parameterOrder": [
//     "name"
//   ],
//   "parameters": {
//     "name": {
//       "description": "The name of the operation resource.",
//       "location": "path",
//       "pattern": "^projects/[^/]+/locations/global/operations/[^/]+$",
//       "required": true,
//       "type": "string"
//     }
//   },
//   "path": "v1/{+name}",
//   "response": {
//     "$ref": "Operation"
//   },
//   "scopes": [
//     "https://www.googleapis.com/auth/cloud-platform"
//   ]
// }
}
// method id "managedidentities.projects.locations.global.operations.list":
type ProjectsLocationsGlobalOperationsListCall struct {
s *Service // API client used to issue the request
name string // parent resource name, expanded into the URL path
urlParams_ gensupport.URLParams // URL query parameters
ifNoneMatch_ string // optional ETag for conditional requests
ctx_ context.Context // optional context controlling the request
header_ http.Header // extra HTTP headers, lazily created by Header()
}
// List: Lists operations that match the specified filter in the
// request. If the server doesn't support this method, it returns
// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
// override the binding to use different resource name schemes, such as
// `users/*/operations`. To override the binding, API services can add a
// binding such as "/v1/{name=users/*}/operations" to their service
// configuration. For backwards compatibility, the default name includes
// the operations collection id, however overriding users must ensure
// the name binding is the parent resource, without the operations
// collection id.
//
// - name: The name of the operation's parent resource.
func (r *ProjectsLocationsGlobalOperationsService) List(name string) *ProjectsLocationsGlobalOperationsListCall {
	// Build the call in a single composite literal.
	return &ProjectsLocationsGlobalOperationsListCall{
		s:          r.s,
		name:       name,
		urlParams_: make(gensupport.URLParams),
	}
}
// Filter sets the optional parameter "filter": The standard list
// filter. The value is sent as the `filter` query parameter.
func (c *ProjectsLocationsGlobalOperationsListCall) Filter(filter string) *ProjectsLocationsGlobalOperationsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": The standard list
// page size.
func (c *ProjectsLocationsGlobalOperationsListCall) PageSize(pageSize int64) *ProjectsLocationsGlobalOperationsListCall {
	// Query parameters are strings, so render the int64 first.
	v := fmt.Sprint(pageSize)
	c.urlParams_.Set("pageSize", v)
	return c
}
// PageToken sets the optional parameter "pageToken": The standard list
// page token. Typically the NextPageToken from a prior List response.
func (c *ProjectsLocationsGlobalOperationsListCall) PageToken(pageToken string) *ProjectsLocationsGlobalOperationsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalOperationsListCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalOperationsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is forwarded to gensupport.SendRequest when
// the call executes.
func (c *ProjectsLocationsGlobalOperationsListCall) Context(ctx context.Context) *ProjectsLocationsGlobalOperationsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalOperationsListCall) Header() http.Header {
	// Allocate the header map lazily, on first access.
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest assembles and sends the HTTP GET request for this call,
// returning the raw response; Do decodes and closes it.
func (c *ProjectsLocationsGlobalOperationsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
// Copy caller-supplied headers (see Header) into the request.
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
// Substitute the parent resource name into the {+name} template.
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.operations.list" call.
// Exactly one of *ListOperationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListOperationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
// A 304 (possible when IfNoneMatch was set) is surfaced as a
// *googleapi.Error instead of a decoded response.
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListOperationsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// The commented JSON below is this method's entry from the API
// discovery document, kept by the generator for reference.
// {
//   "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.",
//   "flatPath": "v1/projects/{projectsId}/locations/global/operations",
//   "httpMethod": "GET",
//   "id": "managedidentities.projects.locations.global.operations.list",
//   "parameterOrder": [
//     "name"
//   ],
//   "parameters": {
//     "filter": {
//       "description": "The standard list filter.",
//       "location": "query",
//       "type": "string"
//     },
//     "name": {
//       "description": "The name of the operation's parent resource.",
//       "location": "path",
//       "pattern": "^projects/[^/]+/locations/global/operations$",
//       "required": true,
//       "type": "string"
//     },
//     "pageSize": {
//       "description": "The standard list page size.",
//       "format": "int32",
//       "location": "query",
//       "type": "integer"
//     },
//     "pageToken": {
//       "description": "The standard list page token.",
//       "location": "query",
//       "type": "string"
//     }
//   },
//   "path": "v1/{+name}",
//   "response": {
//     "$ref": "ListOperationsResponse"
//   },
//   "scopes": [
//     "https://www.googleapis.com/auth/cloud-platform"
//   ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsGlobalOperationsListCall) Pages(ctx context.Context, f func(*ListOperationsResponse) error) error {
c.ctx_ = ctx
// The defer argument is evaluated now, capturing the caller's
// original page token so the call can be reused after iteration.
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
// An empty NextPageToken marks the final page.
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "managedidentities.projects.locations.global.peerings.create":
type ProjectsLocationsGlobalPeeringsCreateCall struct {
s *Service // API client used to issue the request
parent string // parent resource name, expanded into the URL path
peering *Peering // request body, serialized as JSON by doRequest
urlParams_ gensupport.URLParams // URL query parameters
ctx_ context.Context // optional context controlling the request
header_ http.Header // extra HTTP headers, lazily created by Header()
}
// Create: Creates a Peering for Managed AD instance.
//
// - parent: Resource project name and location using the form:
//   `projects/{project_id}/locations/global`.
func (r *ProjectsLocationsGlobalPeeringsService) Create(parent string, peering *Peering) *ProjectsLocationsGlobalPeeringsCreateCall {
	// Build the call in a single composite literal.
	return &ProjectsLocationsGlobalPeeringsCreateCall{
		s:          r.s,
		parent:     parent,
		peering:    peering,
		urlParams_: make(gensupport.URLParams),
	}
}
// PeeringId sets the optional parameter "peeringId": Required. Peering
// Id, unique name to identify peering. It should follow the regex
// format "^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$"
func (c *ProjectsLocationsGlobalPeeringsCreateCall) PeeringId(peeringId string) *ProjectsLocationsGlobalPeeringsCreateCall {
c.urlParams_.Set("peeringId", peeringId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalPeeringsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsCreateCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is forwarded to gensupport.SendRequest when
// the call executes.
func (c *ProjectsLocationsGlobalPeeringsCreateCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsCreateCall) Header() http.Header {
	// Allocate the header map lazily, on first access.
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest assembles and sends the HTTP POST request for this call,
// serializing c.peering as the JSON request body; Do decodes and
// closes the response.
func (c *ProjectsLocationsGlobalPeeringsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
// Copy caller-supplied headers (see Header) into the request.
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.peering)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/peerings")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
// Substitute the parent resource name into the {+parent} template.
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.create" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalPeeringsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
// A 304 response is surfaced as a *googleapi.Error instead of a
// decoded response.
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// The commented JSON below is this method's entry from the API
// discovery document, kept by the generator for reference.
// {
//   "description": "Creates a Peering for Managed AD instance.",
//   "flatPath": "v1/projects/{projectsId}/locations/global/peerings",
//   "httpMethod": "POST",
//   "id": "managedidentities.projects.locations.global.peerings.create",
//   "parameterOrder": [
//     "parent"
//   ],
//   "parameters": {
//     "parent": {
//       "description": "Required. Resource project name and location using the form: `projects/{project_id}/locations/global`",
//       "location": "path",
//       "pattern": "^projects/[^/]+/locations/global$",
//       "required": true,
//       "type": "string"
//     },
//     "peeringId": {
//       "description": "Required. Peering Id, unique name to identify peering. It should follow the regex format \"^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$\"",
//       "location": "query",
//       "type": "string"
//     }
//   },
//   "path": "v1/{+parent}/peerings",
//   "request": {
//     "$ref": "Peering"
//   },
//   "response": {
//     "$ref": "Operation"
//   },
//   "scopes": [
//     "https://www.googleapis.com/auth/cloud-platform"
//   ]
// }
}
// method id "managedidentities.projects.locations.global.peerings.delete":
type ProjectsLocationsGlobalPeeringsDeleteCall struct {
s *Service // API client used to issue the request
name string // peering resource name, expanded into the URL path
urlParams_ gensupport.URLParams // URL query parameters
ctx_ context.Context // optional context controlling the request
header_ http.Header // extra HTTP headers, lazily created by Header()
}
// Delete: Deletes identified Peering.
//
// - name: Peering resource name using the form:
//   `projects/{project_id}/locations/global/peerings/{peering_id}`.
func (r *ProjectsLocationsGlobalPeeringsService) Delete(name string) *ProjectsLocationsGlobalPeeringsDeleteCall {
	// Build the call in a single composite literal.
	return &ProjectsLocationsGlobalPeeringsDeleteCall{
		s:          r.s,
		name:       name,
		urlParams_: make(gensupport.URLParams),
	}
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalPeeringsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsDeleteCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is forwarded to gensupport.SendRequest when
// the call executes.
func (c *ProjectsLocationsGlobalPeeringsDeleteCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsDeleteCall) Header() http.Header {
	// Allocate the header map lazily, on first access.
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest assembles and sends the HTTP DELETE request for this call,
// returning the raw response; Do decodes and closes it.
func (c *ProjectsLocationsGlobalPeeringsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
// Copy caller-supplied headers (see Header) into the request.
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
// Substitute the peering resource name into the {+name} template.
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.delete" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalPeeringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
// A 304 response is surfaced as a *googleapi.Error instead of a
// decoded response.
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// The commented JSON below is this method's entry from the API
// discovery document, kept by the generator for reference.
// {
//   "description": "Deletes identified Peering.",
//   "flatPath": "v1/projects/{projectsId}/locations/global/peerings/{peeringsId}",
//   "httpMethod": "DELETE",
//   "id": "managedidentities.projects.locations.global.peerings.delete",
//   "parameterOrder": [
//     "name"
//   ],
//   "parameters": {
//     "name": {
//       "description": "Required. Peering resource name using the form: `projects/{project_id}/locations/global/peerings/{peering_id}`",
//       "location": "path",
//       "pattern": "^projects/[^/]+/locations/global/peerings/[^/]+$",
//       "required": true,
//       "type": "string"
//     }
//   },
//   "path": "v1/{+name}",
//   "response": {
//     "$ref": "Operation"
//   },
//   "scopes": [
//     "https://www.googleapis.com/auth/cloud-platform"
//   ]
// }
}
// method id "managedidentities.projects.locations.global.peerings.get":
type ProjectsLocationsGlobalPeeringsGetCall struct {
s *Service // API client used to issue the request
name string // peering resource name, expanded into the URL path
urlParams_ gensupport.URLParams // URL query parameters
ifNoneMatch_ string // optional ETag for conditional requests
ctx_ context.Context // optional context controlling the request
header_ http.Header // extra HTTP headers, lazily created by Header()
}
// Get: Gets details of a single Peering.
//
// - name: Peering resource name using the form:
//   `projects/{project_id}/locations/global/peerings/{peering_id}`.
func (r *ProjectsLocationsGlobalPeeringsService) Get(name string) *ProjectsLocationsGlobalPeeringsGetCall {
	// Build the call in a single composite literal.
	return &ProjectsLocationsGlobalPeeringsGetCall{
		s:          r.s,
		name:       name,
		urlParams_: make(gensupport.URLParams),
	}
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalPeeringsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsGetCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalPeeringsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalPeeringsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is forwarded to gensupport.SendRequest when
// the call executes.
func (c *ProjectsLocationsGlobalPeeringsGetCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsGetCall) Header() http.Header {
	// Allocate the header map lazily, on first access.
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest assembles and sends the HTTP GET request for this call,
// returning the raw response; Do decodes and closes it.
func (c *ProjectsLocationsGlobalPeeringsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
// Copy caller-supplied headers (see Header) into the request.
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
// Substitute the peering resource name into the {+name} template.
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.get" call.
// Exactly one of *Peering or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Peering.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalPeeringsGetCall) Do(opts ...googleapi.CallOption) (*Peering, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
// A 304 (possible when IfNoneMatch was set) is surfaced as a
// *googleapi.Error instead of a decoded response.
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Peering{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// The commented JSON below is this method's entry from the API
// discovery document, kept by the generator for reference.
// {
//   "description": "Gets details of a single Peering.",
//   "flatPath": "v1/projects/{projectsId}/locations/global/peerings/{peeringsId}",
//   "httpMethod": "GET",
//   "id": "managedidentities.projects.locations.global.peerings.get",
//   "parameterOrder": [
//     "name"
//   ],
//   "parameters": {
//     "name": {
//       "description": "Required. Peering resource name using the form: `projects/{project_id}/locations/global/peerings/{peering_id}`",
//       "location": "path",
//       "pattern": "^projects/[^/]+/locations/global/peerings/[^/]+$",
//       "required": true,
//       "type": "string"
//     }
//   },
//   "path": "v1/{+name}",
//   "response": {
//     "$ref": "Peering"
//   },
//   "scopes": [
//     "https://www.googleapis.com/auth/cloud-platform"
//   ]
// }
}
// method id "managedidentities.projects.locations.global.peerings.getIamPolicy":
type ProjectsLocationsGlobalPeeringsGetIamPolicyCall struct {
s *Service // API client used to issue the request
resource string // resource name, expanded into the URL path
urlParams_ gensupport.URLParams // URL query parameters
ifNoneMatch_ string // optional ETag for conditional requests
ctx_ context.Context // optional context controlling the request
header_ http.Header // extra HTTP headers, lazily created by Header()
}
// GetIamPolicy: Gets the access control policy for a resource. Returns
// an empty policy if the resource exists and does not have a policy
// set.
//
// - resource: REQUIRED: The resource for which the policy is being
//   requested. See the operation documentation for the appropriate
//   value for this field.
func (r *ProjectsLocationsGlobalPeeringsService) GetIamPolicy(resource string) *ProjectsLocationsGlobalPeeringsGetIamPolicyCall {
	// Build the call in a single composite literal.
	return &ProjectsLocationsGlobalPeeringsGetIamPolicyCall{
		s:          r.s,
		resource:   resource,
		urlParams_: make(gensupport.URLParams),
	}
}
// OptionsRequestedPolicyVersion sets the optional parameter
// "options.requestedPolicyVersion": The maximum policy version that
// will be used to format the policy. Valid values are 0, 1, and 3.
// Requests specifying an invalid value will be rejected. Requests for
// policies with any conditional role bindings must specify version 3.
// Policies with no conditional role bindings may specify any valid
// value or leave the field unset. The policy in the response might use
// the policy version that you specified, or it might use a lower policy
// version. For example, if you specify version 3, but the policy has no
// conditional role bindings, the response uses version 1. To learn
// which resources support conditions in their IAM policies, see the IAM
// documentation
// (https://cloud.google.com/iam/help/conditions/resource-policies).
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ProjectsLocationsGlobalPeeringsGetIamPolicyCall {
	// Query parameters are strings, so render the int64 first.
	v := fmt.Sprint(optionsRequestedPolicyVersion)
	c.urlParams_.Set("options.requestedPolicyVersion", v)
	return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsGetIamPolicyCall {
	combined := googleapi.CombineFields(s)
	c.urlParams_.Set("fields", combined)
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalPeeringsGetIamPolicyCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled. The context is forwarded to gensupport.SendRequest when
// the call executes.
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsGetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) Header() http.Header {
	// Allocate the header map lazily, on first access.
	h := c.header_
	if h == nil {
		h = make(http.Header)
		c.header_ = h
	}
	return h
}
// doRequest assembles and sends the HTTP GET request for this call,
// returning the raw response; Do decodes and closes it.
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
// Copy caller-supplied headers (see Header) into the request.
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
// Substitute the resource name into the {+resource} template.
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalPeeringsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
// A 304 (possible when IfNoneMatch was set) is surfaced as a
// *googleapi.Error instead of a decoded response.
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// The commented JSON below is this method's entry from the API
// discovery document, kept by the generator for reference.
// {
//   "description": "Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.",
//   "flatPath": "v1/projects/{projectsId}/locations/global/peerings/{peeringsId}:getIamPolicy",
//   "httpMethod": "GET",
//   "id": "managedidentities.projects.locations.global.peerings.getIamPolicy",
//   "parameterOrder": [
//     "resource"
//   ],
//   "parameters": {
//     "options.requestedPolicyVersion": {
//       "description": "Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).",
//       "format": "int32",
//       "location": "query",
//       "type": "integer"
//     },
//     "resource": {
//       "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.",
//       "location": "path",
//       "pattern": "^projects/[^/]+/locations/global/peerings/[^/]+$",
//       "required": true,
//       "type": "string"
//     }
//   },
//   "path": "v1/{+resource}:getIamPolicy",
//   "response": {
//     "$ref": "Policy"
//   },
//   "scopes": [
//     "https://www.googleapis.com/auth/cloud-platform"
//   ]
// }
}
// method id "managedidentities.projects.locations.global.peerings.list":
type ProjectsLocationsGlobalPeeringsListCall struct {
s *Service // API client used to issue the request
parent string // parent resource name, expanded into the URL path
urlParams_ gensupport.URLParams // URL query parameters
ifNoneMatch_ string // optional ETag for conditional requests
ctx_ context.Context // optional context controlling the request
header_ http.Header // extra HTTP headers, lazily created by Header()
}
// List: Lists Peerings in a given project.
//
// - parent: The resource name of the peering location using the form:
//   `projects/{project_id}/locations/global`.
func (r *ProjectsLocationsGlobalPeeringsService) List(parent string) *ProjectsLocationsGlobalPeeringsListCall {
	// Build the call in a single composite literal.
	return &ProjectsLocationsGlobalPeeringsListCall{
		s:          r.s,
		parent:     parent,
		urlParams_: make(gensupport.URLParams),
	}
}
// Filter sets the optional parameter "filter": Filter specifying
// constraints of a list operation. For example,
// `peering.authorized_network="projects/myprojectid/global/networks/myne
// twork"`.
func (c *ProjectsLocationsGlobalPeeringsListCall) Filter(filter string) *ProjectsLocationsGlobalPeeringsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// OrderBy sets the optional parameter "orderBy": Specifies the ordering
// of results following syntax at
// https://cloud.google.com/apis/design/design_patterns#sorting_order.
// The value is sent as the `orderBy` query parameter.
func (c *ProjectsLocationsGlobalPeeringsListCall) OrderBy(orderBy string) *ProjectsLocationsGlobalPeeringsListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
// PageSize sets the optional "pageSize" query parameter: the maximum
// number of items per page (server default 1000). The server may return
// fewer than requested regardless of this value; callers should rely on
// the response's next_page_token to detect further results.
func (c *ProjectsLocationsGlobalPeeringsListCall) PageSize(pageSize int64) *ProjectsLocationsGlobalPeeringsListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}
// PageToken sets the optional "pageToken" query parameter: the
// next_page_token value from a previous List response, if any.
func (c *ProjectsLocationsGlobalPeeringsListCall) PageToken(pageToken string) *ProjectsLocationsGlobalPeeringsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}
// Fields requests a partial response containing only the listed fields.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details.
func (c *ProjectsLocationsGlobalPeeringsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGlobalPeeringsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsGlobalPeeringsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}
// Context attaches ctx to this call; Do's pending HTTP request is
// aborted if ctx is canceled.
func (c *ProjectsLocationsGlobalPeeringsListCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsListCall {
	c.ctx_ = ctx
	return c
}
// Header returns the (lazily allocated) http.Header map that the caller
// may mutate to attach extra HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = http.Header{}
	}
	return c.header_
}
// doRequest assembles headers, query parameters and the expanded URL for
// the list call and sends the GET request.
func (c *ProjectsLocationsGlobalPeeringsListCall) doRequest(alt string) (*http.Response, error) {
	hdrs := make(http.Header)
	hdrs.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdrs[key] = vals
	}
	hdrs.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		hdrs.Set("If-None-Match", c.ifNoneMatch_)
	}
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/peerings") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}
	req.Header = hdrs
	googleapi.Expand(req.URL, map[string]string{"parent": c.parent})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.list" call.
// Exactly one of *ListPeeringsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListPeeringsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalPeeringsListCall) Do(opts ...googleapi.CallOption) (*ListPeeringsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	resp, err := c.doRequest("json")
	// A 304 (If-None-Match hit) is surfaced as *googleapi.Error so callers
	// can detect it with googleapi.IsNotModified; drain the body first.
	if resp != nil && resp.StatusCode == http.StatusNotModified {
		if resp.Body != nil {
			resp.Body.Close()
		}
		return nil, &googleapi.Error{Code: resp.StatusCode, Header: resp.Header}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(resp)
	if err := googleapi.CheckResponse(resp); err != nil {
		return nil, err
	}
	result := &ListPeeringsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         resp.Header,
			HTTPStatusCode: resp.StatusCode,
		},
	}
	if err := gensupport.DecodeResponse(&result, resp); err != nil {
		return nil, err
	}
	return result, nil
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsGlobalPeeringsListCall) Pages(ctx context.Context, f func(*ListPeeringsResponse) error) error {
	c.ctx_ = ctx
	// The loop below mutates the call's pageToken; restore the caller's
	// original token on exit so the call object can be reused.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		// An empty next_page_token marks the final page.
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}
// method id "managedidentities.projects.locations.global.peerings.patch":
// ProjectsLocationsGlobalPeeringsPatchCall accumulates the state of a
// peerings.patch request; execute it with Do.
type ProjectsLocationsGlobalPeeringsPatchCall struct {
	s *Service // parent client
	name string // required path parameter: the peering resource name
	peering *Peering // request body
	urlParams_ gensupport.URLParams // accumulated query parameters (e.g. updateMask)
	ctx_ context.Context // optional context; cancels the pending HTTP request
	header_ http.Header // extra caller-supplied HTTP headers
}
// Patch builds a call that updates the labels for the specified Peering.
//
// - name: Output only. Unique name of the peering in this scope
//   including projects and location using the form:
//   `projects/{project_id}/locations/global/peerings/{peering_id}`.
func (r *ProjectsLocationsGlobalPeeringsService) Patch(name string, peering *Peering) *ProjectsLocationsGlobalPeeringsPatchCall {
	return &ProjectsLocationsGlobalPeeringsPatchCall{
		s:          r.s,
		name:       name,
		peering:    peering,
		urlParams_: make(gensupport.URLParams),
	}
}
// UpdateMask sets the "updateMask" query parameter (required by the
// API): the mask of fields to update. At least one path must be supplied;
// the only Peering field the API accepts here is `labels`.
func (c *ProjectsLocationsGlobalPeeringsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsGlobalPeeringsPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}
// Fields requests a partial response containing only the listed fields.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details.
func (c *ProjectsLocationsGlobalPeeringsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
// Context attaches ctx to this call; Do's pending HTTP request is
// aborted if ctx is canceled.
func (c *ProjectsLocationsGlobalPeeringsPatchCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsPatchCall {
	c.ctx_ = ctx
	return c
}
// Header returns the (lazily allocated) http.Header map that the caller
// may mutate to attach extra HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = http.Header{}
	}
	return c.header_
}
// doRequest serializes the Peering body and sends the PATCH request.
func (c *ProjectsLocationsGlobalPeeringsPatchCall) doRequest(alt string) (*http.Response, error) {
	hdrs := make(http.Header)
	hdrs.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdrs[key] = vals
	}
	hdrs.Set("User-Agent", c.s.userAgent())
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.peering)
	if err != nil {
		return nil, err
	}
	hdrs.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("PATCH", u, body)
	if err != nil {
		return nil, err
	}
	req.Header = hdrs
	googleapi.Expand(req.URL, map[string]string{"name": c.name})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.patch" call.
// Exactly one of *Operation or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalPeeringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	resp, err := c.doRequest("json")
	// Surface a 304 as *googleapi.Error (see googleapi.IsNotModified).
	if resp != nil && resp.StatusCode == http.StatusNotModified {
		if resp.Body != nil {
			resp.Body.Close()
		}
		return nil, &googleapi.Error{Code: resp.StatusCode, Header: resp.Header}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(resp)
	if err := googleapi.CheckResponse(resp); err != nil {
		return nil, err
	}
	result := &Operation{
		ServerResponse: googleapi.ServerResponse{
			Header:         resp.Header,
			HTTPStatusCode: resp.StatusCode,
		},
	}
	if err := gensupport.DecodeResponse(&result, resp); err != nil {
		return nil, err
	}
	return result, nil
}
// method id "managedidentities.projects.locations.global.peerings.setIamPolicy":
// ProjectsLocationsGlobalPeeringsSetIamPolicyCall accumulates the state
// of a peerings.setIamPolicy request; execute it with Do.
type ProjectsLocationsGlobalPeeringsSetIamPolicyCall struct {
	s *Service // parent client
	resource string // required path parameter: the resource whose policy is set
	setiampolicyrequest *SetIamPolicyRequest // request body
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_ context.Context // optional context; cancels the pending HTTP request
	header_ http.Header // extra caller-supplied HTTP headers
}
// SetIamPolicy builds a call that sets the access control policy on the
// specified resource, replacing any existing policy. Can return
// `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
//
// - resource: REQUIRED: The resource for which the policy is being
//   specified. See the operation documentation for the appropriate
//   value for this field.
func (r *ProjectsLocationsGlobalPeeringsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsGlobalPeeringsSetIamPolicyCall {
	return &ProjectsLocationsGlobalPeeringsSetIamPolicyCall{
		s:                   r.s,
		resource:            resource,
		setiampolicyrequest: setiampolicyrequest,
		urlParams_:          make(gensupport.URLParams),
	}
}
// Fields requests a partial response containing only the listed fields.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details.
func (c *ProjectsLocationsGlobalPeeringsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsSetIamPolicyCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
// Context attaches ctx to this call; Do's pending HTTP request is
// aborted if ctx is canceled.
func (c *ProjectsLocationsGlobalPeeringsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsSetIamPolicyCall {
	c.ctx_ = ctx
	return c
}
// Header returns the (lazily allocated) http.Header map that the caller
// may mutate to attach extra HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsSetIamPolicyCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = http.Header{}
	}
	return c.header_
}
// doRequest serializes the SetIamPolicyRequest body and sends the POST.
func (c *ProjectsLocationsGlobalPeeringsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
	hdrs := make(http.Header)
	hdrs.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdrs[key] = vals
	}
	hdrs.Set("User-Agent", c.s.userAgent())
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
	if err != nil {
		return nil, err
	}
	hdrs.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", u, body)
	if err != nil {
		return nil, err
	}
	req.Header = hdrs
	googleapi.Expand(req.URL, map[string]string{"resource": c.resource})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsGlobalPeeringsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	resp, err := c.doRequest("json")
	// Surface a 304 as *googleapi.Error (see googleapi.IsNotModified).
	if resp != nil && resp.StatusCode == http.StatusNotModified {
		if resp.Body != nil {
			resp.Body.Close()
		}
		return nil, &googleapi.Error{Code: resp.StatusCode, Header: resp.Header}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(resp)
	if err := googleapi.CheckResponse(resp); err != nil {
		return nil, err
	}
	result := &Policy{
		ServerResponse: googleapi.ServerResponse{
			Header:         resp.Header,
			HTTPStatusCode: resp.StatusCode,
		},
	}
	if err := gensupport.DecodeResponse(&result, resp); err != nil {
		return nil, err
	}
	return result, nil
}
// method id "managedidentities.projects.locations.global.peerings.testIamPermissions":
// ProjectsLocationsGlobalPeeringsTestIamPermissionsCall accumulates the
// state of a peerings.testIamPermissions request; execute it with Do.
type ProjectsLocationsGlobalPeeringsTestIamPermissionsCall struct {
	s *Service // parent client
	resource string // required path parameter: the resource being queried
	testiampermissionsrequest *TestIamPermissionsRequest // request body
	urlParams_ gensupport.URLParams // accumulated query parameters
	ctx_ context.Context // optional context; cancels the pending HTTP request
	header_ http.Header // extra caller-supplied HTTP headers
}
// TestIamPermissions builds a call that returns the permissions a caller
// has on the specified resource. If the resource does not exist, this
// returns an empty set of permissions, not a `NOT_FOUND` error. Note:
// this operation is designed for building permission-aware UIs and
// command-line tools, not for authorization checking; it may "fail open"
// without warning.
//
// - resource: REQUIRED: The resource for which the policy detail is
//   being requested. See the operation documentation for the
//   appropriate value for this field.
func (r *ProjectsLocationsGlobalPeeringsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall {
	return &ProjectsLocationsGlobalPeeringsTestIamPermissionsCall{
		s:                         r.s,
		resource:                  resource,
		testiampermissionsrequest: testiampermissionsrequest,
		urlParams_:                make(gensupport.URLParams),
	}
}
// Fields requests a partial response containing only the listed fields.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for details.
func (c *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}
// Context attaches ctx to this call; Do's pending HTTP request is
// aborted if ctx is canceled.
func (c *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall {
	c.ctx_ = ctx
	return c
}
// Header returns the (lazily allocated) http.Header map that the caller
// may mutate to attach extra HTTP headers to the request.
func (c *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = http.Header{}
	}
	return c.header_
}
// doRequest serializes the TestIamPermissionsRequest body and sends the POST.
func (c *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
	hdrs := make(http.Header)
	hdrs.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211114")
	for key, vals := range c.header_ {
		hdrs[key] = vals
	}
	hdrs.Set("User-Agent", c.s.userAgent())
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
	if err != nil {
		return nil, err
	}
	hdrs.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	u := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") + "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("POST", u, body)
	if err != nil {
		return nil, err
	}
	req.Header = hdrs
	googleapi.Expand(req.URL, map[string]string{"resource": c.resource})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "managedidentities.projects.locations.global.peerings.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsGlobalPeeringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	resp, err := c.doRequest("json")
	// Surface a 304 as *googleapi.Error (see googleapi.IsNotModified).
	if resp != nil && resp.StatusCode == http.StatusNotModified {
		if resp.Body != nil {
			resp.Body.Close()
		}
		return nil, &googleapi.Error{Code: resp.StatusCode, Header: resp.Header}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(resp)
	if err := googleapi.CheckResponse(resp); err != nil {
		return nil, err
	}
	result := &TestIamPermissionsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         resp.Header,
			HTTPStatusCode: resp.StatusCode,
		},
	}
	if err := gensupport.DecodeResponse(&result, resp); err != nil {
		return nil, err
	}
	return result, nil
}
|
NewProjectsLocationsGlobalDomainsSqlIntegrationsService
|
0036_alter_event_submission_type.py
|
# Generated by Django 3.2.5 on 2021-08-27 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Event.submission_type into an IntegerField with Code/File choices."""

    dependencies = [
        ('core', '0035_alter_event_code'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='submission_type',
            field=models.IntegerField(choices=[(1, 'Code'), (2, 'File')], default=1),
        ),
    ]
|
build.rs
|
#![allow(dead_code)]
#![allow(unused_imports)]
#[cfg(target_os = "windows")]
extern crate embed_resource;
use std::env;
use std::fs::File;
use std::io::Write;
/// Builds the contents of a Windows `version.rc` resource script that
/// stamps the produced DevolutionsGateway.exe with file/product version
/// metadata derived from the crate version (`CARGO_PKG_VERSION`).
/// Only compiled on Windows, where the resource is later embedded.
#[cfg(target_os = "windows")]
fn generate_version_rc() -> String {
    let output_name = "DevolutionsGateway";
    let filename = format!("{}.exe", output_name);
    let company_name = "Devolutions Inc.";
    let legal_copyright = format!("Copyright 2020 {}", company_name);
    // Cargo versions are three-part (x.y.z); Windows wants four, so append ".0".
    let version_number = env::var("CARGO_PKG_VERSION").unwrap() + ".0";
    // The VERSIONINFO header uses comma-separated numbers (x,y,z,0).
    let version_commas = version_number.replace(".", ",");
    let file_description = output_name;
    let file_version = version_number.clone();
    let internal_name = filename.clone();
    let original_filename = filename;
    let product_name = output_name;
    let product_version = version_number;
    let vs_file_version = version_commas.clone();
    let vs_product_version = version_commas;
    let version_rc = format!(
        r#"#include <winresrc.h>
VS_VERSION_INFO VERSIONINFO
    FILEVERSION {vs_file_version}
    PRODUCTVERSION {vs_product_version}
    FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
    FILEFLAGS 0x1L
#else
    FILEFLAGS 0x0L
#endif
    FILEOS 0x40004L
    FILETYPE 0x1L
    FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        BLOCK "040904b0"
        BEGIN
            VALUE "CompanyName", "{company_name}"
            VALUE "FileDescription", "{file_description}"
            VALUE "FileVersion", "{file_version}"
            VALUE "InternalName", "{internal_name}"
            VALUE "LegalCopyright", "{legal_copyright}"
            VALUE "OriginalFilename", "{original_filename}"
            VALUE "ProductName", "{product_name}"
            VALUE "ProductVersion", "{product_version}"
        END
    END
    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x409, 1200
    END
END
"#,
        vs_file_version = vs_file_version,
        vs_product_version = vs_product_version,
        company_name = company_name,
        file_description = file_description,
        file_version = file_version,
        internal_name = internal_name,
        legal_copyright = legal_copyright,
        original_filename = original_filename,
        product_name = product_name,
        product_version = product_version
    );
    version_rc
}
/// Build-script entry point. On Windows, writes the generated version.rc
/// into OUT_DIR and embeds it into the binary; a no-op elsewhere.
fn main() {
    #[cfg(target_os = "windows")]
    {
        let out_dir = env::var("OUT_DIR").unwrap();
        let version_rc_file = format!("{}/version.rc", out_dir);
        let version_rc_data = generate_version_rc();
        let mut file = File::create(&version_rc_file).expect("cannot create version.rc file");
        file.write_all(version_rc_data.as_bytes()).unwrap();
        embed_resource::compile(&version_rc_file);
    }
}
|
__init__.py
|
# Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_TUTORIAL_DIR
import subprocess
import os
import glob
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_TUTORIAL_DIR + "/examples/05_cpp_static_analysis/config"
SAMPLE_SUBMISSIONS = SUBMITTY_TUTORIAL_DIR + "/examples/05_cpp_static_analysis/submissions/"
@prebuild
def initialize(test):
    """Prepare the testcase directory: create assignment_config/ and data/
    and copy the sample assignment's config.json into place.

    `test.testcase_path` is the root of this testcase's working directory.
    """
    # exist_ok=True replaces the try/except-OSError dance and is safe on reruns.
    os.makedirs(os.path.join(test.testcase_path, "assignment_config"), exist_ok=True)
    os.makedirs(os.path.join(test.testcase_path, "data"), exist_ok=True)
    subprocess.call(["cp",
                     os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "config.json"),
                     os.path.join(test.testcase_path, "assignment_config")])
def cleanup(test):
    """Remove stray *cpp files from the testcase's data/ directory so each
    submission run starts from a clean slate."""
    # Pure-Python removal instead of shelling out to `rm -f` (portable,
    # no subprocess overhead).
    for path in glob.glob(os.path.join(test.testcase_path, "data", "*cpp")):
        try:
            os.remove(path)
        except OSError:
            # Mirror `rm -f` semantics: ignore files that vanish or
            # cannot be removed.
            pass
|
@testcase
def solution(test):
    """Grade the reference solution and diff the result against the
    expected grade report."""
    cleanup(test)
    src = os.path.join(SAMPLE_SUBMISSIONS, "solution.cpp")
    dst = os.path.join(test.testcase_path, "data", "solution.cpp")
    subprocess.call(["cp", src, dst])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt", "grade.txt_solution", "-b")
@testcase
def buggy(test):
    """Grade the first buggy submission and diff against its expected
    grade report."""
    cleanup(test)
    src = os.path.join(SAMPLE_SUBMISSIONS, "buggy.cpp")
    dst = os.path.join(test.testcase_path, "data", "buggy.cpp")
    subprocess.call(["cp", src, dst])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt", "grade.txt_buggy", "-b")
@testcase
def buggy2(test):
    """Grade the second buggy submission and diff against its expected
    grade report."""
    cleanup(test)
    src = os.path.join(SAMPLE_SUBMISSIONS, "buggy2.cpp")
    dst = os.path.join(test.testcase_path, "data", "buggy2.cpp")
    subprocess.call(["cp", src, dst])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt", "grade.txt_buggy2", "-b")
@testcase
def buggy3(test):
    """Grade the third buggy submission and diff against its expected
    grade report."""
    cleanup(test)
    src = os.path.join(SAMPLE_SUBMISSIONS, "buggy3.cpp")
    dst = os.path.join(test.testcase_path, "data", "buggy3.cpp")
    subprocess.call(["cp", src, dst])
    test.run_compile()
    test.run_run()
    test.run_validator()
    test.diff("grade.txt", "grade.txt_buggy3", "-b")
| |
test.rs
|
#![crate_name = "test"]
#![allow(unstable)]
/*
* This file is part of the uutils coreutils package.
*
* (c) mahkoh (ju.orth [at] gmail [dot] com)
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate libc;
use std::collections::HashMap;
use std::ffi::CString;
use std::os::{args_as_bytes};
use std::str::{from_utf8};
static NAME: &'static str = "test";
// TODO: decide how to handle non-UTF8 input for all the utils
/// Entry point for the `test`/`[` utility: evaluates the expression in
/// argv and returns the exit status — 0 (true), 1 (false) or 2 (error).
/// NOTE(review): this file targets a pre-1.0 Rust dialect
/// (`std::os::args_as_bytes`); it will not compile on modern rustc.
pub fn uumain(_: Vec<String>) -> isize {
    // Work on raw bytes rather than String so non-UTF8 arguments survive.
    let args = args_as_bytes();
    let args: Vec<&[u8]> = args.iter().map(|a| a.as_slice()).collect();
    if args.len() == 0 {
        return 2;
    }
    // NOTE(review): when argv[0] does NOT end with "test" the first
    // element is dropped here — verify this matches the caller's
    // argv conventions.
    let args =
        if !args[0].ends_with(NAME.as_bytes()) {
            &args[1..]
        } else {
            args.as_slice()
        };
    // When invoked as `[`, the expression must be closed by a trailing `]`.
    let args = match args[0] {
        b"[" => match args[args.len() - 1] {
            b"]" => &args[1..args.len() - 1],
            _ => return 2,
        },
        _ => &args[1..args.len()],
    };
    let mut error = false;
    // POSIX exit status: 0 for true, 1 for false — hence `1 - bool`.
    let retval = 1 - parse_expr(args, &mut error) as isize;
    if error {
        2
    } else {
        retval
    }
}
/// One-token expression: a lone string is true iff it is non-empty
/// (POSIX `test STRING`).
fn one(args: &[&[u8]]) -> bool {
    // Idiomatic emptiness check (clippy::len_zero) — same behavior as
    // the previous `args[0].len() > 0`.
    !args[0].is_empty()
}
/// Two-token expression: either `! STRING` or a unary operator applied
/// to its operand (file tests, string tests, `-t FD`). Unknown operators
/// set `*error` and yield false.
fn two(args: &[&[u8]], error: &mut bool) -> bool {
    match args[0] {
        b"!" => !one(&args[1..]),
        b"-b" => path(args[1], PathCondition::BlockSpecial),
        b"-c" => path(args[1], PathCondition::CharacterSpecial),
        b"-d" => path(args[1], PathCondition::Directory),
        b"-e" => path(args[1], PathCondition::Exists),
        b"-f" => path(args[1], PathCondition::Regular),
        b"-g" => path(args[1], PathCondition::GroupIDFlag),
        // -h and -L are synonyms: both test for a symbolic link.
        b"-h" => path(args[1], PathCondition::SymLink),
        b"-L" => path(args[1], PathCondition::SymLink),
        b"-n" => one(&args[1..]),
        b"-p" => path(args[1], PathCondition::FIFO),
        b"-r" => path(args[1], PathCondition::Readable),
        b"-S" => path(args[1], PathCondition::Socket),
        b"-s" => path(args[1], PathCondition::NonEmpty),
        b"-t" => isatty(args[1]),
        b"-u" => path(args[1], PathCondition::UserIDFlag),
        b"-w" => path(args[1], PathCondition::Writable),
        b"-x" => path(args[1], PathCondition::Executable),
        b"-z" => !one(&args[1..]),
        _ => {
            *error = true;
            false
        }
    }
}
/// Three-token expression: `A <op> B` string/integer comparisons, or
/// `! <two-token expression>` as a fallback. Unknown shapes set `*error`.
fn three(args: &[&[u8]], error: &mut bool) -> bool {
    match args[1] {
        b"=" => args[0] == args[2],
        b"!=" => args[0] != args[2],
        b"-eq" => integers(args[0], args[2], IntegerCondition::Equal),
        b"-ne" => integers(args[0], args[2], IntegerCondition::Unequal),
        b"-gt" => integers(args[0], args[2], IntegerCondition::Greater),
        b"-ge" => integers(args[0], args[2], IntegerCondition::GreaterEqual),
        b"-lt" => integers(args[0], args[2], IntegerCondition::Less),
        b"-le" => integers(args[0], args[2], IntegerCondition::LessEqual),
        // Not a binary operator: only `!` applied to a two-token
        // expression remains valid here.
        _ => match args[0] {
            b"!" => !two(&args[1..], error),
            _ => {
                *error = true;
                false
            }
        }
    }
}
/// Four-token expression: only `!` applied to a three-token expression
/// is valid; anything else sets `*error`.
fn four(args: &[&[u8]], error: &mut bool) -> bool {
    match args[0] {
        b"!" => {
            !three(&args[1..], error)
        }
        _ => {
            *error = true;
            false
        }
    }
}
/// Numeric comparison selectors for the -eq/-ne/-gt/-ge/-lt/-le tests.
enum IntegerCondition {
    Equal,
    Unequal,
    Greater,
    GreaterEqual,
    Less,
    LessEqual,
}
/// Compares two byte strings numerically (as i64) under `cond`.
/// Operands that are not valid UTF-8 or not parseable integers make the
/// whole comparison false (no error is reported).
fn integers(a: &[u8], b: &[u8], cond: IntegerCondition) -> bool {
    let (a, b): (&str, &str) = match (from_utf8(a), from_utf8(b)) {
        (Ok(a), Ok(b)) => (a, b),
        _ => return false,
    };
    // NOTE(review): matching `parse()` against Some/None is the pre-1.0
    // std API (modern `str::parse` returns Result) — this file targets
    // that old dialect.
    let (a, b): (i64, i64) = match (a.parse(), b.parse()) {
        (Some(a), Some(b)) => (a, b),
        _ => return false,
    };
    match cond {
        IntegerCondition::Equal => a == b,
        IntegerCondition::Unequal => a != b,
        IntegerCondition::Greater => a > b,
        IntegerCondition::GreaterEqual => a >= b,
        IntegerCondition::Less => a < b,
        IntegerCondition::LessEqual => a <= b,
    }
}
/// `-t FD`: true when `fd` parses as a decimal file-descriptor number
/// that refers to a terminal (via libc::isatty). Unparseable input is false.
fn isatty(fd: &[u8]) -> bool {
    use libc::{isatty};
    from_utf8(fd).ok().and_then(|s| s.parse())
        .map(|i| unsafe { isatty(i) == 1 }).unwrap_or(false)
}
/// Evaluates the leading sub-expression of `args` and advances the slice
/// past the tokens it consumed. Dispatches on the number of remaining
/// tokens; longer forms fall back to shorter ones on a parse error.
fn dispatch(args: &mut &[&[u8]], error: &mut bool) -> bool {
    let (val, idx) = match args.len() {
        0 => {
            // Empty expression: flag the error, consume nothing.
            *error = true;
            (false, 0)
        }
        1 => (one(*args), 1),
        2 => dispatch_two(args, error),
        3 => dispatch_three(args, error),
        _ => dispatch_four(args, error)
    };
    // Drop the consumed tokens so the caller only sees what remains.
    *args = &(*args)[idx..];
    val
}
/// Tries a two-token expression; on failure, clears the error and falls
/// back to a one-token string test (consuming a single token).
fn dispatch_two(args: &mut &[&[u8]], error: &mut bool) -> (bool, usize) {
    let val = two(*args, error);
    if *error {
        *error = false;
        (one(*args), 1)
    } else {
        (val, 2)
    }
}
/// Tries a three-token expression; on failure, clears the error and
/// falls back to the two-token form (which may fall back further).
fn dispatch_three(args: &mut &[&[u8]], error: &mut bool) -> (bool, usize) {
    let val = three(*args, error);
    if *error {
        *error = false;
        dispatch_two(args, error)
    } else {
        (val, 3)
    }
}
/// Tries a four-token expression; on failure, clears the error and
/// falls back to the three-token form (which may fall back further).
fn dispatch_four(args: &mut &[&[u8]], error: &mut bool) -> (bool, usize) {
    let val = four(*args, error);
    if *error {
        *error = false;
        dispatch_three(args, error)
    } else {
        (val, 4)
    }
}
/// Operator precedence levels, ordered weakest to strongest binding.
///
/// `Copy` and `Clone` are derived together: `Copy` cannot be implemented
/// without `Clone` (the original bare `impl Copy for Precedence {}`
/// predates Rust 1.0 and no longer compiles).
#[derive(Copy, Clone)]
enum Precedence {
    Unknown = 0,
    Paren, // FIXME: this is useless (parentheses have not been implemented)
    Or,
    And,
    BUnOp,
    BinOp,
    UnOp,
}
fn parse_expr(mut args: &[&[u8]], error: &mut bool) -> bool {
if args.len() == 0 {
false
} else {
let hashmap = setup_hashmap();
let lhs = dispatch(&mut args, error);
if args.len() > 0 {
parse_expr_helper(&hashmap, &mut args, lhs, Precedence::Unknown, error)
} else {
lhs
}
}
}
fn parse_expr_helper<'a>(hashmap: &HashMap<&'a [u8], Precedence>,
args: &mut &[&'a [u8]],
mut lhs: bool,
min_prec: Precedence,
error: &mut bool) -> bool {
let mut prec = *hashmap.get(&args[0]).unwrap_or_else(|| {
*error = true;
&min_prec
});
while !*error && args.len() > 0 && prec as usize >= min_prec as usize {
let op = args[0];
*args = &(*args)[1..];
let mut rhs = dispatch(args, error);
while args.len() > 0 {
let subprec = *hashmap.get(&args[0]).unwrap_or_else(|| {
*error = true;
&min_prec
});
if subprec as usize <= prec as usize || *error {
break;
}
rhs = parse_expr_helper(hashmap, args, rhs, subprec, error);
}
lhs = match prec {
Precedence::UnOp | Precedence::BUnOp => {
*error = true;
false
}
Precedence::And => lhs && rhs,
|
_ => unreachable!()
};
if args.len() > 0 {
prec = *hashmap.get(&args[0]).unwrap_or_else(|| {
*error = true;
&min_prec
});
}
}
lhs
}
/// Build the operator-precedence table used by the expression parser.
#[inline]
fn setup_hashmap<'a>() -> HashMap<&'a [u8], Precedence> {
    let mut table = HashMap::<&'a [u8], Precedence>::new();
    // Unary file-test and string-test operators.
    let unary: [&[u8]; 18] = [
        b"-b", b"-c", b"-d", b"-e", b"-f", b"-g", b"-h", b"-L", b"-n",
        b"-p", b"-r", b"-S", b"-s", b"-t", b"-u", b"-w", b"-x", b"-z",
    ];
    for &op in unary.iter() {
        table.insert(op, Precedence::UnOp);
    }
    // Binary string and integer comparison operators.
    let binary: [&[u8]; 8] = [
        b"=", b"!=", b"-eq", b"-ne", b"-gt", b"-ge", b"-lt", b"-le",
    ];
    for &op in binary.iter() {
        table.insert(op, Precedence::BinOp);
    }
    // Boolean connectives and grouping tokens.
    table.insert(b"!", Precedence::BUnOp);
    table.insert(b"-a", Precedence::And);
    table.insert(b"-o", Precedence::Or);
    table.insert(b"(", Precedence::Paren);
    table.insert(b")", Precedence::Paren);
    table
}
/// File conditions testable on a path; evaluated by `path()` below.
/// Names mirror the POSIX test(1) unary file operators; the flag-to-variant
/// mapping itself is outside this chunk.
#[derive(Eq, PartialEq)]
enum PathCondition {
    BlockSpecial, // block special device (S_IFBLK)
    CharacterSpecial, // character special device (S_IFCHR)
    Directory, // S_IFDIR
    Exists, // any stat()-able path
    Regular, // regular file (S_IFREG)
    GroupIDFlag, // set-group-ID bit (S_ISGID)
    SymLink, // symbolic link (checked with lstat, not stat)
    FIFO, // named pipe (S_IFIFO)
    Readable, // readable by the current user
    Socket, // S_IFSOCK
    NonEmpty, // size greater than zero
    UserIDFlag, // set-user-ID bit (S_ISUID)
    Writable, // writable by the current user
    Executable, // executable by the current user
}
#[cfg(not(windows))]
fn path(path: &[u8], cond: PathCondition) -> bool {
use libc::{stat, lstat, S_IFMT, S_IFLNK, S_IFBLK, S_IFCHR, S_IFDIR, S_IFREG};
use libc::{S_IFIFO, mode_t};
static S_ISUID: mode_t = 0o4000;
static S_ISGID: mode_t = 0o2000;
static S_IFSOCK: mode_t = 0o140000;
enum Permission {
Read = 0o4,
Write = 0o2,
Execute = 0o1,
}
let perm = |&: stat: stat, p: Permission| {
use libc::{getgid, getuid};
let (uid, gid) = unsafe { (getuid(), getgid()) };
if uid == stat.st_uid {
stat.st_mode & ((p as mode_t) << 6) != 0
} else if gid == stat.st_gid {
stat.st_mode & ((p as mode_t) << 3) != 0
} else {
stat.st_mode & ((p as mode_t) << 0) != 0
}
};
let path = CString::from_slice(path);
let mut stat = unsafe { std::mem::zeroed() };
if cond == PathCondition::SymLink {
if unsafe { lstat(path.as_ptr(), &mut stat) } == 0 {
if stat.st_mode & S_IFMT == S_IFLNK {
return true;
}
}
return false;
}
if unsafe { libc::stat(path.as_ptr(), &mut stat) } != 0 {
return false;
}
let file_type = stat.st_mode & S_IFMT;
match cond {
PathCondition::BlockSpecial => file_type == S_IFBLK,
PathCondition::CharacterSpecial => file_type == S_IFCHR,
PathCondition::Directory => file_type == S_IFDIR,
PathCondition::Exists => true,
PathCondition::Regular => file_type == S_IFREG,
PathCondition::GroupIDFlag => stat.st_mode & S_ISGID != 0,
PathCondition::SymLink => true,
PathCondition::FIFO => file_type == S_IFIFO,
PathCondition::Readable => perm(stat, Permission::Read),
PathCondition::Socket => file_type == S_IFSOCK,
PathCondition::NonEmpty => stat.st_size > 0,
PathCondition::UserIDFlag => stat.st_mode & S_ISUID != 0,
PathCondition::Writable => perm(stat, Permission::Write),
PathCondition::Executable => perm(stat, Permission::Execute),
}
}
/// Windows variant of the unary path tests.
///
/// NOTE(review): this body targets a pre-1.0 standard library
/// (`std::io::fs::stat`, `Path::new_opt`, `TypeFile`, ...) that no longer
/// exists; it cannot compile on a modern toolchain — confirm the intended
/// compiler before relying on it.
#[cfg(windows)]
fn path(path: &[u8], cond: PathCondition) -> bool {
    use std::io::{TypeFile, TypeDirectory, TypeBlockSpecial, TypeNamedPipe};
    use std::io::fs::{stat};
    use std::path::{Path};
    let path = match Path::new_opt(path) {
        Some(p) => p,
        None => return false,
    };
    let stat = match stat(&path) {
        Ok(s) => s,
        _ => return false,
    };
    // NOTE(review): the bare variant names below relied on old enum-variant
    // scoping; under modern rules they would be identifier patterns that
    // match anything — another sign this arm predates Rust 1.0.
    match cond {
        BlockSpecial => stat.kind == TypeBlockSpecial,
        CharacterSpecial => false,
        Directory => stat.kind == TypeDirectory,
        Exists => true,
        Regular => stat.kind == TypeFile,
        GroupIDFlag => false,
        SymLink => false,
        FIFO => stat.kind == TypeNamedPipe,
        Readable => false, // TODO
        Socket => false, // TODO?
        NonEmpty => stat.size > 0,
        UserIDFlag => false,
        Writable => false, // TODO
        Executable => false, // TODO
    }
}
|
Precedence::Or => lhs || rhs,
Precedence::BinOp => three(&[if lhs { b" " } else { b"" }, op, if rhs { b" " } else { b"" }], error),
Precedence::Paren => unimplemented!(), // TODO: implement parentheses
|
differential_evolution.py
|
import sys
import numpy as np
import scipy as sp
from robo.maximizers.base_maximizer import BaseMaximizer
class DifferentialEvolution(BaseMaximizer):
    def __init__(self, objective_function, lower, upper, n_iters=20, rng=None):
        """
        Maximizer that optimizes the acquisition function with SciPy's
        differential evolution strategy.

        Parameters
        ----------
        objective_function: acquisition function
            The acquisition function which will be maximized
        lower: np.ndarray (D)
            Lower bounds of the input space
        upper: np.ndarray (D)
            Upper bounds of the input space
        n_iters: int
            Number of iterations
        rng: optional
            Random number generator state; forwarded unchanged to the
            BaseMaximizer constructor.
        """
        self.n_iters = n_iters
        super(DifferentialEvolution, self).__init__(objective_function, lower, upper, rng)
|
if np.any(np.isinf(a)):
return sys.float_info.max
return a
return _l
    def maximize(self):
        """
        Maximizes the given acquisition function.

        Runs SciPy's differential evolution over the box [lower, upper]
        and clips the optimizer's result back into the bounds as a
        safeguard.

        Returns
        -------
        np.ndarray(N,D)
            Point with highest acquisition value.
        """
        bounds = list(zip(self.lower, self.upper))
        res = sp.optimize.differential_evolution(self._acquisition_fkt_wrapper(self.objective_func),
                                                 bounds, maxiter=self.n_iters)
        return np.clip(res["x"], self.lower, self.upper)
|
def _acquisition_fkt_wrapper(self, acq_f):
def _l(x):
a = -acq_f(np.array([np.clip(x, self.lower, self.upper)]))
|
rollup.config.js
|
import babel from '@rollup/plugin-babel';
import commonjs from '@rollup/plugin-commonjs';
import json from '@rollup/plugin-json';
import resolve from '@rollup/plugin-node-resolve';
import bundleSize from 'rollup-plugin-bundle-size';
import {terser} from 'rollup-plugin-terser';
export function
|
() {
return {
resolveId: module => {
if (module === 'vega' || module === 'util' || module === 'd3') {
throw new Error('Cannot import from Vega, Node Util, or D3 in Vega-Lite.');
}
return null;
}
};
}
/**
 * Rollup plugin that fails the build when `pako` sneaks into a bundle
 * (usually via a leftover drawDataflow debug call).
 */
export function debugImports() {
  const resolveId = module => {
    if (module === 'pako') {
      throw new Error('Do not import pako in builds. Did you forget to remove drawDataflow?');
    }
    return null;
  };
  return {resolveId};
}
const extensions = ['.js', '.ts'];
const outputs = [];
// Produce two bundles — a legacy ES5 build and a modern ES6 build — each
// emitting a plain and a minified UMD artifact.
for (const build of ['es5', 'es6']) {
  const buildFolder = build === 'es5' ? 'build-es5' : 'build';
  outputs.push({
    input: 'src/index.ts',
    output: [
      {
        file: `${buildFolder}/vega-lite.js`,
        format: 'umd',
        sourcemap: true,
        name: 'vegaLite'
      },
      {
        // minified variant of the same bundle
        file: `${buildFolder}/vega-lite.min.js`,
        format: 'umd',
        sourcemap: true,
        name: 'vegaLite',
        plugins: [terser()]
      }
    ],
    plugins: [
      disallowedImports(),
      debugImports(),
      resolve({browser: true, extensions}),
      commonjs(),
      json(),
      babel({
        extensions,
        babelHelpers: 'bundled',
        presets: [
          [
            '@babel/env',
            {
              // the ES5 build keeps default (IE-era) targets; the ES6 build drops IE 11
              targets: build === 'es5' ? 'defaults' : 'defaults and not IE 11'
            }
          ],
          '@babel/typescript'
        ]
      }),
      bundleSize()
    ],
    // 'vega' stays external and is never bundled
    external: ['vega']
  });
}
export default outputs;
|
disallowedImports
|
location.py
|
import math
import random
import geocoder
import gpxpy.geo
from geopy import Point, distance
from s2sphere import CellId, LatLng
from .custom_exceptions import GeneralPogoException
from .util import is_float
DEFAULT_RADIUS = 70
# Wrapper for location
class Location(object):
    def __init__(self, locationLookup, geo_key, api):
        # geo_key: geocoding API key, stored for later use (not read in this chunk).
        # api: client whose set_position() is driven by setCoordinates().
        self.geo_key = geo_key
        self.api = api
        # Resolve the initial position from the lookup string.
        self.setLocation(locationLookup)
    def __str__(self):
        # Human-readable "Coordinates: <lat> <lng> <alt>" summary.
        s = 'Coordinates: {} {} {}'.format(
            self.latitude,
            self.longitude,
            self.altitude
        )
        return s
    @staticmethod
    def getDistance(*coords):
        """Great-circle (haversine) distance between two points.

        Arguments are forwarded to gpxpy as (lat1, lon1, lat2, lon2).
        """
        return gpxpy.geo.haversine_distance(*coords)
    def getFortDistance(self, fort):
        # Distance from the current position to the fort's coordinates.
        lat, lng ,alt = self.getCoordinates()
        return self.getDistance(lat, lng, fort.latitude, fort.longitude)
    def setLocation(self, search):
        """Resolve `search` into latitude/longitude/altitude.

        Accepts either a raw "lat lng" coordinate pair (commas tolerated)
        or a free-form address, which is geocoded by trying several
        providers in order.

        Raises:
            GeneralPogoException: if no provider can resolve the search.
        """
        if len(search.split(" ")) == 2:
            f, s = [i.replace(',','') for i in search.split(" ")]
            # Input location is coordinates
            if is_float(f) and is_float(s):
                self.latitude = float(f)
                self.longitude = float(s)
                self.altitude = 8  # default altitude when none is supplied
                return self.latitude, self.longitude, self.altitude
        providers = ['google', 'osm', 'arcgis', 'freegeoip']
        for p in providers:
            geo = getattr(geocoder, p)(search)
            if geo.lat is not None and geo.lng is not None:
                # Elevation lookup may return a falsy value; fall back to 8.
                elev = geocoder.elevation(geo.latlng)
                self.latitude, self.longitude, self.altitude = geo.lat, geo.lng, elev.meters or 8
                return self.latitude, self.longitude, self.altitude
        raise GeneralPogoException("Location could not be found")
    def setCoordinates(self, latitude, longitude, override=True):
        # Update the stored position and push it to the api client.
        # NOTE(review): `override` is accepted but never used here — confirm
        # whether callers rely on it before removing.
        self.latitude = latitude
        self.longitude = longitude
        # Altitude is re-randomized on every move rather than kept fixed.
        self.altitude = random.randint(0,10)
        self.api.set_position(latitude, longitude, self.altitude)
    def getCoordinates(self):
        # Current (latitude, longitude, altitude) triple.
        return self.latitude, self.longitude, self.altitude
    def getNeighbors(self, lat, lng):
        # Collect the level-15 S2 cell containing (lat, lng), its eight
        # surrounding cells, and each surrounding cell's edge neighbors;
        # returned as a de-duplicated list of cell ids.
        origin = CellId.from_lat_lng(LatLng.from_degrees(lat, lng)).parent(15)
        neighbors = {origin.id()}
        edge_neighbors = origin.get_edge_neighbors()
        surrounding_neighbors = [
            edge_neighbors[0],  # North neighbor
            edge_neighbors[0].get_edge_neighbors()[1],  # North-east neighbor
            edge_neighbors[1],  # East neighbor
            edge_neighbors[2].get_edge_neighbors()[1],  # South-east neighbor
            edge_neighbors[2],  # South neighbor
            edge_neighbors[2].get_edge_neighbors()[3],  # South-west neighbor
            edge_neighbors[3],  # West neighbor
            edge_neighbors[0].get_edge_neighbors()[3],  # North-west neighbor
        ]
        for cell in surrounding_neighbors:
            neighbors.add(cell.id())
            for cell2 in cell.get_edge_neighbors():
                neighbors.add(cell2.id())
        return list(neighbors)
    def getCells(self, lat=0, lon=0):
        # Default to the current position when lat/lon are omitted.
        # NOTE(review): the falsy check means an explicit 0.0 (equator /
        # prime meridian) is also replaced by the stored position — confirm
        # this is intended.
        if not lat: lat = self.latitude
        if not lon: lon = self.longitude
        return self.getNeighbors(lat, lon)
def getAllSteps(self, radius=140):
start = list(self.getCoordinates()[:2])
allSteps = [start]
if radius <= DEFAULT_RADIUS:
|
distPerStep = 140
steps = math.ceil(radius/distPerStep)
lat, lon = start
origin = Point(lat, lon)
angleBetween = 60
for s in range(1, steps + 1):
for d in range(0, 360, int(angleBetween/min(s, 2))):
destination = distance.VincentyDistance(meters=s*distPerStep).destination(origin, d)
allSteps.append([destination.latitude, destination.longitude])
return allSteps
|
return allSteps
|
topology.rs
|
use super::{Element, Instance, Key, Platform};
use std::collections::{HashMap, HashSet};
use std::rc::Rc;
/// An element together with the keyed child instances ("edges")
/// attached to it.
pub struct Topology<P>
where
    P: Platform + ?Sized,
{
    /// The element this topology describes.
    element: Element<P>,
    /// This is a map of edges: child instances keyed for lookup and removal.
    edges: HashMap<Key, Rc<Instance<P>>>,
}
impl<P> Topology<P>
where
P: Platform + ?Sized,
{
pub fn new(element: Element<P>) -> Topology<P> {
Topology {
element,
edges: HashMap::new(),
}
}
    /// Borrow the element currently stored in this topology.
    pub fn element(&self) -> &Element<P> {
        &self.element
    }
    /// Replace the stored element, returning the one it displaces.
    pub fn update(&mut self, element: Element<P>) -> Element<P> {
        std::mem::replace(&mut self.element, element)
    }
pub fn keys(&self) -> HashSet<Key> {
self.edges.keys().cloned().collect()
}
    /// Look up a single edge by key.
    pub fn edge(&self, key: &Key) -> Option<&Rc<Instance<P>>> {
        self.edges.get(key)
    }
    /// Iterate over all edge instances (iteration order is unspecified).
    pub fn edges(&self) -> impl Iterator<Item = &Rc<Instance<P>>> {
        self.edges.values()
    }
pub fn add_edge(&mut self, key: Key, instance: Rc<Instance<P>>)
|
pub fn remove_edge(&mut self, key: &Key) -> Option<Rc<Instance<P>>> {
self.edges.remove(key)
}
}
|
{
self.edges.insert(key, instance);
}
|
jwt.ts
|
// This is an example of how to read a JSON Web Token from an API route
import { getToken } from "next-auth/jwt"
import type { NextApiRequest, NextApiResponse } from "next"
const secret = process.env.NEXTAUTH_SECRET
export default async (req: NextApiRequest, res: NextApiResponse) => {
const token = await getToken({ req, secret })
res.send(JSON.stringify(token, null, 2))
|
}
|
|
playlist-loader.js
|
/**
* @file playlist-loader.js
*
* A state machine that manages the loading, caching, and updating of
* M3U8 playlists.
*
*/
import resolveUrl from './resolve-url';
import {mergeOptions} from 'video.js';
import { isEnabled } from './playlist.js';
import Stream from './stream';
import m3u8 from 'm3u8-parser';
import window from 'global/window';
/**
 * Merge segment metadata from an outdated playlist onto an updated one.
 * Properties already present on the updated segments win; properties from
 * the older list are only carried forward when missing.
 *
 * @param {Array} original the outdated list of segments
 * @param {Array} update the updated list of segments
 * @param {Number=} offset the index of the first update segment within the
 *        original segment list. Zero for non-live playlists; for live
 *        playlists, the difference between the two media sequence numbers.
 * @return a list of merged segment objects
 */
const updateSegments = function(original, update, offset) {
  const merged = update.slice();
  const start = offset || 0;
  const end = Math.min(original.length, update.length + start);

  for (let i = start; i < end; i++) {
    merged[i - start] = mergeOptions(original[i], merged[i - start]);
  }
  return merged;
};
/**
* Returns a new master playlist that is the result of merging an
* updated media playlist into the original version. If the
* updated media playlist does not match any of the playlist
* entries in the original master playlist, null is returned.
*
* @param {Object} master a parsed master M3U8 object
* @param {Object} media a parsed media M3U8 object
* @return {Object} a new object that represents the original
* master playlist with the updated media playlist merged in, or
* null if the merge produced no change.
*/
const updateMaster = function(master, media) {
  let changed = false;
  // shallow copy so the caller's master object is never mutated
  let result = mergeOptions(master, {});
  let i = master.playlists.length;
  let playlist;
  let segment;
  let j;
  // scan for the playlist entry matching the updated media playlist
  while (i--) {
    playlist = result.playlists[i];
    if (playlist.uri === media.uri) {
      // consider the playlist unchanged if the number of segments
      // are equal and the media sequence number is unchanged
      if (playlist.segments &&
          media.segments &&
          playlist.segments.length === media.segments.length &&
          playlist.mediaSequence === media.mediaSequence) {
        continue;
      }
      result.playlists[i] = mergeOptions(playlist, media);
      // keep the by-URI alias in sync with the indexed entry
      result.playlists[media.uri] = result.playlists[i];
      // if the update could overlap existing segment information,
      // merge the two lists
      if (playlist.segments) {
        result.playlists[i].segments = updateSegments(
          playlist.segments,
          media.segments,
          media.mediaSequence - playlist.mediaSequence
        );
      }
      // resolve any missing segment and key URIs
      j = 0;
      if (result.playlists[i].segments) {
        j = result.playlists[i].segments.length;
      }
      while (j--) {
        segment = result.playlists[i].segments[j];
        if (!segment.resolvedUri) {
          segment.resolvedUri = resolveUrl(playlist.resolvedUri, segment.uri);
        }
        if (segment.key && !segment.key.resolvedUri) {
          segment.key.resolvedUri = resolveUrl(playlist.resolvedUri, segment.key.uri);
        }
        if (segment.map && !segment.map.resolvedUri) {
          segment.map.resolvedUri = resolveUrl(playlist.resolvedUri, segment.map.uri);
        }
      }
      changed = true;
    }
  }
  return changed ? result : null;
};
/**
* Load a playlist from a remote loacation
*
* @class PlaylistLoader
* @extends Stream
* @param {String} srcUrl the url to start with
* @param {Boolean} withCredentials the withCredentials xhr option
* @constructor
*/
const PlaylistLoader = function(srcUrl, hls, withCredentials) {
/* eslint-disable consistent-this */
let loader = this;
/* eslint-enable consistent-this */
let dispose;
let mediaUpdateTimeout;
let request;
let playlistRequestError;
let haveMetadata;
PlaylistLoader.prototype.constructor.call(this);
this.hls_ = hls;
if (!srcUrl) {
throw new Error('A non-empty playlist URL is required');
}
playlistRequestError = function(xhr, url, startingState) {
loader.setBandwidth(request || xhr);
// any in-flight request is now finished
request = null;
if (startingState) {
loader.state = startingState;
}
loader.error = {
playlist: loader.master.playlists[url],
status: xhr.status,
message: 'HLS playlist request error at URL: ' + url,
responseText: xhr.responseText,
code: (xhr.status >= 500) ? 4 : 2
};
loader.trigger('error');
};
// update the playlist loader's state in response to a new or
// updated playlist.
haveMetadata = function(xhr, url) {
let parser;
let refreshDelay;
let update;
loader.setBandwidth(request || xhr);
// any in-flight request is now finished
request = null;
loader.state = 'HAVE_METADATA';
parser = new m3u8.Parser();
parser.push(xhr.responseText);
parser.end();
parser.manifest.uri = url;
// merge this playlist into the master
update = updateMaster(loader.master, parser.manifest);
refreshDelay = (parser.manifest.targetDuration || 10) * 1000;
loader.targetDuration = parser.manifest.targetDuration;
if (update) {
loader.master = update;
loader.media_ = loader.master.playlists[parser.manifest.uri];
} else {
// if the playlist is unchanged since the last reload,
// try again after half the target duration
refreshDelay /= 2;
}
// refresh live playlists after a target duration passes
if (!loader.media().endList) {
window.clearTimeout(mediaUpdateTimeout);
mediaUpdateTimeout = window.setTimeout(function() {
loader.trigger('mediaupdatetimeout');
}, refreshDelay);
}
loader.trigger('loadedplaylist');
};
// initialize the loader state
loader.state = 'HAVE_NOTHING';
// capture the prototype dispose function
dispose = this.dispose;
/**
* Abort any outstanding work and clean up.
*/
loader.dispose = function() {
loader.stopRequest();
window.clearTimeout(mediaUpdateTimeout);
dispose.call(this);
};
loader.stopRequest = () => {
if (request) {
let oldRequest = request;
request = null;
oldRequest.onreadystatechange = null;
oldRequest.abort();
}
};
/**
* Returns the number of enabled playlists on the master playlist object
*
* @return {Number} number of eneabled playlists
*/
loader.enabledPlaylists_ = function() {
return loader.master.playlists.filter(isEnabled).length;
};
/**
* Returns whether the current playlist is the lowest rendition
*
* @return {Boolean} true if on lowest rendition
*/
loader.isLowestEnabledRendition_ = function() {
let media = loader.media();
if (!media || !media.attributes) {
return false;
}
let currentBandwidth = media.attributes.BANDWIDTH || 0;
return (loader.master.playlists.filter((playlist) => {
const enabled = isEnabled(playlist);
if (!enabled) {
return false;
}
let bandwidth = 0;
if (playlist && playlist.attributes) {
bandwidth = playlist.attributes.BANDWIDTH;
}
return bandwidth < currentBandwidth;
}).length === 0);
};
/**
* When called without any arguments, returns the currently
* active media playlist. When called with a single argument,
* triggers the playlist loader to asynchronously switch to the
* specified media playlist. Calling this method while the
* loader is in the HAVE_NOTHING causes an error to be emitted
* but otherwise has no effect.
*
* @param {Object=} playlis tthe parsed media playlist
* object to switch to
* @return {Playlist} the current loaded media
*/
loader.media = function(playlist) {
let startingState = loader.state;
let mediaChange;
// getter
if (!playlist) {
return loader.media_;
}
// setter
if (loader.state === 'HAVE_NOTHING') {
throw new Error('Cannot switch media playlist from ' + loader.state);
}
// find the playlist object if the target playlist has been
// specified by URI
if (typeof playlist === 'string') {
if (!loader.master.playlists[playlist]) {
throw new Error('Unknown playlist URI: ' + playlist);
}
playlist = loader.master.playlists[playlist];
}
mediaChange = !loader.media_ || playlist.uri !== loader.media_.uri;
// switch to fully loaded playlists immediately
if (loader.master.playlists[playlist.uri].endList) {
// abort outstanding playlist requests
if (request) {
request.onreadystatechange = null;
request.abort();
request = null;
}
loader.state = 'HAVE_METADATA';
loader.media_ = playlist;
// trigger media change if the active media has been updated
if (mediaChange) {
loader.trigger('mediachanging');
loader.trigger('mediachange');
}
return;
}
// switching to the active playlist is a no-op
if (!mediaChange) {
return;
}
loader.state = 'SWITCHING_MEDIA';
// there is already an outstanding playlist request
if (request) {
if (resolveUrl(loader.master.uri, playlist.uri) === request.url) {
// requesting to switch to the same playlist multiple times
// has no effect after the first
return;
}
request.onreadystatechange = null;
request.abort();
request = null;
}
// request the new playlist
if (this.media_) {
this.trigger('mediachanging');
}
request = this.hls_.xhr({
uri: resolveUrl(loader.master.uri, playlist.uri),
withCredentials
}, function(error, req) {
// disposed
if (!request) {
return;
}
if (error) {
return playlistRequestError(request, playlist.uri, startingState);
}
haveMetadata(req, playlist.uri);
// fire loadedmetadata the first time a media playlist is loaded
if (startingState === 'HAVE_MASTER') {
|
loader.trigger('loadedmetadata');
} else {
loader.trigger('mediachange');
}
});
};
/**
* set the bandwidth on an xhr to the bandwidth on the playlist
*/
loader.setBandwidth = function(xhr) {
loader.bandwidth = xhr.bandwidth;
};
// live playlist staleness timeout
loader.on('mediaupdatetimeout', function() {
if (loader.state !== 'HAVE_METADATA') {
// only refresh the media playlist if no other activity is going on
return;
}
loader.state = 'HAVE_CURRENT_METADATA';
request = this.hls_.xhr({
uri: resolveUrl(loader.master.uri, loader.media().uri),
withCredentials
}, function(error, req) {
// disposed
if (!request) {
return;
}
if (error) {
return playlistRequestError(request, loader.media().uri);
}
haveMetadata(request, loader.media().uri);
});
});
// setup initial sync info
loader.on('firstplay', function() {
let playlist = loader.media();
if (playlist) {
playlist.syncInfo = {
mediaSequence: playlist.mediaSequence,
time: 0
};
}
});
/**
* pause loading of the playlist
*/
loader.pause = () => {
loader.stopRequest();
window.clearTimeout(mediaUpdateTimeout);
};
/**
* start loading of the playlist
*/
loader.load = () => {
if (loader.started) {
if (!loader.media().endList) {
loader.trigger('mediaupdatetimeout');
} else {
loader.trigger('loadedplaylist');
}
} else {
loader.start();
}
};
/**
* start loading of the playlist
*/
loader.start = () => {
loader.started = true;
// request the specified URL
request = this.hls_.xhr({
uri: srcUrl,
withCredentials
}, function(error, req) {
let parser;
let playlist;
let i;
// disposed
if (!request) {
return;
}
// clear the loader's request reference
request = null;
if (error) {
loader.error = {
status: req.status,
message: 'HLS playlist request error at URL: ' + srcUrl,
responseText: req.responseText,
// MEDIA_ERR_NETWORK
code: 2
};
return loader.trigger('error');
}
parser = new m3u8.Parser();
parser.push(req.responseText);
parser.end();
loader.state = 'HAVE_MASTER';
parser.manifest.uri = srcUrl;
// loaded a master playlist
if (parser.manifest.playlists) {
loader.master = parser.manifest;
// setup by-URI lookups and resolve media playlist URIs
i = loader.master.playlists.length;
while (i--) {
playlist = loader.master.playlists[i];
loader.master.playlists[playlist.uri] = playlist;
playlist.resolvedUri = resolveUrl(loader.master.uri, playlist.uri);
}
// resolve any media group URIs
for (let groupKey in loader.master.mediaGroups.AUDIO) {
for (let labelKey in loader.master.mediaGroups.AUDIO[groupKey]) {
let alternateAudio = loader.master.mediaGroups.AUDIO[groupKey][labelKey];
if (alternateAudio.uri) {
alternateAudio.resolvedUri =
resolveUrl(loader.master.uri, alternateAudio.uri);
}
}
}
loader.trigger('loadedplaylist');
if (!request) {
// no media playlist was specifically selected so start
// from the first listed one
loader.media(parser.manifest.playlists[0]);
}
return;
}
// loaded a media playlist
// infer a master playlist if none was previously requested
loader.master = {
mediaGroups: {
'AUDIO': {},
'VIDEO': {},
'CLOSED-CAPTIONS': {},
'SUBTITLES': {}
},
uri: window.location.href,
playlists: [{
uri: srcUrl
}]
};
loader.master.playlists[srcUrl] = loader.master.playlists[0];
loader.master.playlists[0].resolvedUri = srcUrl;
haveMetadata(req, srcUrl);
return loader.trigger('loadedmetadata');
});
};
};
PlaylistLoader.prototype = new Stream();
export default PlaylistLoader;
| |
passthrough.go
|
package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/labstack/echo/v4"
log "github.com/sirupsen/logrus"
"github.com/cloudfoundry-incubator/stratos/src/jetstream/repository/interfaces"
)
// API Host Prefix to replace if the custom header is supplied
const apiPrefix = "api."

// Header that marks a request as long-running (see longRunningRequestTimeout).
const longRunningTimeoutHeader = "x-cap-long-running"

// Timeout for long-running requests, after which we will return indicating request it still active
// to prevent hitting the 2 minute browser timeout
// Value is in seconds (multiplied by time.Second at the use site).
const longRunningRequestTimeout = 30
// PassthroughErrorStatus carries the HTTP status of a failed proxied request.
type PassthroughErrorStatus struct {
	StatusCode int `json:"statusCode"`
	Status string `json:"status"`
}
// PassthroughError pairs a failure status with the raw upstream error body.
type PassthroughError struct {
	Error *PassthroughErrorStatus `json:"error"`
	ErrorResponse *json.RawMessage `json:"errorResponse"`
}
// getEchoURL returns a copy of the request URL so callers may mutate it
// without touching the original request.
func getEchoURL(c echo.Context) url.URL {
	log.Debug("getEchoURL")
	// dereference to copy; the request keeps its own URL untouched
	copied := *c.Request().URL
	return copied
}
// getEchoHeaders deep-copies the incoming request headers, dropping
// Cookie so it is never forwarded downstream.
func getEchoHeaders(c echo.Context) http.Header {
	log.Debug("getEchoHeaders")
	headers := make(http.Header)
	for name, values := range c.Request().Header {
		if name == "Cookie" {
			continue
		}
		copied := make([]string, len(values))
		copy(copied, values)
		headers[name] = copied
	}
	return headers
}
// makeRequestURI strips the matched route prefix (everything before the
// trailing wildcard) from the request path and returns the resulting URI.
func makeRequestURI(c echo.Context) *url.URL {
	log.Debug("makeRequestURI")
	requestURI := getEchoURL(c)
	routePrefix := strings.TrimSuffix(c.Path(), "*")
	requestURI.Path = strings.TrimPrefix(requestURI.Path, routePrefix)
	return &requestURI
}
func
|
(c echo.Context) (string, error) {
log.Debug("getPortalUserGUID")
portalUserGUIDIntf := c.Get("user_id")
if portalUserGUIDIntf == nil {
return "", errors.New("Corrupted session")
}
return portalUserGUIDIntf.(string), nil
}
// getRequestParts returns the underlying request together with its
// fully-read body (nil body reader yields a nil byte slice).
func getRequestParts(c echo.Context) (*http.Request, []byte, error) {
	log.Debug("getRequestParts")
	req := c.Request()
	var body []byte
	if reader := req.Body; reader != nil {
		data, err := ioutil.ReadAll(reader)
		if err != nil {
			return nil, nil, errors.New("Failed to read request body")
		}
		body = data
	}
	return req, body, nil
}
// buildJSONResponse assembles the per-endpoint JSON payload returned to
// the client: each CNSI GUID maps either to the upstream response body or
// to a PassthroughError describing the failure (timeout, transport error,
// or an HTTP error status from the endpoint).
func buildJSONResponse(cnsiList []string, responses map[string]*interfaces.CNSIRequest) map[string]*json.RawMessage {
	log.Debug("buildJSONResponse")
	jsonResponse := make(map[string]*json.RawMessage)
	for _, guid := range cnsiList {
		var response []byte
		cnsiResponse, ok := responses[guid]
		// StatusCode of -1 means "no error recorded so far".
		var errorStatus = &PassthroughErrorStatus{
			StatusCode: -1,
		}
		var errorResponse []byte
		switch {
		case !ok:
			errorStatus.StatusCode = 500
			errorStatus.Status = "Request timed out"
		case cnsiResponse.Error != nil:
			errorStatus.StatusCode = 500
			errorStatus.Status = cnsiResponse.Error.Error()
		case cnsiResponse.Response != nil:
			response = cnsiResponse.Response
		}
		// Check the HTTP Status code to make sure that it is actually a valid response
		// BUG FIX: guard the dereference — when the GUID is missing from the
		// responses map, cnsiResponse is nil and the old code panicked here.
		if cnsiResponse != nil && cnsiResponse.StatusCode >= 400 {
			errorStatus.Status = cnsiResponse.Status
			errorStatus.StatusCode = cnsiResponse.StatusCode
			if errorStatus.StatusCode <= 0 {
				errorStatus.StatusCode = 500
				errorStatus.Status = "Failed to proxy request"
			}
			// Check that the error response was valid json - convert to string otherwise
			if !isValidJSON(cnsiResponse.Response) {
				errorResponse = []byte(fmt.Sprintf("%q", cnsiResponse.Response))
			} else {
				errorResponse = cnsiResponse.Response
			}
		}
		if errorStatus.StatusCode >= 0 {
			passthroughError := &PassthroughError{
				Error:         errorStatus,
				ErrorResponse: (*json.RawMessage)(&errorResponse),
			}
			res, _ := json.Marshal(passthroughError)
			jsonResponse[guid] = (*json.RawMessage)(&res)
		} else {
			if len(response) > 0 {
				jsonResponse[guid] = (*json.RawMessage)(&response)
			} else {
				jsonResponse[guid] = nil
			}
		}
	}
	return jsonResponse
}
// isValidJSON reports whether data is syntactically valid JSON.
// Uses json.Valid (available since Go 1.9), as the original comment
// anticipated, instead of a full Unmarshal round-trip.
func isValidJSON(data []byte) bool {
	return json.Valid(data)
}
// buildCNSIRequest prepares an outgoing request for a single CNSI: it
// captures the caller's identity, method, body and headers, then resolves
// the target URL from the registered CNSI record's API endpoint.
func (p *portalProxy) buildCNSIRequest(cnsiGUID string, userGUID string, method string, uri *url.URL, body []byte, header http.Header) (interfaces.CNSIRequest, error) {
	log.Debug("buildCNSIRequest")
	cnsiRequest := interfaces.CNSIRequest{
		GUID: cnsiGUID,
		UserGUID: userGUID,
		Method: method,
		Body: body,
		Header: header,
	}
	cnsiRec, err := p.GetCNSIRecord(cnsiGUID)
	if err != nil {
		return cnsiRequest, err
	}
	// Copy the endpoint URL so the registry record is never mutated.
	cnsiRequest.URL = new(url.URL)
	*cnsiRequest.URL = *cnsiRec.APIEndpoint
	// The APIEndpoint might have a path already - so join the request URI to it
	cnsiRequest.URL.Path = path.Join(cnsiRequest.URL.Path, uri.Path)
	cnsiRequest.URL.RawQuery = uri.RawQuery
	return cnsiRequest, nil
}
// validateCNSIList confirms every GUID in the list resolves to a
// registered CNSI record, returning the first lookup error otherwise.
func (p *portalProxy) validateCNSIList(cnsiList []string) error {
	log.Debug("validateCNSIList")
	for _, guid := range cnsiList {
		_, err := p.GetCNSIRecord(guid)
		if err != nil {
			return err
		}
	}
	return nil
}
// fwdCNSIStandardHeaders copies headers from the CNSI request onto the
// outgoing HTTP request, skipping those we consume ourselves or that
// would confuse the downstream endpoint.
func fwdCNSIStandardHeaders(cnsiRequest *interfaces.CNSIRequest, req *http.Request) {
	log.Debug("fwdCNSIStandardHeaders")
	for header, values := range cnsiRequest.Header {
		lower := strings.ToLower(header)
		// Skip these
		// - "Referer" causes CF to fail with a 403
		// - "Connection", "X-Cap-*" and "Cookie" are consumed by us
		// - "Accept-Encoding" must be excluded otherwise the transport will expect us to handle the encoding/compression
		// - X-Forwarded-* headers - these will confuse Cloud Foundry in some cases (e.g. load balancers)
		if header == "Connection" || header == "Cookie" || header == "Referer" || header == "Accept-Encoding" ||
			strings.HasPrefix(lower, "x-cap-") || strings.HasPrefix(lower, "x-forwarded-") {
			continue
		}
		// Forwarding everything else
		req.Header[header] = values
	}
}
// proxy forwards the incoming request to the CNSI(s) selected by its
// headers and relays the aggregated responses back to the client.
func (p *portalProxy) proxy(c echo.Context) error {
	log.Debug("proxy")
	responses, err := p.ProxyRequest(c, makeRequestURI(c))
	if err != nil {
		return err
	}
	return p.SendProxiedResponse(c, responses)
}
// ProxyRequest forwards the incoming request to each CNSI endpoint listed in
// the "x-cap-cnsi-list" header (comma separated GUIDs) and returns one
// CNSIRequest per endpoint, keyed by endpoint GUID.
//
// Passthrough mode ("x-cap-passthrough") and long-running mode (see
// longRunningTimeoutHeader) both require exactly one endpoint. Long-running
// requests are bounded by longRunningRequestTimeout; endpoints that have not
// answered by then receive a synthetic 202 "still active" response.
func (p *portalProxy) ProxyRequest(c echo.Context, uri *url.URL) (map[string]*interfaces.CNSIRequest, error) {
	log.Debug("proxy")
	cnsiList := strings.Split(c.Request().Header.Get("x-cap-cnsi-list"), ",")
	shouldPassthrough := "true" == c.Request().Header.Get("x-cap-passthrough")
	longRunning := "true" == c.Request().Header.Get(longRunningTimeoutHeader)
	// Reject unknown endpoint GUIDs up front.
	if err := p.validateCNSIList(cnsiList); err != nil {
		return nil, echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	header := getEchoHeaders(c)
	// Never forward the portal session cookie to the endpoint.
	header.Del("Cookie")
	portalUserGUID, err := getPortalUserGUID(c)
	if err != nil {
		return nil, echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	req, body, err := getRequestParts(c)
	if err != nil {
		return nil, echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	if shouldPassthrough {
		if len(cnsiList) > 1 {
			err := errors.New("Requested passthrough to multiple CNSIs. Only single CNSI passthroughs are supported")
			return nil, echo.NewHTTPError(http.StatusBadRequest, err.Error())
		}
	}
	// Only support one endpoint for long running operation (due to way we do timeout with the response channel)
	if longRunning {
		if len(cnsiList) > 1 {
			err := errors.New("Requested long-running proxy to multiple CNSIs. Only single CNSI is supported for long running passthrough")
			return nil, echo.NewHTTPError(http.StatusBadRequest, err.Error())
		}
	}
	// send the request to each CNSI concurrently; each goroutine reports on `done`
	done := make(chan *interfaces.CNSIRequest)
	for _, cnsi := range cnsiList {
		cnsiRequest, buildErr := p.buildCNSIRequest(cnsi, portalUserGUID, req.Method, uri, body, header)
		if buildErr != nil {
			return nil, echo.NewHTTPError(http.StatusBadRequest, buildErr.Error())
		}
		cnsiRequest.LongRunning = longRunning
		// Allow the host part of the API URL to be overridden
		apiHost := c.Request().Header.Get("x-cap-api-host")
		// Don't allow any '.' chars in the api name
		if apiHost != "" && !strings.ContainsAny(apiHost, ".") {
			// Add trailing . for when we replace
			apiHost = apiHost + "."
			// Override the API URL if needed
			if strings.HasPrefix(cnsiRequest.URL.Host, apiPrefix) {
				// Replace 'api.' prefix with supplied prefix
				cnsiRequest.URL.Host = strings.Replace(cnsiRequest.URL.Host, apiPrefix, apiHost, 1)
			} else {
				// Add supplied prefix to the domain
				cnsiRequest.URL.Host = apiHost + cnsiRequest.URL.Host
			}
		}
		go p.doRequest(&cnsiRequest, done)
	}
	// Wait for all responses
	responses := make(map[string]*interfaces.CNSIRequest)
	if !longRunning {
		for range cnsiList {
			res := <-done
			responses[res.GUID] = res
		}
	} else {
		// Long running has a timeout
		for range cnsiList {
			select {
			case res := <-done:
				responses[res.GUID] = res
			case <-time.After(longRunningRequestTimeout * time.Second):
				// For all those that have not completed, add a timeout response
				for _, id := range cnsiList {
					if _, ok := responses[id]; !ok {
						// Did not get a response for the endpoint
						responses[id] = &interfaces.CNSIRequest{
							GUID:         id,
							UserGUID:     portalUserGUID,
							Method:       req.Method,
							StatusCode:   http.StatusAccepted,
							Status:       "Long Running Operation still active",
							Response:     makeLongRunningTimeoutError(),
							Error:        nil,
							ResponseGUID: id,
						}
					}
				}
				// NOTE(review): this `break` exits only the select (a no-op),
				// not the for loop — on timeout the loop may wait again for the
				// remaining iterations. TODO confirm a labeled break was intended.
				break
			}
		}
	}
	return responses, nil
}
// makeLongRunningTimeoutError builds the JSON payload returned for an
// endpoint that did not answer a long-running request before the timeout.
// It wraps a 202 PassthroughErrorStatus plus a raw JSON body that the
// front end recognises via "longRunningTimeout": true.
func makeLongRunningTimeoutError() []byte {
	description := "Long Running Operation still active"
	var errorStatus = &PassthroughErrorStatus{
		StatusCode: http.StatusAccepted,
		Status:     description,
	}
	// Use Sprintf with %q so the description is properly quoted/escaped JSON
	// (the previous fmt.Sprint over a plain string concatenation was redundant
	// and did not escape the interpolated text).
	errorResponse := []byte(fmt.Sprintf(`{"longRunningTimeout": true, "description": %q, "error_code": "longRunningTimeout"}`, description))
	passthroughError := &PassthroughError{}
	passthroughError.Error = errorStatus
	passthroughError.ErrorResponse = (*json.RawMessage)(&errorResponse)
	res, e := json.Marshal(passthroughError)
	if e != nil {
		log.Errorf("makeLongRunningTimeoutError: could not marshal JSON: %+v", e)
	}
	return res
}
// TODO: This should be used by the function above

// DoProxyRequest forwards each requested operation to its endpoint
// concurrently and returns the results keyed by the caller-supplied
// ResultGUID.
func (p *portalProxy) DoProxyRequest(requests []interfaces.ProxyRequestInfo) (map[string]*interfaces.CNSIRequest, error) {
	log.Debug("DoProxyRequest")
	// send the request to each endpoint
	done := make(chan *interfaces.CNSIRequest)
	for _, requestInfo := range requests {
		cnsiRequest, buildErr := p.buildCNSIRequest(requestInfo.EndpointGUID, requestInfo.UserGUID, requestInfo.Method, requestInfo.URI, requestInfo.Body, requestInfo.Headers)
		// Check the build error before touching the request value (previously
		// ResponseGUID was assigned before the error check).
		if buildErr != nil {
			return nil, echo.NewHTTPError(http.StatusBadRequest, buildErr.Error())
		}
		cnsiRequest.ResponseGUID = requestInfo.ResultGUID
		go p.doRequest(&cnsiRequest, done)
	}
	// Collect exactly one response per request; doRequest always sends one.
	responses := make(map[string]*interfaces.CNSIRequest)
	for range requests {
		res := <-done
		responses[res.ResponseGUID] = res
	}
	return responses, nil
}
// DoProxySingleRequest is a convenience helper: it proxies one request to a
// single endpoint and returns that endpoint's result directly.
func (p *portalProxy) DoProxySingleRequest(cnsiGUID, userGUID, method, requestUrl string, headers http.Header, body []byte) (*interfaces.CNSIRequest, error) {
	proxyURL, err := url.Parse(requestUrl)
	if err != nil {
		return nil, err
	}
	// Build a one-element batch and delegate to DoProxyRequest.
	req := interfaces.ProxyRequestInfo{
		UserGUID:     userGUID,
		ResultGUID:   "REQ_" + cnsiGUID,
		EndpointGUID: cnsiGUID,
		Method:       method,
		URI:          proxyURL,
		Headers:      headers,
		Body:         body,
	}
	responses, err := p.DoProxyRequest([]interfaces.ProxyRequestInfo{req})
	if err != nil {
		return nil, err
	}
	return responses[req.ResultGUID], err
}
// SendProxiedResponse writes the collected endpoint responses back to the
// client. In passthrough mode the single endpoint's status code and body are
// relayed verbatim; otherwise a JSON map keyed by endpoint GUID is encoded.
func (p *portalProxy) SendProxiedResponse(c echo.Context, responses map[string]*interfaces.CNSIRequest) error {
	shouldPassthrough := "true" == c.Request().Header.Get("x-cap-passthrough")
	var cnsiList []string
	for k := range responses {
		cnsiList = append(cnsiList, k)
	}
	if shouldPassthrough {
		// Guard against an empty response map — previously indexing
		// cnsiList[0] would panic if no endpoint responded.
		if len(cnsiList) == 0 {
			return echo.NewHTTPError(http.StatusRequestTimeout, "Request timed out")
		}
		cnsiGUID := cnsiList[0]
		res, ok := responses[cnsiGUID]
		if !ok {
			return echo.NewHTTPError(http.StatusRequestTimeout, "Request timed out")
		}
		// in passthrough mode, set the status code to that of the single response
		c.Response().WriteHeader(res.StatusCode)
		// we don't care if this fails
		_, err := c.Response().Write(res.Response)
		if err != nil {
			log.Errorf("Failed to write passthrough response %v", err)
		}
		return nil
	}
	jsonResponse := buildJSONResponse(cnsiList, responses)
	e := json.NewEncoder(c.Response())
	err := e.Encode(jsonResponse)
	if err != nil {
		log.Errorf("Failed to encode JSON: %v\n%#v\n", err, jsonResponse)
	}
	return err
}
// doRequest executes one proxied request against its endpoint and, when
// `done` is non-nil, sends exactly one message on it: the same CNSIRequest
// with StatusCode/Status/Response/Error filled in.
func (p *portalProxy) doRequest(cnsiRequest *interfaces.CNSIRequest, done chan<- *interfaces.CNSIRequest) {
	log.Debugf("doRequest for URL: %s", cnsiRequest.URL.String())
	var body io.Reader
	var res *http.Response
	var req *http.Request
	var err error
	// Only attach a body reader when there is an actual payload.
	if len(cnsiRequest.Body) > 0 {
		body = bytes.NewReader(cnsiRequest.Body)
	}
	req, err = http.NewRequest(cnsiRequest.Method, cnsiRequest.URL.String(), body)
	if err != nil {
		cnsiRequest.Error = err
		if done != nil {
			done <- cnsiRequest
		}
		return
	}
	// get a cnsi token record and a cnsi record
	tokenRec, _, err := p.getCNSIRequestRecords(cnsiRequest)
	if err != nil {
		cnsiRequest.Error = err
		if done != nil {
			cnsiRequest.StatusCode = 400
			cnsiRequest.Status = "Unable to retrieve CNSI token record"
			done <- cnsiRequest
		}
		return
	}
	// Copy original headers through, except custom portal-proxy Headers
	fwdCNSIStandardHeaders(cnsiRequest, req)
	// If this is a long running request, add a header which we can use at request time to change the timeout
	if cnsiRequest.LongRunning {
		req.Header.Set(longRunningTimeoutHeader, "true")
	}
	// Find the auth provider for the auth type - default to oauthflow
	authHandler := p.GetAuthProvider(tokenRec.AuthType)
	if authHandler.Handler != nil {
		res, err = authHandler.Handler(cnsiRequest, req)
	} else {
		res, err = p.DoOAuthFlowRequest(cnsiRequest, req)
	}
	if err != nil {
		cnsiRequest.StatusCode = 500
		cnsiRequest.Status = "Error proxing request"
		cnsiRequest.Response = []byte(err.Error())
		cnsiRequest.Error = err
	} else if res.Body != nil {
		cnsiRequest.StatusCode = res.StatusCode
		cnsiRequest.Status = res.Status
		cnsiRequest.Response, cnsiRequest.Error = ioutil.ReadAll(res.Body)
		// Close when the function exits; this is the only body opened here.
		defer res.Body.Close()
	}
	// If Status Code >=400, log this as a warning
	if cnsiRequest.StatusCode >= 400 {
		var contentType = "Unknown"
		var contentLength int64 = -1
		if res != nil {
			contentType = res.Header.Get("Content-Type")
			contentLength = res.ContentLength
		}
		log.Warnf("Passthrough response: URL: %s, Status Code: %d, Status: %s, Content Type: %s, Length: %d",
			cnsiRequest.URL.String(), cnsiRequest.StatusCode, cnsiRequest.Status, contentType, contentLength)
		log.Warn(string(cnsiRequest.Response))
	}
	if done != nil {
		done <- cnsiRequest
	}
}
|
getPortalUserGUID
|
RadixSort.py
|
from PuzzleLib.Cuda.Kernels.RadixSort import backendTest
def unittest():
    # Run the shared RadixSort backend test suite against the HIP backend.
    from PuzzleLib.Hip import Backend
    backendTest(Backend)
# Allow running this module directly as a standalone test.
if __name__ == "__main__":
    unittest()
|
|
notebook_manager.go
|
package command
import (
"archive/tar"
"bytes"
"fmt"
"net/http"
"regexp"
"strings"
"text/template"
petname "github.com/dustinkirkland/golang-petname"
"github.com/labstack/echo"
"github.com/pkg/errors"
requestContext "github.com/determined-ai/determined/master/internal/context"
"github.com/determined-ai/determined/master/internal/db"
"github.com/determined-ai/determined/master/internal/resourcemanagers"
"github.com/determined-ai/determined/master/internal/sproto"
"github.com/determined-ai/determined/master/pkg/actor"
"github.com/determined-ai/determined/master/pkg/archive"
"github.com/determined-ai/determined/master/pkg/check"
"github.com/determined-ai/determined/master/pkg/etc"
"github.com/determined-ai/determined/master/pkg/model"
"github.com/determined-ai/determined/master/pkg/tasks"
"github.com/determined-ai/determined/proto/pkg/apiv1"
"github.com/determined-ai/determined/proto/pkg/notebookv1"
)
const (
	// Paths inside the task container used by the Jupyter runtime.
	jupyterDir        = "/run/determined/jupyter/"
	jupyterConfigDir  = "/run/determined/jupyter/config"
	jupyterDataDir    = "/run/determined/jupyter/data"
	jupyterRuntimeDir = "/run/determined/jupyter/runtime"
	jupyterEntrypoint = "/run/determined/jupyter/notebook-entrypoint.sh"
	// Agent ports 2600 - 3500 are split between TensorBoards, Notebooks, and Shells.
	minNotebookPort = 2900
	maxNotebookPort = minNotebookPort + 299
	// Files materialized into the task working directory.
	notebookConfigFile  = "/run/determined/workdir/jupyter-conf.py"
	notebookDefaultPage = "/run/determined/workdir/Notebook.ipynb"
)
var (
	// Command used to start the notebook inside the container.
	notebookEntrypoint = []string{jupyterEntrypoint}
	// Log line signalling that the Jupyter server is ready to serve traffic.
	jupyterReadyPattern = regexp.MustCompile("Jupyter Notebook .*is running at")
)
// generateNotebookDescription produces a default human-friendly description
// of the form "Notebook (<pet-name>)" using a randomly generated pet name.
func generateNotebookDescription() (string, error) {
	tmpl := "Notebook ({{.PetName}})"
	t, err := template.New("").Parse(strings.TrimSpace(tmpl))
	if err != nil {
		return "", errors.Wrap(err, "parsing template")
	}
	// e.g. "amusing-dolphin", shaped by the project-wide name generator settings.
	petName := petname.Generate(model.TaskNameGeneratorWords, model.TaskNameGeneratorSep)
	var buf strings.Builder
	err = t.Execute(&buf, map[string]string{"PetName": petName})
	if err != nil {
		return "", errors.Wrap(err, "executing template")
	}
	return buf.String(), nil
}
// generateServiceAddress builds the proxied Jupyter URL for a task, e.g.
// "/proxy/<taskID>/lab/tree/Notebook.ipynb?reset".
func generateServiceAddress(taskID string) (string, error) {
	const pattern = "/proxy/{{.TaskID}}/lab/tree/Notebook.ipynb?reset"
	tpl, err := template.New("").Parse(strings.TrimSpace(pattern))
	if err != nil {
		return "", errors.Wrap(err, "parsing template")
	}
	var out strings.Builder
	if err := tpl.Execute(&out, map[string]string{"TaskID": taskID}); err != nil {
		return "", errors.Wrap(err, "executing template")
	}
	return out.String(), nil
}
// generateNotebookConfig renders the jupyter-conf.py contents for a task,
// pointing the notebook server at its /proxy/<taskID>/ base URL and
// disabling browser launch / token auth (access is brokered by the proxy).
func generateNotebookConfig(taskID string) ([]byte, error) {
	tmpl := `
c.NotebookApp.base_url = "/proxy/{{.TaskID}}/"
c.NotebookApp.allow_origin = "*"
c.NotebookApp.trust_xheaders = True
c.NotebookApp.open_browser = False
c.NotebookApp.allow_root = True
c.NotebookApp.ip = "0.0.0.0"
c.NotebookApp.token = ""
`
	t, err := template.New("").Parse(strings.TrimSpace(tmpl))
	if err != nil {
		return nil, errors.Wrap(err, "parsing template")
	}
	var buf bytes.Buffer
	err = t.Execute(&buf, map[string]string{"TaskID": taskID})
	if err != nil {
		return nil, errors.Wrap(err, "executing template")
	}
	return buf.Bytes(), nil
}
// notebookManager is the actor that owns all notebook command actors.
type notebookManager struct {
	db *db.PgDB
	// Fallback agent user/group applied when a request does not specify one.
	defaultAgentUserGroup model.AgentUserGroup
	taskSpec              *tasks.TaskSpec
}
// NotebookLaunchRequest describes a request to launch a new notebook.
type NotebookLaunchRequest struct {
	// Raw command parameters (config, context, templates) for the launch.
	CommandParams *CommandParams
	// The user initiating the launch.
	User *model.User
}
func (n *notebookManager) processLaunchRequest(
ctx *actor.Context,
req NotebookLaunchRequest,
) (*summary, int, error) {
|
ctx.Self().System(), n.db, *req.User, req.CommandParams, &n.taskSpec.TaskContainerDefaults,
)
if err != nil {
return nil, http.StatusBadRequest, err
}
if commandReq.AgentUserGroup == nil {
commandReq.AgentUserGroup = &n.defaultAgentUserGroup
}
ctx.Log().Info("creating notebook")
notebook, err := n.newNotebook(commandReq)
if err != nil {
return nil, http.StatusInternalServerError, err
}
if err = check.Validate(notebook.config); err != nil {
return nil, http.StatusBadRequest, err
}
a, _ := ctx.ActorOf(notebook.taskID, notebook)
summaryFut := ctx.Ask(a, getSummary{})
if err := summaryFut.Error(); err != nil {
return nil, http.StatusInternalServerError, err
}
summary := summaryFut.Get().(summary)
ctx.Log().Infof("created notebook %s", a.Address().Local())
return &summary, http.StatusOK, nil
}
func (n *notebookManager) Receive(ctx *actor.Context) error {
switch msg := ctx.Message().(type) {
case *apiv1.GetNotebooksRequest:
resp := &apiv1.GetNotebooksResponse{}
for _, notebook := range ctx.AskAll(¬ebookv1.Notebook{}, ctx.Children()...).GetAll() {
resp.Notebooks = append(resp.Notebooks, notebook.(*notebookv1.Notebook))
}
ctx.Respond(resp)
case NotebookLaunchRequest:
summary, statusCode, err := n.processLaunchRequest(ctx, msg)
if err != nil || statusCode > 200 {
ctx.Respond(echo.NewHTTPError(statusCode, errors.Wrap(err, "failed to launch shell").Error()))
return nil
}
ctx.Respond(summary.ID)
case echo.Context:
n.handleAPIRequest(ctx, msg)
}
return nil
}
// handleAPIRequest serves the raw REST interface for notebooks:
// GET lists notebook summaries (optionally filtered by user) and
// POST launches a new notebook for the authenticated user.
func (n *notebookManager) handleAPIRequest(ctx *actor.Context, apiCtx echo.Context) {
	switch apiCtx.Request().Method {
	case echo.GET:
		userFilter := apiCtx.QueryParam("user")
		ctx.Respond(apiCtx.JSON(
			http.StatusOK,
			ctx.AskAll(getSummary{userFilter: userFilter}, ctx.Children()...)))
	case echo.POST:
		var params CommandParams
		if err := apiCtx.Bind(&params); err != nil {
			respondBadRequest(ctx, err)
			return
		}
		// apiCtx is expected to be a DetContext carrying the authenticated
		// user; the type assertion panics otherwise.
		user := apiCtx.(*requestContext.DetContext).MustGetUser()
		req := NotebookLaunchRequest{
			User:          &user,
			CommandParams: &params,
		}
		summary, statusCode, err := n.processLaunchRequest(ctx, req)
		if err != nil || statusCode > 200 {
			ctx.Respond(echo.NewHTTPError(statusCode, err.Error()))
			return
		}
		ctx.Respond(apiCtx.JSON(http.StatusOK, summary))
	default:
		ctx.Respond(echo.ErrMethodNotAllowed)
	}
}
// newNotebook assembles the command actor spec for a new notebook task:
// reserves a port, injects the Jupyter entrypoint/config files into the
// container archive, and fills in generated description/service address.
func (n *notebookManager) newNotebook(req *commandRequest) (*command, error) {
	config := req.Config
	taskID := resourcemanagers.NewTaskID()
	// Postprocess the config. Add Jupyter and configuration to the container.
	// Select a random port from the range to assign to the notebook. In host
	// mode, this mitigates the risk of multiple notebook processes binding
	// the same port on an agent.
	port := getPort(minNotebookPort, maxNotebookPort)
	notebookPorts := map[string]int{"notebook": port}
	portVar := fmt.Sprintf("NOTEBOOK_PORT=%d", port)
	config.Environment.Ports = notebookPorts
	// Expose the chosen port to both CPU and GPU environment variable sets.
	config.Environment.EnvironmentVariables.CPU = append(
		config.Environment.EnvironmentVariables.CPU, portVar)
	config.Environment.EnvironmentVariables.GPU = append(
		config.Environment.EnvironmentVariables.GPU, portVar)
	config.Entrypoint = notebookEntrypoint
	setPodSpec(&config, n.taskSpec.TaskContainerDefaults)
	// Generate a friendly description when the user did not supply one.
	if config.Description == "" {
		var err error
		config.Description, err = generateNotebookDescription()
		if err != nil {
			return nil, errors.Wrap(err, "generating notebook name")
		}
	}
	serviceAddress, err := generateServiceAddress(string(taskID))
	if err != nil {
		return nil, errors.Wrap(err, "generating service address")
	}
	notebookConfigContent, err := generateNotebookConfig(string(taskID))
	if err != nil {
		return nil, errors.Wrap(err, "generating notebook config")
	}
	return &command{
		taskID:    taskID,
		config:    config,
		userFiles: req.UserFiles,
		// Files/dirs materialized into the container, owned by the task's
		// agent user/group.
		additionalFiles: archive.Archive{
			req.AgentUserGroup.OwnedArchiveItem(jupyterDir, nil, 0700, tar.TypeDir),
			req.AgentUserGroup.OwnedArchiveItem(jupyterConfigDir, nil, 0700, tar.TypeDir),
			req.AgentUserGroup.OwnedArchiveItem(jupyterDataDir, nil, 0700, tar.TypeDir),
			req.AgentUserGroup.OwnedArchiveItem(jupyterRuntimeDir, nil, 0700, tar.TypeDir),
			req.AgentUserGroup.OwnedArchiveItem(
				jupyterEntrypoint,
				etc.MustStaticFile(etc.NotebookEntrypointResource),
				0700,
				tar.TypeReg,
			),
			req.AgentUserGroup.OwnedArchiveItem(
				notebookConfigFile, notebookConfigContent, 0644, tar.TypeReg,
			),
			req.AgentUserGroup.OwnedArchiveItem(
				notebookDefaultPage,
				etc.MustStaticFile(etc.NotebookTemplateResource),
				0644,
				tar.TypeReg,
			),
		},
		// The notebook is "ready" once Jupyter logs its startup banner.
		readinessChecks: map[string]readinessCheck{
			"notebook": func(log sproto.ContainerLog) bool {
				return jupyterReadyPattern.MatchString(log.String())
			},
		},
		serviceAddress: &serviceAddress,
		owner:          req.Owner,
		agentUserGroup: req.AgentUserGroup,
		taskSpec:       n.taskSpec,
	}, nil
}
|
commandReq, err := parseCommandRequest(
|
blob_access_content_addressable_storage.go
|
package cas
import (
"context"
"io"
"math"
"os"
remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/buildbarn/bb-storage/pkg/blobstore"
"github.com/buildbarn/bb-storage/pkg/blobstore/buffer"
"github.com/buildbarn/bb-storage/pkg/filesystem"
cas_proto "github.com/buildbarn/bb-storage/pkg/proto/cas"
"github.com/buildbarn/bb-storage/pkg/util"
"github.com/golang/protobuf/proto"
)
// blobAccessContentAddressableStorage adapts a BlobAccess store to the
// ContentAddressableStorage interface.
type blobAccessContentAddressableStorage struct {
	blobAccess blobstore.BlobAccess
	// Upper bound on the size of protobuf messages read from the store.
	maximumMessageSizeBytes int
}
// NewBlobAccessContentAddressableStorage creates a
// ContentAddressableStorage that reads and writes Content Addressable
// Storage (CAS) objects from a BlobAccess based store.
// maximumMessageSizeBytes caps how large a fetched protobuf may be.
func NewBlobAccessContentAddressableStorage(blobAccess blobstore.BlobAccess, maximumMessageSizeBytes int) ContentAddressableStorage {
	return &blobAccessContentAddressableStorage{
		blobAccess:              blobAccess,
		maximumMessageSizeBytes: maximumMessageSizeBytes,
	}
}
// getMessage fetches the blob for digest (bounded by
// maximumMessageSizeBytes) and unmarshals it into message.
func (cas *blobAccessContentAddressableStorage) getMessage(ctx context.Context, digest *util.Digest, message proto.Message) error {
	data, err := cas.blobAccess.Get(ctx, digest).ToByteSlice(cas.maximumMessageSizeBytes)
	if err != nil {
		return err
	}
	return proto.Unmarshal(data, message)
}
// GetAction fetches and unmarshals a remote-execution Action message.
func (cas *blobAccessContentAddressableStorage) GetAction(ctx context.Context, digest *util.Digest) (*remoteexecution.Action, error) {
	var action remoteexecution.Action
	if err := cas.getMessage(ctx, digest, &action); err != nil {
		return nil, err
	}
	return &action, nil
}
// GetUncachedActionResult fetches and unmarshals an UncachedActionResult message.
func (cas *blobAccessContentAddressableStorage) GetUncachedActionResult(ctx context.Context, digest *util.Digest) (*cas_proto.UncachedActionResult, error) {
	var uncachedActionResult cas_proto.UncachedActionResult
	if err := cas.getMessage(ctx, digest, &uncachedActionResult); err != nil {
		return nil, err
	}
	return &uncachedActionResult, nil
}
// GetCommand fetches and unmarshals a remote-execution Command message.
func (cas *blobAccessContentAddressableStorage) GetCommand(ctx context.Context, digest *util.Digest) (*remoteexecution.Command, error) {
	var command remoteexecution.Command
	if err := cas.getMessage(ctx, digest, &command); err != nil {
		return nil, err
	}
	return &command, nil
}
// GetDirectory fetches and unmarshals a remote-execution Directory message.
func (cas *blobAccessContentAddressableStorage) GetDirectory(ctx context.Context, digest *util.Digest) (*remoteexecution.Directory, error) {
	var directory remoteexecution.Directory
	if err := cas.getMessage(ctx, digest, &directory); err != nil {
		return nil, err
	}
	return &directory, nil
}
// GetFile streams the blob identified by digest into a newly created file
// `name` inside `directory`, created exclusively with mode 0444 (or 0555
// when isExecutable).
func (cas *blobAccessContentAddressableStorage) GetFile(ctx context.Context, digest *util.Digest, directory filesystem.Directory, name string, isExecutable bool) error {
	var mode os.FileMode = 0444
	if isExecutable {
		mode = 0555
	}
	w, err := directory.OpenAppend(name, filesystem.CreateExcl(mode))
	if err != nil {
		return err
	}
	defer w.Close()
	if err := cas.blobAccess.Get(ctx, digest).IntoWriter(w); err != nil {
		// Ensure no traces are left behind upon failure.
		directory.Remove(name)
		return err
	}
	return nil
}
// GetTree fetches and unmarshals a remote-execution Tree message.
func (cas *blobAccessContentAddressableStorage) GetTree(ctx context.Context, digest *util.Digest) (*remoteexecution.Tree, error) {
	var tree remoteexecution.Tree
	if err := cas.getMessage(ctx, digest, &tree); err != nil {
		return nil, err
	}
	return &tree, nil
}
// putBlob stores data in the CAS and returns its digest. The digest function
// (hash family / instance) is inherited from parentDigest.
func (cas *blobAccessContentAddressableStorage) putBlob(ctx context.Context, data []byte, parentDigest *util.Digest) (*util.Digest, error) {
	// Compute new digest of data.
	digestGenerator := parentDigest.NewDigestGenerator()
	if _, err := digestGenerator.Write(data); err != nil {
		return nil, err
	}
	digest := digestGenerator.Sum()
	if err := cas.blobAccess.Put(ctx, digest, buffer.NewValidatedBufferFromByteSlice(data)); err != nil {
		return nil, err
	}
	return digest, nil
}
// putMessage marshals a protobuf message and stores it via putBlob.
func (cas *blobAccessContentAddressableStorage) putMessage(ctx context.Context, message proto.Message, parentDigest *util.Digest) (*util.Digest, error) {
	data, err := proto.Marshal(message)
	if err != nil {
		return nil, err
	}
	return cas.putBlob(ctx, data, parentDigest)
}
// PutFile hashes the named file in directory, then uploads exactly the bytes
// that were hashed. The file handle is handed to the upload buffer, which
// takes over responsibility for closing it.
func (cas *blobAccessContentAddressableStorage) PutFile(ctx context.Context, directory filesystem.Directory, name string, parentDigest *util.Digest) (*util.Digest, error) {
	file, err := directory.OpenRead(name)
	if err != nil {
		return nil, err
	}
	// Walk through the file to compute the digest.
	digestGenerator := parentDigest.NewDigestGenerator()
	sizeBytes, err := io.Copy(digestGenerator, io.NewSectionReader(file, 0, math.MaxInt64))
	if err != nil {
		file.Close()
		return nil, err
	}
	digest := digestGenerator.Sum()
	// Rewind and store it. Limit uploading to the size that was
	// used to compute the digest. This ensures uploads succeed,
	// even if more data gets appended in the meantime. This is not
	// uncommon, especially for stdout and stderr logs.
	if err := cas.blobAccess.Put(
		ctx,
		digest,
		buffer.NewCASBufferFromReader(
			digest,
			newSectionReadCloser(file, 0, sizeBytes),
			buffer.UserProvided)); err != nil {
		return nil, err
	}
	return digest, nil
}
// newSectionReadCloser returns an io.ReadCloser that reads from r at a
// given offset, but stops with EOF after n bytes. This function is
// identical to io.NewSectionReader(), except that it provides an
// io.ReadCloser instead of an io.Reader.
func newSectionReadCloser(r filesystem.FileReader, off int64, n int64) io.ReadCloser
|
// PutLog stores a raw log blob in the CAS and returns its digest.
func (cas *blobAccessContentAddressableStorage) PutLog(ctx context.Context, log []byte, parentDigest *util.Digest) (*util.Digest, error) {
	return cas.putBlob(ctx, log, parentDigest)
}
// PutTree marshals and stores a remote-execution Tree message.
func (cas *blobAccessContentAddressableStorage) PutTree(ctx context.Context, tree *remoteexecution.Tree, parentDigest *util.Digest) (*util.Digest, error) {
	return cas.putMessage(ctx, tree, parentDigest)
}
// PutUncachedActionResult marshals and stores an UncachedActionResult message.
func (cas *blobAccessContentAddressableStorage) PutUncachedActionResult(ctx context.Context, uncachedActionResult *cas_proto.UncachedActionResult, parentDigest *util.Digest) (*util.Digest, error) {
	return cas.putMessage(ctx, uncachedActionResult, parentDigest)
}
|
{
return &struct {
io.SectionReader
io.Closer
}{
SectionReader: *io.NewSectionReader(r, off, n),
Closer: r,
}
}
|
distribute.py
|
# coding=utf-8
# Copyright 2017-2019 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
_ENGINE = None
def enable_distributed_training():
    """Initialize Horovod as the distributed-training engine.

    On success, stores the horovod module in the module-global ``_ENGINE``
    and calls ``hvd.init()``. If horovod is not installed, prints an error
    and terminates the process with a non-zero exit status (the previous
    bare ``exit()`` exited with status 0, masking the failure from callers
    and CI, and ``exit`` is a site-injected builtin not guaranteed to exist).
    """
    global _ENGINE
    try:
        import horovod.tensorflow as hvd
        _ENGINE = hvd
        hvd.init()
    except ImportError:
        sys.stderr.write("Error: You must install horovod first in order to"
                         " enable distributed training.\n")
        sys.exit(1)
def is_distributed_training_mode():
    # True once enable_distributed_training() has successfully set _ENGINE.
    return _ENGINE is not None
def rank():
    """Global rank of this process; requires distributed training enabled."""
    return _ENGINE.rank()
def local_rank():
    # Rank reported by the engine's local_rank() (per-node rank in Horovod).
    return _ENGINE.local_rank()
def size():
    # Total number of distributed workers.
    return _ENGINE.size()
def all_reduce(tensor):
    # All-reduce the tensor across workers, using fp16 compression to
    # reduce communication bandwidth.
    return _ENGINE.allreduce(tensor, compression=_ENGINE.Compression.fp16)
def get_broadcast_hook():
    # Hook that broadcasts global variables from rank 0 to all workers.
    return _ENGINE.BroadcastGlobalVariablesHook(0)
|
return _ENGINE.rank()
|
nexusReader.service.ts
|
import { REST } from '../../../../api/ApiService';
export class
|
{
public static getNexusNames(): PromiseLike<string[]> {
return REST('/nexus/names').get();
}
}
|
NexusReaderService
|
driver.py
|
'''
Most of the driver API is unsupported in the simulator, but some stubs are
provided to allow tests to import correctly.
'''
def device_memset(dst, val, size, stream=0):
    """Fill the first `size` bytes of `dst` with the byte value `val`.

    `dst` is reinterpreted as raw unsigned bytes; `val` must fit in one
    byte (bytes([val]) raises ValueError otherwise). `stream` is accepted
    for API compatibility with the real driver and ignored.
    """
    fill_byte = bytes([val])[0]
    raw = dst.view('u1')
    raw[:size].fill(fill_byte)
|
def device_to_host(dst, src, size, stream=0):
    # In the simulator "device" memory is ordinary host memory, so this is
    # the same byte-wise copy as host_to_device.
    host_to_device(dst, src, size)
def device_memory_size(obj):
    """Total number of bytes backing `obj` (element count times item size)."""
    n_elements = obj.size
    return n_elements * obj.itemsize
def device_to_device(dst, src, size, stream=0):
    # Simulator device buffers are host arrays, so delegate to the same
    # byte-wise copy as host_to_device.
    host_to_device(dst, src, size)
class FakeDriver(object):
    # Minimal stand-in for the CUDA driver object used by the simulator.
    def get_device_count(self):
        # The simulator always exposes exactly one fake device.
        return 1

driver = FakeDriver()  # module-level singleton mirroring the real driver API
# Linking is not supported in the simulator; the name exists only so that
# imports of the real driver module keep working (see module docstring).
Linker = None

class LinkerError(RuntimeError):
    # Import-compatibility stub for the real driver's linker error type.
    pass

class CudaAPIError(RuntimeError):
    # Import-compatibility stub for the real driver's CUDA API error type.
    pass
def launch_kernel(*args, **kwargs):
    """Unconditionally raise: direct kernel launches are unsupported here."""
    raise RuntimeError(
        'Launching kernels directly is not supported in the simulator')
|
def host_to_device(dst, src, size, stream=0):
    """Copy the first `size` bytes of `src` into `dst` (stream is ignored)."""
    n_bytes = size
    dst.view('u1')[:n_bytes] = src.view('u1')[:n_bytes]
|
slice.ts
|
import { shuffle } from 'd3-array';
import { arrange, desc } from './arrange';
import { SingleOrArray } from './helpers/singleOrArray';
import { Comparator, Key, TidyFn } from './types';
/**
* Truncates the array to the specified range
*/
export function slice<T extends object>(
  start: number,
  end?: number
): TidyFn<T> {
  // Delegate directly to Array.prototype.slice over the given range.
  return (items: T[]): T[] => items.slice(start, end);
}
// -------------------------------------------------------------------
/**
 * Truncates the array to the first N items.
 * Equivalent to slice(0, n).
 */
export const sliceHead = <T extends object>(n: number) => slice<T>(0, n);
// -------------------------------------------------------------------
/**
 * Truncates the array to the last N items.
 * NOTE(review): n === 0 returns the whole array, since slice(-0) is
 * slice(0) — confirm this edge case is intended.
 */
export const sliceTail = <T extends object>(n: number) => slice<T>(-n);
|
* Truncates the array to the first N items ordered by some key
*/
export function sliceMin<T extends object>(
  n: number,
  orderBy: SingleOrArray<Key | Comparator<T>>
): TidyFn<T> {
  // Sort (ascending) via arrange, then keep the leading n items.
  const _sliceMin: TidyFn<T> = (items: T[]): T[] =>
    arrange<T>(orderBy)(items).slice(0, n);
  return _sliceMin;
}
// -------------------------------------------------------------------
/**
 * Truncates the array to the N items that come last in the ordering
 * defined by `orderBy` (i.e. the "largest" values), returned largest-first.
 */
export function sliceMax<T extends object>(
  n: number,
  orderBy: SingleOrArray<Key | Comparator<T>>
): TidyFn<T> {
  // note: we use desc() so we get proper handling of nullish values
  // unless they provided an explicit comparator.
  const _sliceMax: TidyFn<T> = (items: T[]): T[] =>
    typeof orderBy === 'function'
      ? arrange<T>(orderBy)(items).slice(-n).reverse()
      : arrange<T>(desc(orderBy as any))(items).slice(0, n);
  return _sliceMax;
}
// -------------------------------------------------------------------
type SampleOptions = {
  // When true, sample with replacement (the same item may appear repeatedly).
  replace?: boolean;
};
/**
 * Randomly samples N items from the array, with or without replacement.
 * (Previous description was copy-pasted from sliceMax and was incorrect.)
 */
export function sliceSample<T extends object>(
  n: number,
  options?: SampleOptions | null | undefined
): TidyFn<T> {
  options = options ?? {};
  const { replace } = options;
  const _sliceSample: TidyFn<T> = (items: T[]) => {
    if (!items.length) return items.slice();
    // sample items with replacement
    if (replace) {
      const sliced = [];
      for (let i = 0; i < n; ++i) {
        sliced.push(items[Math.floor(Math.random() * items.length)]);
      }
      return sliced;
    }
    // sample items without replacement: shuffle a copy, take the first n
    return shuffle(items.slice()).slice(0, n);
  };
  return _sliceSample;
}
|
// -------------------------------------------------------------------
/**
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.