file_name: string (length 3–137)
prefix: string (length 0–918k)
suffix: string (length 0–962k)
middle: string (length 0–812k)
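Each row is a fill-in-the-middle (FIM) sample: concatenating a row's prefix, middle, and suffix fields reproduces the original source file named by file_name. A minimal Python sketch of reassembling a row (the dict-based row shape is an assumption; only the column names come from the schema above):

def reassemble(row):
    # The original file is the prefix, then the held-out middle, then the suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

# Toy row, abridged from the first sample below (values shortened for illustration).
row = {
    "file_name": "profiles_controller.ts",
    "prefix": "export class ",
    "middle": "ProfilesController",
    "suffix": " extends BaseController { }",
}
assert reassemble(row) == "export class ProfilesController extends BaseController { }"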
profiles_controller.ts
import { BaseController } from "./base_controller"
import { Users } from "../db/users"

export class ProfilesController extends BaseController {
  constructor() {
    super()
  }

  show({ match }: ActionRequest) {
    let username = match.groups!.username
    // save a user's profile if we just happen to be looking at it
    let user = Users.fromHTML(document.documentElement.outerHTML, { userId: username })
    Users.persist(user)
  }
}
test_per.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import unittest from .utils import Asn1ToolsBaseTest import asn1tools import sys from copy import deepcopy sys.path.append('tests/files') sys.path.append('tests/files/3gpp') sys.path.append('tests/files/oma') from rrc_8_6_0 import EXPECTED as RRC_8_6_0 from s1ap_14_4_0 import EXPECTED as S1AP_14_4_0 from x691_a4 import EXPECTED as X691_A4 from ulp import EXPECTED as OMA_ULP class Asn1ToolsPerTest(Asn1ToolsBaseTest): maxDiff = None def test_boolean(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= BOOLEAN " "B ::= SEQUENCE { " " a BOOLEAN, " " b BOOLEAN " "} " "END", 'per') datas = [ ('A', True, b'\x80'), ('A', False, b'\x00'), ('B', {'a': False, 'b': False}, b'\x00'), ('B', {'a': True, 'b': False}, b'\x80'), ('B', {'a': False, 'b': True}, b'\x40'), ('B', {'a': True, 'b': True}, b'\xc0') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('A', b'') self.assertEqual(str(cm.exception), 'A: out of data (At bit offset: 0)') def test_integer(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= INTEGER " "B ::= INTEGER (5..99) " "C ::= SEQUENCE { " " a BOOLEAN, " " b INTEGER, " " c BOOLEAN, " " d INTEGER (-10..400) " "} " "D ::= INTEGER (0..254) " "E ::= INTEGER (0..255) " "F ::= INTEGER (0..256) " "G ::= INTEGER (0..65535) " "H ::= INTEGER (0..65536) " "I ::= INTEGER (0..10000000000) " "J ::= SEQUENCE { " " a BOOLEAN, " " b INTEGER (0..254), " " c INTEGER (0..255), " " d BOOLEAN, " " e INTEGER (0..256) " "} " "K ::= B (6..7) " "L ::= SEQUENCE { " " a K (7..7) " "} " "M ::= INTEGER (5..99, ..., 101..105) " "N ::= INTEGER (0..65535) " "O ::= INTEGER (0..65536) " "P ::= INTEGER (0..2147483647) " "Q ::= INTEGER (0..4294967295) " "R ::= INTEGER (0..4294967296) " "S ::= SEQUENCE { " " a BOOLEAN, " " b INTEGER (-10000..704000000000000001), " " c BOOLEAN " "} " "END", 'per') datas = [ ('A', 32768, b'\x03\x00\x80\x00'), ('A', 32767, b'\x02\x7f\xff'), ('A', 256, b'\x02\x01\x00'), ('A', 255, b'\x02\x00\xff'), ('A', 128, b'\x02\x00\x80'), ('A', 127, b'\x01\x7f'), ('A', 2, b'\x01\x02'), ('A', 1, b'\x01\x01'), ('A', 0, b'\x01\x00'), ('A', -1, b'\x01\xff'), ('A', -128, b'\x01\x80'), ('A', -129, b'\x02\xff\x7f'), ('A', -256, b'\x02\xff\x00'), ('A', -32768, b'\x02\x80\x00'), ('A', -32769, b'\x03\xff\x7f\xff'), ('B', 5, b'\x00'), ('B', 6, b'\x02'), ('B', 99, b'\xbc'), ('C', {'a': True, 'b': 43554344223, 'c': False, 'd': -9}, b'\x80\x05\x0a\x24\x0a\x8d\x1f\x00\x00\x01'), ('D', 253, b'\xfd'), ('E', 253, b'\xfd'), ('F', 253, b'\x00\xfd'), ('G', 253, b'\x00\xfd'), ('H', 253, b'\x00\xfd'), ('H', 256, b'\x40\x01\x00'), ('H', 65536, b'\x80\x01\x00\x00'), ('I', 0, b'\x00\x00'), ('I', 1, b'\x00\x01'), ('I', 10000000000, b'\x80\x02\x54\x0b\xe4\x00'), ('J', {'a': False, 'b': 253, 'c': 253, 'd': False, 'e': 253}, b'\x7e\x80\xfd\x00\x00\xfd'), ('K', 7, b'\x80'), ('L', {'a': 7}, b''), ('M', 103, b'\x80\x01\x67'), ('N', 1, b'\x00\x01'), ('N', 255, b'\x00\xff'), ('N', 256, b'\x01\x00'), ('N', 65535, b'\xff\xff'), ('O', 1, b'\x00\x01'), ('O', 255, b'\x00\xff'), ('O', 256, b'\x40\x01\x00'), ('O', 65535, b'\x40\xff\xff'), ('O', 65536, b'\x80\x01\x00\x00'), ('P', 1, b'\x00\x01'), ('P', 255, b'\x00\xff'), ('P', 256, b'\x40\x01\x00'), ('P', 65535, b'\x40\xff\xff'), ('P', 65536, b'\x80\x01\x00\x00'), ('P', 16777215, b'\x80\xff\xff\xff'), ('P', 16777216, b'\xc0\x01\x00\x00\x00'), ('P', 100000000, 
b'\xc0\x05\xf5\xe1\x00'), ('Q', 4294967295, b'\xc0\xff\xff\xff\xff'), ('R', 4294967296, b'\x80\x01\x00\x00\x00\x00'), ('S', {'a': True, 'b': 0, 'c': True}, b'\x90\x27\x10\x80') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_real(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= REAL " "B ::= SEQUENCE { " " a REAL, " " ... " "}" "END", 'per') datas = [ ('A', 0.0, b'\x00'), ('A', -0.0, b'\x00'), ('A', float('inf'), b'\x01\x40'), ('A', float('-inf'), b'\x01\x41'), ('A', 1.0, b'\x03\x80\x00\x01'), ('B', {'a': 1.0}, b'\x00\x03\x80\x00\x01'), ('B', {'a': 1000000000}, b'\x00\x05\x80\x09\x1d\xcd\x65') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_bit_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= BIT STRING " "B ::= BIT STRING (SIZE (9)) " "C ::= BIT STRING (SIZE (5..7)) " "D ::= SEQUENCE { " " a BOOLEAN, " " b BIT STRING " "} " "E ::= SEQUENCE { " " a BOOLEAN, " " b BIT STRING (SIZE(1)), " " c BIT STRING (SIZE(16)) " "} " "F ::= BIT STRING { " " a (0), " " b (1), " " c (2) " "} " "G ::= SEQUENCE { " " a BIT STRING, " " b BOOLEAN " "} " "H ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(1..255)) " "I ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(1..256)) " "J ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(2..256)) " "K ::= SEQUENCE SIZE (0..2) OF BIT STRING (SIZE(2..257)) " "L ::= BIT STRING (SIZE (1..160, ...)) " "M ::= SEQUENCE { " " a BOOLEAN, " " b BIT STRING (SIZE (1..160, ...)) " "} " "N ::= BIT STRING (SIZE(0..65535)) " "O ::= BIT STRING (SIZE(0..65536)) " "END", 'per') datas = [ ('A', (b'\x40', 4), b'\x04\x40'), ('A', (299 * b'\x55' + b'\x54', 2399), b'\x89\x5f' + 299 * b'\x55' + b'\x54'), ('A', (2048 * b'\x55', 16384), b'\xc1' + 2048 * b'\x55' + b'\x00'), ('B', (b'\x12\x80', 9), b'\x12\x80'), ('C', (b'\x34', 6), b'\x40\x34'), ('D', {'a': True, 'b': (b'\x40', 4)}, b'\x80\x04\x40'), ('E', {'a': True, 'b': (b'\x80', 1), 'c': (b'\x7f\x01', 16)}, b'\xdf\xc0\x40'), ('F', (b'\x80', 1), b'\x01\x80'), ('F', (b'\xe0', 3), b'\x03\xe0'), ('F', (b'\x01', 8), b'\x08\x01'), ('G', {'a': (b'\x80', 2), 'b': True}, b'\x02\xa0'), ('G', {'a': (b'', 0), 'b': True}, b'\x00\x80'), ('H', [(b'\x40', 2)], b'\x40\x40\x40'), ('I', [(b'\x40', 2)], b'\x40\x01\x40'), ('J', [(b'\x40', 2)], b'\x40\x00\x40'), ('K', [(b'\x40', 2)], b'\x40\x00\x40'), ('L', (b'\x80', 1), b'\x00\x00\x80'), ('M', {'a': True, 'b': (b'\xe0', 3)}, b'\x80\x80\xe0'), ('N', (b'', 0), b'\x00\x00'), ('O', (b'', 0), b'\x00') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) # Trailing zero bits should be stripped when encoding named # bit list. Default value is not encoded, but part of # decoded. Also ignore dangling bits. 
datas = [ ('F', (b'\x80', 2), b'\x01\x80', (b'\x80', 1)), ('F', (b'\x40', 3), b'\x02\x40', (b'\x40', 2)), ('F', (b'\x00', 3), b'\x00', (b'', 0)), ('F', (b'\x00', 8), b'\x00', (b'', 0)) ] for type_name, decoded_1, encoded, decoded_2 in datas: self.assertEqual(foo.encode(type_name, decoded_1), encoded) self.assertEqual(foo.decode(type_name, encoded), decoded_2) def test_octet_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= OCTET STRING " "B ::= OCTET STRING (SIZE (2)) " "C ::= OCTET STRING (SIZE (3)) " "D ::= OCTET STRING (SIZE (3..7)) " "E ::= SEQUENCE { " " a BOOLEAN, " " b OCTET STRING " "} " "F ::= SEQUENCE { " " a BOOLEAN, " " b OCTET STRING (SIZE(1)), " " c OCTET STRING (SIZE(2)) " "} " "G ::= SEQUENCE { " " a BOOLEAN, " " b OCTET STRING (SIZE(3)) " "} " "H ::= OCTET STRING (SIZE (65535)) " "I ::= OCTET STRING (SIZE (65536)) " "J ::= OCTET STRING (SIZE (1..MAX)) " "K ::= OCTET STRING (SIZE (MIN..5)) " "L ::= OCTET STRING (SIZE (1..2, ...)) " "M ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(1..255)) " "N ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(1..256)) " "O ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(2..256)) " "P ::= SEQUENCE SIZE (0..2) OF OCTET STRING (SIZE(2..257)) " "END", 'per') datas = [ ('A', b'\x00', b'\x01\x00'), ('A', 500 * b'\x00', b'\x81\xf4' + 500 * b'\x00'), ('B', b'\xab\xcd', b'\xab\xcd'), ('C', b'\xab\xcd\xef', b'\xab\xcd\xef'), ('D', b'\x89\xab\xcd\xef', b'\x20\x89\xab\xcd\xef'), ('E', {'a': True, 'b': b'\x00'}, b'\x80\x01\x00'), ('E', {'a': True, 'b': b'\x00\x01\x02'}, b'\x80\x03\x00\x01\x02'), ('F', {'a': True, 'b': b'\x12', 'c': b'\x34\x56'}, b'\x89\x1a\x2b\x00'), ('G', {'a': True, 'b': b'\x00\x01\x02'}, b'\x80\x00\x01\x02'), ('H', 32767 * b'\x01\x02' + b'\x01', 32767 * b'\x01\x02' + b'\x01'), ('I', 32768 * b'\x01\x02', b'\xc4' + 32768 * b'\x01\x02' + b'\x00'), ('A', 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02', b'\xbf\xff' + 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02'), ('A', 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03', b'\xc1' + 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03' + b'\x00'), ('A', 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03\x00', b'\xc1' + 4095 * b'\x00\x01\x02\x03' + b'\x00\x01\x02\x03' + b'\x01' + b'\x00'), ('J', b'\x12', b'\x01\x12'), ('K', b'', b'\x00'), ('L', b'\x12\x34', b'\x40\x12\x34'), ('L', b'\x12\x34\x56', b'\x80\x03\x12\x34\x56'), ('M', [b'\x12\x34'], b'\x40\x40\x12\x34'), ('M', [b'\x12\x34\x56\x78'], b'\x40\xc0\x12\x34\x56\x78'), ('N', [b'\x12\x34'], b'\x40\x01\x12\x34'), ('N', [b'\x12\x34\x56\x78'], b'\x40\x03\x12\x34\x56\x78'), ('O', [b'\x12\x34\x56'], b'\x40\x40\x12\x34\x56'), ('O', [b'\x12\x34\x56\x78'], b'\x40\x80\x12\x34\x56\x78'), ('P', [b'\x12\x34\x56'], b'\x40\x01\x12\x34\x56'), ('P', [b'\x12\x34\x56\x78'], b'\x40\x02\x12\x34\x56\x78') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_object_identifier(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= OBJECT IDENTIFIER " "B ::= SEQUENCE { " " a BOOLEAN, " " b OBJECT IDENTIFIER " "} " "END", 'per') datas = [ ('A', '1.2', b'\x01\x2a'), ('A', '1.2.3321', b'\x03\x2a\x99\x79'), ('B', {'a': True, 'b': '1.2'}, b'\x80\x01\x2a') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_external(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= EXTERNAL " "END", 'per') datas = [ ('A', {'encoding': ('octet-aligned', 
b'\x12')}, b'\x08\x01\x12') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_enumerated(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= ENUMERATED { one(1) } " "B ::= ENUMERATED { zero(0), one(1), ... } " "C ::= ENUMERATED { one(1), four(4), two(2), ..., six(6), nine(9) } " "D ::= ENUMERATED { a, ..., " "aa, ab, ac, ad, ae, af, ag, ah, ai, aj, ak, al, am, an, ao, ap, " "aq, ar, as, at, au, av, aw, ax, ay, az, ba, bb, bc, bd, be, bf, " "bg, bh, bi, bj, bk, bl, bm, bn, bo, bp, bq, br, bs, bt, bu, bv, " "bw, bx, by, bz, ca, cb, cc, cd, ce, cf, cg, ch, ci, cj, ck, cl, " "cm, cn, co, cp, cq, cr, cs, ct, cu, cv, cw, cx, cy, cz, da, db, " "dc, dd, de, df, dg, dh, di, dj, dk, dl, dm, dn, do, dp, dq, dr, " "ds, dt, du, dv, dw, dx, dy, dz, ea, eb, ec, ed, ee, ef, eg, eh, " "ei, ej, ek, el, em, en, eo, ep, eq, er, es, et, eu, ev, ew, ex, " "ey, ez, fa, fb, fc, fd, fe, ff, fg, fh, fi, fj, fk, fl, fm, fn, " "fo, fp, fq, fr, fs, ft, fu, fv, fw, fx, fy, fz, ga, gb, gc, gd, " "ge, gf, gg, gh, gi, gj, gk, gl, gm, gn, go, gp, gq, gr, gs, gt, " "gu, gv, gw, gx, gy, gz, ha, hb, hc, hd, he, hf, hg, hh, hi, hj, " "hk, hl, hm, hn, ho, hp, hq, hr, hs, ht, hu, hv, hw, hx, hy, hz, " "ia, ib, ic, id, ie, if, ig, ih, ii, ij, ik, il, im, in, io, ip, " "iq, ir, is, it, iu, iv, iw, ix, iy, iz, ja, jb, jc, jd, je, jf, " "jg, jh, ji, jj, jk, jl, jm, jn, jo, jp, jq, jr, js, jt, ju, jv, " "jw, jx, jy, jz } " "E ::= SEQUENCE { " " a BOOLEAN, " " b B " "} " "F ::= SEQUENCE {" " a ENUMERATED { zero(0), one(1) } DEFAULT one" "}" "END", 'per') datas = [ ('A', 'one', b''), ('B', 'zero', b'\x00'), ('B', 'one', b'\x40'), ('C', 'one', b'\x00'), ('C', 'two', b'\x20'), ('C', 'four', b'\x40'), ('C', 'six', b'\x80'), ('C', 'nine', b'\x81'), ('D', 'aa', b'\x80'), ('D', 'cl', b'\xbf'), ('D', 'cm', b'\xc0\x50\x00'), ('D', 'jv', b'\xc0\x7f\xc0'), ('D', 'jw', b'\xc0\x80\x40\x00'), ('D', 'jz', b'\xc0\x80\x40\xc0'), ('E', {'a': True, 'b': 'zero'}, b'\x80'), ('E', {'a': True, 'b': 'one'}, b'\xa0'), ('F', {'a': 'zero'}, b'\x80'), ('F', {'a': 'one'}, b'\x00') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) # Default value is not encoded, but part of decoded. datas = [ ('F', {}, b'\x00', {'a': 'one'}) ] for type_name, decoded_1, encoded_1, decoded_2 in datas: self.assertEqual(foo.encode(type_name, decoded_1), encoded_1) self.assertEqual(foo.decode(type_name, encoded_1), decoded_2) # Bad root index. with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('C', b'\x70') self.assertEqual(str(cm.exception), "C: Expected enumeration index 0, 1 or 2, but got 3.") # Unknown additions index. self.assertEqual(foo.decode('C', b'\x8f'), None) def test_sequence(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= SEQUENCE {} " "B ::= SEQUENCE { " " a INTEGER DEFAULT 0 " "} " "C ::= SEQUENCE { " " a BOOLEAN, " " ... " "} " "D ::= SEQUENCE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN " " ]] " "} " "E ::= SEQUENCE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN " " ]], " " ... " "} " "F ::= SEQUENCE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN " " ]], " " ..., " " c BOOLEAN " "} " "G ::= SEQUENCE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN " " ]], " " [[ " " c BOOLEAN " " ]], " " ..., " " d BOOLEAN " "} " "H ::= SEQUENCE { " " a BOOLEAN, " " ..., " " ... 
" "} " "I ::= SEQUENCE { " " a BOOLEAN, " " ..., " " b BOOLEAN " "} " "J ::= SEQUENCE { " " a BOOLEAN, " " ..., " " b BOOLEAN OPTIONAL " "} " "K ::= SEQUENCE { " " a BOOLEAN, " " ..., " " b BOOLEAN, " " c BOOLEAN " "} " "L ::= SEQUENCE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN, " " c BOOLEAN " " ]] " "} " "M ::= SEQUENCE { " " a BOOLEAN, " " ..., " " [[ " " b SEQUENCE { " " a INTEGER" " } OPTIONAL, " " c BOOLEAN " " ]] " "} " "N ::= SEQUENCE { " " a BOOLEAN DEFAULT TRUE " "} " "O ::= SEQUENCE { " " ..., " " a BOOLEAN DEFAULT TRUE " "} " "P ::= SEQUENCE { " " ..., " " [[ " " a BOOLEAN, " " b BOOLEAN DEFAULT TRUE " " ]] " "} " "Q ::= SEQUENCE { " " a C, " " b INTEGER " "} " "R ::= SEQUENCE { " " a D, " " b INTEGER " "} " "S ::= SEQUENCE { " " a BOOLEAN, " " ..., " " b SEQUENCE { " " a BOOLEAN, " " b BOOLEAN OPTIONAL, " " ... " " } " "} " "T ::= SEQUENCE { " " a SEQUENCE OF T OPTIONAL " "} " "U ::= SEQUENCE { " " ..., " " a SEQUENCE { " " a INTEGER " " } " "} " "V ::= SEQUENCE { " " ..., " " a OCTET STRING, " " b INTEGER " "} " "W ::= SEQUENCE { " " a BOOLEAN, " " ..., " " b NULL " "} " "END", 'per') datas = [ ('A', {}, b''), ('O', {}, b'\x00'), ('B', {'a': 0}, b'\x00'), ('B', {'a': 1}, b'\x80\x01\x01'), ('C', {'a': True}, b'\x40'), ('D', {'a': True}, b'\x40'), ('E', {'a': True}, b'\x40'), ('H', {'a': True}, b'\x40'), ('I', {'a': True}, b'\x40'), ('J', {'a': True}, b'\x40'), ('K', {'a': True}, b'\x40'), ('L', {'a': True}, b'\x40'), ('M', {'a': True}, b'\x40'), ('N', {'a': True}, b'\x00'), ('N', {'a': False}, b'\x80'), ('P', {}, b'\x00'), ('O', {'a': True}, b'\x80\x80\x01\x80'), ('O', {'a': False}, b'\x80\x80\x01\x00'), ('P', {'a': True, 'b': True}, b'\x80\x80\x01\x40'), ('P', {'a': True, 'b': False}, b'\x80\x80\x01\xc0'), ('D', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'), ('E', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'), ('F', {'a': True, 'c': True}, b'\x60'), ('G', {'a': True, 'd': True}, b'\x60'), ('I', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'), ('J', {'a': True, 'b': True}, b'\xc0\x40\x01\x80'), ('K', {'a': True, 'b': True}, b'\xc0\xc0\x01\x80'), ('F', {'a': True, 'b': True, 'c': True}, b'\xe0\x20\x01\x80'), ('K', {'a': True, 'b': True, 'c': True}, b'\xc0\xe0\x01\x80\x01\x80'), ('L', {'a': True, 'b': True, 'c': True}, b'\xc0\x40\x01\xc0'), ('G', {'a': True, 'b': True, 'd': True}, b'\xe0\x60\x01\x80'), ('G', {'a': True, 'b': True, 'c': True, 'd': True}, b'\xe0\x70\x01\x80\x01\x80'), ('M', {'a': True, 'b': {'a': 5}, 'c': True}, b'\xc0\x40\x04\x80\x01\x05\x80'), ('Q', {'a': {'a': True}, 'b': 100}, b'\x40\x01\x64'), ('R', {'a': {'a': True, 'b': True}, 'b': 100}, b'\xc0\x40\x01\x80\x01\x64'), ('S', {'a': True, 'b': {'a': True, 'b': True}}, b'\xc0\x40\x01\x70'), ('T', {'a': [{}]}, b'\x80\x01\x00'), ('T', {'a': [{'a': []}]}, b'\x80\x01\x80\x00'), ('V', {'a': 5000 * b'\x00', 'b': 1000}, b'\x81\xc0\x93\x8a\x93\x88' + 5000 * b'\x00' + b'\x03\x02\x03\xe8'), ('W', {'a': True, 'b': None}, b'\xc0\x40\x00') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) # Non-symmetrical encoding and decoding because default values # are not encoded, but part of the decoded (given that the # root and addition is present). self.assertEqual(foo.encode('N', {}), b'\x00') self.assertEqual(foo.decode('N', b'\x00'), {'a': True}) self.assertEqual(foo.encode('P', {'a': True}), b'\x80\x80\x01\x40') self.assertEqual(foo.decode('P', b'\x80\x80\x01\x40'), {'a': True, 'b': True}) # Decode D as C. Extension addition "a.b" should be skipped. 
self.assertEqual(foo.decode('C', b'\xc0\x40\x01\x80'), {'a': True}) # Decode R as Q. Extension addition "a.b" should be skipped. self.assertEqual(foo.decode('Q', b'\xc0\x40\x01\x80\x01\x64'), {'a': {'a': True}, 'b': 100}) # Decode error of present addition member (out of data). with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('U', b'\x80\x80\x03\x02\x05') self.assertEqual(str(cm.exception), 'U.a.a: out of data (At bit offset: 32)') # Missing root member. with self.assertRaises(asn1tools.EncodeError) as cm: foo.encode('K', {'b': True}) self.assertEqual(str(cm.exception), "K: Sequence member 'a' not found in {'b': True}.") def test_sequence_of(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= SEQUENCE OF INTEGER " "B ::= SEQUENCE SIZE (2) OF INTEGER " "C ::= SEQUENCE SIZE (1..5) OF INTEGER " "D ::= SEQUENCE SIZE (1..2, ...) OF INTEGER " "E ::= SEQUENCE { " " a BOOLEAN, " " b SEQUENCE OF INTEGER " "} " "F ::= SEQUENCE { " " a BOOLEAN, " " b SEQUENCE SIZE(1) OF INTEGER " "} " "G ::= SEQUENCE SIZE (1..2, ..., 6..7) OF INTEGER " "H ::= SEQUENCE SIZE (1..MAX) OF INTEGER " "I ::= SEQUENCE SIZE (1..10000) OF OCTET STRING " "END", 'per') datas = [ ('A', [], b'\x00'), ('A', [1], b'\x01\x01\x01'), ('A', [1, 2], b'\x02\x01\x01\x01\x02'), ('A', 1000 * [1, 2], b'\x87\xd0' + 1000 * b'\x01\x01\x01\x02'), ('A', 16384 * [1], b'\xc1' + 16384 * b'\x01\x01' + b'\x00'), ('A', 65535 * [1], b'\xc3' + 49152 * b'\x01\x01' + b'\xbf\xff' + 16383 * b'\x01\x01'), ('A', 100000 * [1], b'\xc4' + 65536 * b'\x01\x01' + b'\xc2' + 32768 * b'\x01\x01' + b'\x86\xa0' + 1696 * b'\x01\x01'), ('B', [1, 2], b'\x01\x01\x01\x02'), ('B', [4663, 222322233], b'\x02\x12\x37\x04\x0d\x40\x5e\x39'), ('C', [1], b'\x00\x01\x01'), ('C', [1, 2], b'\x20\x01\x01\x01\x02'), ('D', [2, 1], b'\x40\x01\x02\x01\x01'), ('E', {'a': False, 'b': []}, b'\x00\x00'), ('E', {'a': False, 'b': [1]}, b'\x00\x01\x01\x01'), ('F', {'a': False, 'b': [1]}, b'\x00\x01\x01'), ('G', 6 * [1], b'\x80\x06\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01'), ('H', [1], b'\x01\x01\x01'), ('I', 300 * [b'\x56'], b'\x01\x2b' + 300 * b'\x01\x56') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_choice(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= CHOICE { " " a BOOLEAN " "} " "B ::= CHOICE { " " a BOOLEAN, " " ... " "} " "C ::= CHOICE { " " a BOOLEAN, " " b INTEGER, " " ..., " " [[ " " c BOOLEAN " " ]] " "} " "D ::= CHOICE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN " " ]], " " ... " "} " "E ::= CHOICE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN " " ]], " " [[ " " c BOOLEAN " " ]], " " ... " "} " "F ::= CHOICE { " " a BOOLEAN, " " ..., " " ... 
" "} " "G ::= CHOICE { " " a BOOLEAN, " " ..., " " b BOOLEAN " "} " "H ::= CHOICE { " " a BOOLEAN, " " ..., " " b BOOLEAN, " " c BOOLEAN " "} " "I ::= CHOICE { " " a BOOLEAN, " " ..., " " [[ " " b BOOLEAN, " " c BOOLEAN " " ]] " "} " "J ::= CHOICE { " " a BOOLEAN, " " ..., " " [[ " " b CHOICE { " " a INTEGER" " }, " " c BOOLEAN " " ]] " "} " "K ::= CHOICE { " " a BOOLEAN, " " b BOOLEAN, " " c BOOLEAN, " " ..., " " d BOOLEAN, " " e BOOLEAN, " " f BOOLEAN, " " g BOOLEAN, " " h BOOLEAN " "} " "L ::= CHOICE { " " a BOOLEAN, " " b BOOLEAN, " " c BOOLEAN, " " ..., " " d BOOLEAN, " " e BOOLEAN, " " f BOOLEAN, " " g BOOLEAN, " " h BOOLEAN, " " i BOOLEAN " "} " "END", 'per') datas = [ ('A', ('a', True), b'\x80'), ('B', ('a', True), b'\x40'), ('C', ('a', True), b'\x20'), ('C', ('b', 1), b'\x40\x01\x01'), ('C', ('c', True), b'\x80\x01\x80'), ('D', ('a', True), b'\x40'), ('D', ('b', True), b'\x80\x01\x80'), ('E', ('a', True), b'\x40'), ('E', ('b', True), b'\x80\x01\x80'), ('E', ('c', True), b'\x81\x01\x80'), ('F', ('a', True), b'\x40'), ('G', ('a', True), b'\x40'), ('G', ('b', True), b'\x80\x01\x80'), ('H', ('a', True), b'\x40'), ('H', ('b', True), b'\x80\x01\x80'), ('H', ('c', True), b'\x81\x01\x80'), ('I', ('a', True), b'\x40'), ('I', ('b', True), b'\x80\x01\x80'), ('I', ('c', True), b'\x81\x01\x80'), ('J', ('a', True), b'\x40'), ('J', ('b', ('a', 1)), b'\x80\x02\x01\x01'), ('J', ('c', True), b'\x81\x01\x80'), ('L', ('i', True), b'\x85\x01\x80') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) # Bad root index. with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('K', b'\x70') self.assertEqual(str(cm.exception), "K: Expected choice index 0, 1 or 2, but got 3.") # Bad additions index becomes None. decoded = foo.decode('K', b'\x85\x01\x80') self.assertEqual(decoded, (None, None)) # Bad value. with self.assertRaises(asn1tools.EncodeError) as cm: foo.encode('K', ('i', True), check_types=False) self.assertEqual( str(cm.exception), "K: Expected choice 'a', 'b', 'c', 'd', 'e', 'f', 'g' or 'h', but " "got 'i'.") # Bad value. 
with self.assertRaises(asn1tools.EncodeError) as cm: foo.encode('A', ('b', True), check_types=False, check_constraints=False) self.assertEqual(str(cm.exception), "A: Expected choice 'a', but got 'b'.") def test_utf8_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= SEQUENCE { " " a BOOLEAN, " " b UTF8String, " " c UTF8String OPTIONAL" "} " "B ::= UTF8String (SIZE (10)) " "C ::= UTF8String (SIZE (0..1)) " "D ::= UTF8String (SIZE (2..3) ^ (FROM (\"a\"..\"g\"))) " "E ::= UTF8String " "END", 'per') datas = [ ('A', {'a': True, 'b': u''}, b'\x40\x00'), ('A', {'a': True, 'b': u'1', 'c': u'foo'}, b'\xc0\x01\x31\x03\x66\x6f\x6f'), ('A', {'a': True, 'b': 300 * u'1'}, b'\x40\x81\x2c' + 300 * b'\x31'), ('B', u'1234567890', b'\x0a\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30'), ('C', u'', b'\x00'), ('C', u'P', b'\x01\x50'), ('D', u'agg', b'\x03\x61\x67\x67'), ('E', u'bar', b'\x03\x62\x61\x72'), ('E', u'a\u1010c', b'\x05\x61\xe1\x80\x90\x63'), ('E', 15000 * u'123' + u'\u1010', b'\xc2' + 10922 * b'123' + b'12\xaf\xcb3' + 4077 * b'123' + b'\xe1\x80\x90'), ('E', u'1𐈃Q', b'\x06\x31\xf0\x90\x88\x83\x51') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('A', b'\x40\xc5\x00\x00\x00\x00') self.assertEqual(str(cm.exception), 'A.b: Bad length determinant fragmentation value 0xc5.') with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('A', b'\x40\xc1\x00\x00\x00\x00') self.assertEqual(str(cm.exception), 'A.b: out of data (At bit offset: 16)') def test_numeric_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= NumericString (FROM (\"0\"..\"2\", ..., \"4\"..\"5\")) " "B ::= NumericString (SIZE (1..4)) " "C ::= NumericString (SIZE (1..4, ...)) " "D ::= NumericString (SIZE (1..4, ..., 6..7)) " "E ::= NumericString (SIZE (0..MAX)) " "F ::= NumericString (SIZE (2..MAX)) " "END", 'per') datas = [ ('A', '2', b'\x01\x30'), ('B', '1234', b'\xc0\x23\x45'), ('C', '1234', b'\x60\x23\x45'), ('D', '1234', b'\x60\x23\x45'), ('E', '', b'\x00'), ('F', '345', b'\x03\x45\x60') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) # Encode size extension is not yet supported. with self.assertRaises(NotImplementedError) as cm: foo.encode('D', '123456') self.assertEqual( str(cm.exception), "String size extension is not yet implemented.") # Decode size extension is not yet supported. 
with self.assertRaises(NotImplementedError) as cm: foo.decode('D', b'\x80\x06\x23\x45\x67') self.assertEqual( str(cm.exception), "String size extension is not yet implemented.") def test_printable_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "D ::= SEQUENCE { " " a BOOLEAN, " " b PrintableString (SIZE (36)), " " c BOOLEAN " "} " "E ::= SEQUENCE { " " a BOOLEAN, " " b PrintableString (SIZE (0..22)), " " c BOOLEAN " "} " "F ::= SEQUENCE { " " a BOOLEAN, " " b PrintableString, " " c BOOLEAN " "} " "END", 'per') datas = [ ('D', {'a': True, 'b': 12 * '123', 'c': True}, b'\x80\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33' b'\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31\x32\x33\x31' b'\x32\x33\x31\x32\x33\x80'), ('E', {'a': True, 'b': '', 'c': True}, b'\x82'), ('E', {'a': True, 'b': '1', 'c': True}, b'\x84\x31\x80'), ('F', {'a': True, 'b': '123', 'c': True}, b'\x80\x03\x31\x32\x33\x80') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_ia5_string(se
lf): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= IA5String " "END", 'per') datas = [ ('A', 1638 * '1234567890' + '123', b'\xbf\xff' + 1638 * b'\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30' + b'\x31\x32\x33'), ('A', 1638 * '1234567890' + '1234', b'\xc1' + 1638 * b'\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30' + b'\x31\x32\x33\x34' + b'\x00'), ('A', 1638 * '1234567890' + '12345', b'\xc1' + 1638 * b'\x31\x32\x33\x34\x35\x36\x37\x38\x39\x30' + b'\x31\x32\x33\x34' + b'\x01' + b'\x35') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_visible_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= VisibleString (SIZE (19..133)) " "B ::= VisibleString (SIZE (5)) " "C ::= VisibleString (SIZE (19..1000)) " "D ::= SEQUENCE { " " a BOOLEAN, " " b VisibleString (SIZE (1)) " "} " "E ::= SEQUENCE { " " a BOOLEAN, " " b VisibleString (SIZE (2)) " "} " "F ::= SEQUENCE { " " a BOOLEAN, " " b VisibleString (SIZE (3)) " "} " "G ::= SEQUENCE { " " a BOOLEAN, " " b VisibleString (SIZE (0..1)) " "} " "H ::= SEQUENCE { " " a BOOLEAN, " " b VisibleString (SIZE (0..2)) " "} " "I ::= VisibleString (FROM (\"a\"..\"z\")) (SIZE (1..255)) " "J ::= VisibleString (FROM (\"a\")) " "K ::= VisibleString (FROM (\"a\"..\"a\")) " "END", 'per') datas = [ ('A', 'HejHoppHappHippAbcde', b'\x02\x48\x65\x6a\x48\x6f\x70\x70\x48\x61\x70\x70\x48\x69\x70\x70' b'\x41\x62\x63\x64\x65'), ('B', 'Hejaa', b'\x48\x65\x6a\x61\x61'), ('C', 17 * 'HejHoppHappHippAbcde', b'\x01\x41' + 17 * (b'\x48\x65\x6a\x48\x6f\x70\x70\x48\x61\x70' b'\x70\x48\x69\x70\x70\x41\x62\x63\x64\x65')), ('D', {'a': True, 'b': '1'}, b'\x98\x80'), ('E', {'a': True, 'b': '12'}, b'\x98\x99\x00'), ('F', {'a': True, 'b': '123'}, b'\x80\x31\x32\x33'), ('G', {'a': True, 'b': '1'}, b'\xcc\x40'), ('H', {'a': True, 'b': '1'}, b'\xa0\x31'), ('I', 'hej', b'\x02\x68\x65\x6a'), ('J', 'a', b'\x01'), ('K', 'a', b'\x01') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) # Bad character 0x19 should raise an exception. with self.assertRaises(asn1tools.EncodeError) as cm: foo.encode('A', '\x19', check_constraints=False) self.assertEqual( str(cm.exception), "A: Expected a character in ' !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEF" "GHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~', but got" " '.' 
(0x19)'.") def test_general_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= GeneralString " "B ::= SEQUENCE { " " a BOOLEAN, " " b GeneralString " "} " "END", 'per') datas = [ ('A', '', b'\x00'), ('A', '2', b'\x01\x32'), ('B', {'a': False, 'b': u'K'}, b'\x00\x01\x4b') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_bmp_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= BMPString " "B ::= SEQUENCE { " " a BOOLEAN, " " b BMPString " "} " "C ::= SEQUENCE { " " a BMPString (SIZE(1..128)), " " b BMPString (SIZE(1..256)) " "} " "END", 'per') datas = [ ('A', '', b'\x00'), ('A', '123', b'\x03\x00\x31\x00\x32\x00\x33'), ('B', {'a': False, 'b': u'K'}, b'\x00\x01\x00\x4b'), ('C', {'a': '123', 'b': '123'}, b'\x04\x001\x002\x003\x02\x001\x002\x003') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode('A', b'\x01\xd8\x00') valid_chars = [v for v in range(65536) if v < 0xd800 or v > 0xdfff] self.assertEqual(str(cm.exception), "A: Expected a value in %s, but got %d." % (valid_chars, 0xd800,)) def test_graphic_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= GraphicString " "B ::= SEQUENCE { " " a BOOLEAN, " " b GraphicString " "} " "END", 'per') datas = [ ('A', '', b'\x00'), ('A', '2', b'\x01\x32'), ('B', {'a': False, 'b': u'K'}, b'\x00\x01\x4b') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_teletex_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= TeletexString " "B ::= SEQUENCE { " " a BOOLEAN, " " b TeletexString " "} " "END", 'per') datas = [ ('A', u'123', b'\x03\x31\x32\x33'), ('B', {'a': False, 'b': u'K'}, b'\x00\x01\x4b') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_universal_string(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= UniversalString " "B ::= SEQUENCE { " " a BOOLEAN, " " b UniversalString " "} " "END", 'per') datas = [ ('A', u'åäö', b'\x03\x00\x00\x00\xe5\x00\x00\x00\xe4\x00\x00\x00\xf6'), ('A', u'1𐈃Q', b'\x03\x00\x00\x00\x31\x00\x01\x02\x03\x00\x00\x00\x51'), ('B', {'a': False, 'b': u'K'}, b'\x00\x01\x00\x00\x00\x4b') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(foo, type_name, decoded, encoded) def test_foo(self): foo = asn1tools.compile_files('tests/files/foo.asn', 'per') self.assertEqual(len(foo.types), 2) self.assertTrue(foo.types['Question'] is not None) self.assertTrue(foo.types['Answer'] is not None) self.assertEqual(len(foo.modules), 1) self.assertTrue(foo.modules['Foo'] is not None) # Encode a question. encoded = foo.encode('Question', {'id': 1, 'question': 'Is 1+1=3?'}) self.assertEqual(encoded, b'\x01\x01\x09\x49\x73\x20\x31\x2b\x31\x3d\x33\x3f') # Decode the encoded question. decoded = foo.decode('Question', encoded) self.assertEqual(decoded, {'id': 1, 'question': 'Is 1+1=3?'}) # Encode an answer. encoded = foo.encode('Answer', {'id': 1, 'answer': False}) self.assertEqual(encoded, b'\x01\x01\x00') # Decode the encoded answer. 
decoded = foo.decode('Answer', encoded) self.assertEqual(decoded, {'id': 1, 'answer': False}) def test_decode_length(self): foo = asn1tools.compile_files('tests/files/foo.asn', 'per') with self.assertRaises(asn1tools.DecodeError) as cm: foo.decode_length(b'') self.assertEqual(str(cm.exception), 'Decode length is not supported for this codec.') def test_versions(self): foo = asn1tools.compile_files('tests/files/versions.asn', 'per') # Encode as V1, decode as V1, V2 and V3 decoded_v1 = { 'userName': 'myUserName', 'password': 'myPassword', 'accountNumber': 54224445 } encoded_v1 = foo.encode('V1', decoded_v1) self.assertEqual(foo.decode('V1', encoded_v1), decoded_v1) self.assertEqual(foo.decode('V2', encoded_v1), decoded_v1) self.assertEqual(foo.decode('V3', encoded_v1), decoded_v1) # Encode as V2, decode as V1, V2 and V3 decoded_v2 = { 'userName': 'myUserName', 'password': 'myPassword', 'accountNumber': 54224445, 'minutesLastLoggedIn': 5 } encoded_v2 = foo.encode('V2', decoded_v2) self.assertEqual(foo.decode('V1', encoded_v2), decoded_v1) self.assertEqual(foo.decode('V2', encoded_v2), decoded_v2) self.assertEqual(foo.decode('V3', encoded_v2), decoded_v2) # Encode as V3, decode as V1, V2 and V3 decoded_v3 = { 'userName': 'myUserName', 'password': 'myPassword', 'accountNumber': 54224445, 'minutesLastLoggedIn': 5, 'certificate': None, 'thumb': None } encoded_v3 = foo.encode('V3', decoded_v3) self.assertEqual(foo.decode('V1', encoded_v3), decoded_v1) self.assertEqual(foo.decode('V2', encoded_v3), decoded_v2) self.assertEqual(foo.decode('V3', encoded_v3), decoded_v3) def test_x691_a1(self): a1 = asn1tools.compile_files('tests/files/x691_a1.asn', 'per') decoded = { 'name': { 'givenName': 'John', 'initial': 'P', 'familyName': 'Smith' }, 'title': 'Director', 'number': 51, 'dateOfHire': '19710917', 'nameOfSpouse': { 'givenName': 'Mary', 'initial': 'T', 'familyName': 'Smith' }, 'children': [ { 'name': { 'givenName': 'Ralph', 'initial': 'T', 'familyName': 'Smith' }, 'dateOfBirth': '19571111' }, { 'name': { 'givenName': 'Susan', 'initial': 'B', 'familyName': 'Jones' }, 'dateOfBirth': '19590717' } ] } encoded = ( b'\x80\x04\x4a\x6f\x68\x6e\x01\x50\x05\x53\x6d\x69\x74\x68\x01\x33' b'\x08\x44\x69\x72\x65\x63\x74\x6f\x72\x08\x31\x39\x37\x31\x30\x39' b'\x31\x37\x04\x4d\x61\x72\x79\x01\x54\x05\x53\x6d\x69\x74\x68\x02' b'\x05\x52\x61\x6c\x70\x68\x01\x54\x05\x53\x6d\x69\x74\x68\x08\x31' b'\x39\x35\x37\x31\x31\x31\x31\x05\x53\x75\x73\x61\x6e\x01\x42\x05' b'\x4a\x6f\x6e\x65\x73\x08\x31\x39\x35\x39\x30\x37\x31\x37' ) self.assert_encode_decode(a1, 'PersonnelRecord', decoded, encoded) def test_x691_a2(self): a2 = asn1tools.compile_files('tests/files/x691_a2.asn', 'per') decoded = { 'name': { 'givenName': 'John', 'initial': 'P', 'familyName': 'Smith' }, 'title': 'Director', 'number': 51, 'dateOfHire': '19710917', 'nameOfSpouse': { 'givenName': 'Mary', 'initial': 'T', 'familyName': 'Smith' }, 'children': [ { 'name': { 'givenName': 'Ralph', 'initial': 'T', 'familyName': 'Smith' }, 'dateOfBirth': '19571111' }, { 'name': { 'givenName': 'Susan', 'initial': 'B', 'familyName': 'Jones' }, 'dateOfBirth': '19590717' } ] } encoded = ( b'\x86\x4a\x6f\x68\x6e\x50\x10\x53\x6d\x69\x74\x68\x01\x33\x08\x44' b'\x69\x72\x65\x63\x74\x6f\x72\x19\x71\x09\x17\x0c\x4d\x61\x72\x79' b'\x54\x10\x53\x6d\x69\x74\x68\x02\x10\x52\x61\x6c\x70\x68\x54\x10' b'\x53\x6d\x69\x74\x68\x19\x57\x11\x11\x10\x53\x75\x73\x61\x6e\x42' b'\x10\x4a\x6f\x6e\x65\x73\x19\x59\x07\x17' ) self.assert_encode_decode(a2, 'PersonnelRecord', decoded, encoded) def 
test_x691_a3(self): a3 = asn1tools.compile_files('tests/files/x691_a3.asn', 'per') decoded = { 'name': { 'givenName': 'John', 'initial': 'P', 'familyName': 'Smith' }, 'title': 'Director', 'number': 51, 'dateOfHire': '19710917', 'nameOfSpouse': { 'givenName': 'Mary', 'initial': 'T', 'familyName': 'Smith' }, 'children': [ { 'name': { 'givenName': 'Ralph', 'initial': 'T', 'familyName': 'Smith' }, 'dateOfBirth': '19571111' }, { 'name': { 'givenName': 'Susan', 'initial': 'B', 'familyName': 'Jones' }, 'dateOfBirth': '19590717', 'sex': 'female' } ] } encoded = ( b'\x40\xc0\x4a\x6f\x68\x6e\x50\x08\x53\x6d\x69\x74\x68\x00\x00\x33' b'\x08\x44\x69\x72\x65\x63\x74\x6f\x72\x00\x19\x71\x09\x17\x03\x4d' b'\x61\x72\x79\x54\x08\x53\x6d\x69\x74\x68\x01\x00\x52\x61\x6c\x70' b'\x68\x54\x08\x53\x6d\x69\x74\x68\x00\x19\x57\x11\x11\x82\x00\x53' b'\x75\x73\x61\x6e\x42\x08\x4a\x6f\x6e\x65\x73\x00\x19\x59\x07\x17' b'\x01\x01\x40' ) self.assert_encode_decode(a3, 'PersonnelRecord', decoded, encoded) def test_x691_a4(self): a4 = asn1tools.compile_dict(deepcopy(X691_A4), 'per') decoded = { 'a': 253, 'b': True, 'c': ('e', True), 'g': '123', 'h': True } encoded = ( b'\x9e\x00\x01\x80\x01\x02\x91\xa4' ) self.assert_encode_decode(a4, 'Ax', decoded, encoded) def test_rrc_8_6_0(self): rrc = asn1tools.compile_dict(deepcopy(RRC_8_6_0), 'per') # Message 1. decoded = { 'message': ( 'c1', ( 'paging', { 'systemInfoModification': 'true', 'nonCriticalExtension': { } } ) ) } encoded = b'\x28' self.assert_encode_decode(rrc, 'PCCH-Message', decoded, encoded) # Message 2. decoded = { 'message': ( 'c1', ( 'paging', { } ) ) } encoded = b'\x00' self.assert_encode_decode(rrc, 'PCCH-Message', decoded, encoded) # Message 3. decoded = { 'message': { 'dl-Bandwidth': 'n6', 'phich-Config': { 'phich-Duration': 'normal', 'phich-Resource': 'half' }, 'systemFrameNumber': (b'\x12', 8), 'spare': (b'\x34\x40', 10) } } encoded = b'\x04\x48\xd1' self.assert_encode_decode(rrc, 'BCCH-BCH-Message', decoded, encoded) # Message #4. 
decoded = { 'message': ( 'c1', ( 'systemInformation', { 'criticalExtensions': ( 'systemInformation-r8', { 'sib-TypeAndInfo': [ ( 'sib2', { 'ac-BarringInfo': { 'ac-BarringForEmergency': True, 'ac-BarringForMO-Data': { 'ac-BarringFactor': 'p95', 'ac-BarringTime': 's128', 'ac-BarringForSpecialAC': (b'\xf0', 5) } }, 'radioResourceConfigCommon': { 'rach-ConfigCommon': { 'preambleInfo': { 'numberOfRA-Preambles': 'n24', 'preamblesGroupAConfig': { 'sizeOfRA-PreamblesGroupA': 'n28', 'messageSizeGroupA': 'b144', 'messagePowerOffsetGroupB': 'minusinfinity' } }, 'powerRampingParameters': { 'powerRampingStep': 'dB0', 'preambleInitialReceivedTargetPower': 'dBm-102' }, 'ra-SupervisionInfo': { 'preambleTransMax': 'n8', 'ra-ResponseWindowSize': 'sf6', 'mac-ContentionResolutionTimer': 'sf48' }, 'maxHARQ-Msg3Tx': 8 }, 'bcch-Config': { 'modificationPeriodCoeff': 'n2' }, 'pcch-Config': { 'defaultPagingCycle': 'rf256', 'nB': 'twoT' }, 'prach-Config': { 'rootSequenceIndex': 836, 'prach-ConfigInfo': { 'prach-ConfigIndex': 33, 'highSpeedFlag': False, 'zeroCorrelationZoneConfig': 10, 'prach-FreqOffset': 64 } }, 'pdsch-ConfigCommon': { 'referenceSignalPower': -60, 'p-b': 2 }, 'pusch-ConfigCommon': { 'pusch-ConfigBasic': { 'n-SB': 1, 'hoppingMode': 'interSubFrame', 'pusch-HoppingOffset': 10, 'enable64QAM': False }, 'ul-ReferenceSignalsPUSCH': { 'groupHoppingEnabled': True, 'groupAssignmentPUSCH': 22, 'sequenceHoppingEnabled': False, 'cyclicShift': 5 } }, 'pucch-ConfigCommon': { 'deltaPUCCH-Shift': 'ds1', 'nRB-CQI': 98, 'nCS-AN': 4, 'n1PUCCH-AN': 2047 }, 'soundingRS-UL-ConfigCommon': ( 'setup', { 'srs-BandwidthConfig': 'bw0', 'srs-SubframeConfig': 'sc4', 'ackNackSRS-SimultaneousTransmission': True }), 'uplinkPowerControlCommon': { 'p0-NominalPUSCH': -126, 'alpha': 'al0', 'p0-NominalPUCCH': -127, 'deltaFList-PUCCH': { 'deltaF-PUCCH-Format1': 'deltaF-2', 'deltaF-PUCCH-Format1b': 'deltaF1', 'deltaF-PUCCH-Format2': 'deltaF0', 'deltaF-PUCCH-Format2a': 'deltaF-2', 'deltaF-PUCCH-Format2b': 'deltaF0' }, 'deltaPreambleMsg3': -1 }, 'ul-CyclicPrefixLength': 'len1' }, 'ue-TimersAndConstants': { 't300': 'ms100', 't301': 'ms200', 't310': 'ms50', 'n310': 'n2', 't311': 'ms30000', 'n311': 'n2' }, 'freqInfo': { 'additionalSpectrumEmission': 3 }, 'timeAlignmentTimerCommon': 'sf500' } ), ( 'sib3', { 'cellReselectionInfoCommon': { 'q-Hyst': 'dB0', 'speedStateReselectionPars': { 'mobilityStateParameters': { 't-Evaluation': 's180', 't-HystNormal': 's180', 'n-CellChangeMedium': 1, 'n-CellChangeHigh': 16 }, 'q-HystSF': { 'sf-Medium': 'dB-6', 'sf-High': 'dB-4' } } }, 'cellReselectionServingFreqInfo': { 'threshServingLow': 7, 'cellReselectionPriority': 3 }, 'intraFreqCellReselectionInfo': { 'q-RxLevMin': -33, 's-IntraSearch': 0, 'presenceAntennaPort1': False, 'neighCellConfig': (b'\x80', 2), 't-ReselectionEUTRA': 4 } } ), ( 'sib4', { } ), ( 'sib5', { 'interFreqCarrierFreqList': [ { 'dl-CarrierFreq': 1, 'q-RxLevMin': -45, 't-ReselectionEUTRA': 0, 'threshX-High': 31, 'threshX-Low': 29, 'allowedMeasBandwidth': 'mbw6', 'presenceAntennaPort1': True, 'neighCellConfig': (b'\x00', 2), 'q-OffsetFreq': 'dB0' } ] } ), ( 'sib6', { 't-ReselectionUTRA': 3 } ), ( 'sib7', { 't-ReselectionGERAN': 3 } ), ( 'sib8', { 'parameters1XRTT': { 'longCodeState1XRTT': (b'\x01\x23\x45\x67\x89\x00', 42) } } ), ( 'sib9', { 'hnb-Name': b'4' } ), ( 'sib10', { 'messageIdentifier': (b'#4', 16), 'serialNumber': (b'\x124', 16), 'warningType': b'2\x12' } ), ( 'sib11', { 'messageIdentifier': (b'g\x88', 16), 'serialNumber': (b'T5', 16), 'warningMessageSegmentType': 'notLastSegment', 
'warningMessageSegmentNumber': 19, 'warningMessageSegment': b'\x12' } ) ] } ) } ) ) } encoded = ( b'\x04\x81\x3f\xbe\x2a\x64\x12\xb2\xf3\x20\x03\x44\x85\x50\x00\x40' b'\x53\x65\x31\x40\x07\xff\x82\x40\x00\x01\x10\x02\x4e\x20\x80\x50' b'\x6c\x3c\x47\x69\x28\x14\x10\x0c\x00\x00\x00\x01\x64\x7f\xa2\x10' b'\x19\x43\x30\x50\x01\x23\x45\x67\x89\x0e\x80\x34\x40\x46\x68\x24' b'\x68\x64\x24\x91\x9e\x21\x50\xd4\x98\x01\x12' ) self.assert_encode_decode(rrc, 'BCCH-DL-SCH-Message', decoded, encoded) def test_all_types_automatic_tags(self): all_types = asn1tools.compile_files( 'tests/files/all_types_automatic_tags.asn', 'per') datas = [ ('Sequence3', {'a': 1, 'c': 2,'d': True}, b'\x00\x01\x01\x01\x02\x80') ] for type_name, decoded, encoded in datas: self.assert_encode_decode(all_types, type_name, decoded, encoded) def test_bar(self): """A simple example. """ bar = asn1tools.compile_files('tests/files/bar.asn', 'per') # Message 1. decoded = { 'headerOnly': True, 'lock': False, 'acceptTypes': { 'standardTypes': [(b'\x40', 2), (b'\x80', 1)] }, 'url': b'/ses/magic/moxen.html' } encoded = ( b'\xd0\x02\x02\x40\x01\x80\x15\x2f\x73\x65\x73\x2f\x6d\x61\x67\x69' b'\x63\x2f\x6d\x6f\x78\x65\x6e\x2e\x68\x74\x6d\x6c' ) self.assert_encode_decode(bar, 'GetRequest', decoded, encoded) # Message 2. decoded = { 'headerOnly': False, 'lock': False, 'url': b'0' } encoded = b'\x00\x01\x30' self.assert_encode_decode(bar, 'GetRequest', decoded, encoded) def test_repr_all_types(self): all_types = asn1tools.compile_files('tests/files/all_types.asn', 'per') self.assertEqual(repr(all_types.types['Boolean']), 'Boolean(Boolean)') self.assertEqual(repr(all_types.types['Integer']), 'Integer(Integer)') self.assertEqual(repr(all_types.types['Bitstring']), 'BitString(Bitstring)') self.assertEqual(repr(all_types.types['Octetstring']), 'OctetString(Octetstring)') self.assertEqual(repr(all_types.types['Null']), 'Null(Null)') self.assertEqual(repr(all_types.types['Objectidentifier']), 'ObjectIdentifier(Objectidentifier)') self.assertEqual(repr(all_types.types['Enumerated']), 'Enumerated(Enumerated)') self.assertEqual(repr(all_types.types['Utf8string']), 'UTF8String(Utf8string)') self.assertEqual(repr(all_types.types['Sequence']), 'Sequence(Sequence, [])') self.assertEqual(repr(all_types.types['Set']), 'Set(Set, [])') self.assertEqual(repr(all_types.types['Sequence2']), 'Sequence(Sequence2, [Integer(a)])') self.assertEqual(repr(all_types.types['Set2']), 'Set(Set2, [Integer(a)])') self.assertEqual(repr(all_types.types['Numericstring']), 'NumericString(Numericstring)') self.assertEqual(repr(all_types.types['Printablestring']), 'PrintableString(Printablestring)') self.assertEqual(repr(all_types.types['Ia5string']), 'IA5String(Ia5string)') self.assertEqual(repr(all_types.types['Universalstring']), 'UniversalString(Universalstring)') self.assertEqual(repr(all_types.types['Visiblestring']), 'VisibleString(Visiblestring)') self.assertEqual(repr(all_types.types['Generalstring']), 'GeneralString(Generalstring)') self.assertEqual(repr(all_types.types['Bmpstring']), 'BMPString(Bmpstring)') self.assertEqual(repr(all_types.types['Teletexstring']), 'TeletexString(Teletexstring)') self.assertEqual(repr(all_types.types['Graphicstring']), 'GraphicString(Graphicstring)') self.assertEqual(repr(all_types.types['Utctime']), 'UTCTime(Utctime)') self.assertEqual(repr(all_types.types['SequenceOf']), 'SequenceOf(SequenceOf, Integer())') self.assertEqual(repr(all_types.types['SetOf']), 'SetOf(SetOf, Integer())') self.assertEqual(repr(all_types.types['Choice']), 
"Choice(Choice, ['a'])") self.assertEqual(repr(all_types.types['Any']), 'Any(Any)') self.assertEqual(repr(all_types.types['Sequence12']), 'Sequence(Sequence12, [SequenceOf(a, Recursive(Sequence12))])') def test_s1ap_14_4_0(self): # ToDo: Do not skip! return with self.assertRaises(asn1tools.CompileError): s1ap = asn1tools.compile_dict(deepcopy(S1AP_14_4_0), 'per') # Message 1. decoded_message = ( 'successfulOutcome', { 'procedureCode': 17, 'criticality': 'reject', 'value': { 'protocolIEs': [ { 'id': 105, 'criticality': 'reject', 'value': [ { 'servedPLMNs': [ b'\xab\xcd\xef', b'\x12\x34\x56' ], 'servedGroupIDs': [ b'\x22\x22' ], 'servedMMECs': [ b'\x11' ] } ] } ] } } ) encoded_message = ( b'\x20\x11\x00\x15\x00\x00\x01\x00\x69\x00\x0e\x00\x40\xab\xcd\xef' b'\x12\x34\x56\x00\x00\x22\x22\x00\x11' ) encoded = s1ap.encode('S1AP-PDU', decoded_message) self.assertEqual(encoded, encoded_message) def test_information_object(self): # ToDo: Fix when supported. return information_object = asn1tools.compile_files( 'tests/files/information_object.asn', 'per') # Message 1 - without constraints. decoded_message = { 'id': 0, 'value': b'\x05', 'comment': 'item 0', 'extra': 2 } encoded_message = ( b'\x01\x00\x01\x05\x06\x69\x74\x65\x6d\x20\x30\x01\x02' ) self.assert_encode_decode(information_object, 'ItemWithoutConstraints', decoded_message, encoded_message) # Message 1 - with constraints. decoded_message = { 'id': 0, 'value': True, 'comment': 'item 0', 'extra': 2 } encoded_message = ( b'\x01\x00\x01\x80\x06\x69\x74\x65\x6d\x20\x30\x01\x02' ) # ToDo: Constraints are not yet implemented. with self.assertRaises(TypeError) as cm: self.assert_encode_decode(information_object, 'ItemWithConstraints', decoded_message, encoded_message) self.assertEqual(str(cm.exception), "object of type 'bool' has no len()") # Message 2. decoded_message = { 'id': 1, 'value': { 'myValue': 7, 'myType': 0 }, 'comment': 'item 1', 'extra': 5 } encoded_message = ( b'\x01\x01\x05\x02\x01\x07\x01\x00\x06\x69\x74\x65\x6d\x20\x31\x01' b'\x05' ) # ToDo: Constraints are not yet implemented. with self.assertRaises(TypeError): self.assert_encode_decode(information_object, 'ItemWithConstraints', decoded_message, encoded_message) # Message 3 - error class. decoded_message = { 'errorCategory': 'A', 'errors': [ { 'errorCode': 1, 'errorInfo': 3 }, { 'errorCode': 2, 'errorInfo': True } ] } encoded_message = ( b'\x41\x02\x01\x01\x02\x01\x03\x01\x02\x01\x80' ) # ToDo: Constraints are not yet implemented. with self.assertRaises(TypeError): self.assert_encode_decode(information_object, 'ErrorReturn', decoded_message, encoded_message) # Message 4 - C. decoded_message = { 'a': 0 } encoded_message = ( b'\x00\x01\x00' ) encoded = information_object.encode('C', decoded_message) self.assertEqual(encoded, encoded_message) # Message 5 - C. decoded_message = { 'a': 0, 'b': { 'a': 0 } } encoded_message = ( b'\x80\x01\x00\x03\x00\x01\x00' ) with self.assertRaises(TypeError): encoded = information_object.encode('C', decoded_message) self.assertEqual(encoded, encoded_message) # Message 6 - C. 
decoded_message = { 'a': 0, 'b': { 'a': 0, 'b': { 'a': 0, 'b': { 'a': 0 } } } } encoded_message = ( b'\x80\x01\x00\x0b\x80\x01\x00\x07\x80\x01\x00\x03\x00\x01\x00' ) with self.assertRaises(TypeError): encoded = information_object.encode('C', decoded_message) self.assertEqual(encoded, encoded_message) def test_oma_ulp(self): ulp = asn1tools.compile_dict(deepcopy(OMA_ULP), 'per') decoded = { 'length': 162, 'version': {'maj': 2, 'min': 0, 'servind': 0}, 'sessionID': { 'setSessionID': { 'sessionId': 8838, 'setId': ('imsi', b'\x64\x00\x00\x00\x00\x00\x20\xf2') }, 'slpSessionID': { 'sessionID': b'\x00\x00\x40\x00', 'slpId': ('iPAddress', ('ipv4Address', b'\x7f\x00\x00\x01')) } }, 'message': ( 'msSUPLPOSINIT', { 'sETCapabilities': { 'posTechnology': { 'agpsSETassisted': True, 'agpsSETBased': True, 'autonomousGPS': False, 'aFLT': False, 'eCID': True, 'eOTD': False, 'oTDOA': True, 'ver2-PosTechnology-extension': { 'gANSSPositionMethods': [ { 'ganssId': 4, 'gANSSPositioningMethodTypes': { 'setAssisted': True, 'setBased': True, 'autonomous': True }, 'gANSSSignals': (b'\x80', 1) } ] } }, 'prefMethod': 'noPreference', 'posProtocol': { 'tia801': False, 'rrlp': False, 'rrc': False, 'ver2-PosProtocol-extension': { 'lpp': True, 'posProtocolVersionLPP': { 'majorVersionField': 12, 'technicalVersionField': 4, 'editorialVersionField': 0 } } } }, 'locationId': { 'cellInfo': ( 'ver2-CellInfo-extension', ( 'lteCell', { 'cellGlobalIdEUTRA': { 'plmn-Identity': { 'mcc': [3, 1, 0], 'mnc': [3, 1, 0] }, 'cellIdentity': (b'\x34\xa3\x20\x20', 28) }, 'physCellId': 304, 'trackingAreaCode': (b'\x13\x8e', 16), 'rsrpResult': 59, 'rsrqResult': 24, 'tA': 1, 'measResultListEUTRA': [ { 'physCellId': 275, 'measResult': { 'rsrpResult': 45, 'rsrqResult': 14 } }, { 'physCellId': 200, 'measResult': { 'rsrpResult': 39, 'rsrqResult': 8 } } ] } ) ), 'status': 'current' }, 'sUPLPOS': { 'posPayLoad': ( 'ver2-PosPayLoad-extension', { 'lPPPayload': [ b'\x92\x2b\x08\x31\xe2\x00\x5d\x00\x82\x17' b'\x40\x27\x04\x88\x22\x1b\x80\x00\x2d\xe4' b'\x00\x00\x41\x88\x3c\x09\x24\x30\x44\x18' b'\xb3\x18\x66\x8f\xc0\x03\x24\x01\x01', b'\x92\x2c\x10\x62\x62\x13\x10\x34\xa3\x20' b'\x26\xa4\x01\x40\x84\x00\x00\x00\x00\x01' b'\x41\x20\x02\x00\x00\x00\x00' ] } ) }, 'ver': (b'\x52\x88\xec\xab\xa9\x37\x5c\x4e', 64) } ) } encoded = ( b'\x00\xa2\x02\x00\x00\xc0\x22\x86\x30\x64\x00\x00\x00\x00\x00' b'\x20\xf2\x00\x00\x40\x00\x00\x7f\x00\x00\x01\x31\xb9\x40\x40' b'\x04\x40\x47\x00\x80\xa0\x04\x04\x0c\x0c\x04\x00\x40\x00\x1b' b'\x27\xa6\x21\x31\x00\x34\xa3\x20\x20\x01\x30\x13\x8e\x76\xc0' b'\x00\x01\x20\x01\x13\x6b\x4e\x00\x00\xc8\x69\xc8\x24\x00\x47' b'\x48\x00\x26\x92\x2b\x08\x31\xe2\x00\x5d\x00\x82\x17\x40\x27' b'\x04\x88\x22\x1b\x80\x00\x2d\xe4\x00\x00\x41\x88\x3c\x09\x24' b'\x30\x44\x18\xb3\x18\x66\x8f\xc0\x03\x24\x01\x01\x00\x1a\x92' b'\x2c\x10\x62\x62\x13\x10\x34\xa3\x20\x26\xa4\x01\x40\x84\x00' b'\x00\x00\x00\x01\x41\x20\x02\x00\x00\x00\x00\x52\x88\xec\xab' b'\xa9\x37\x5c\x4e' ) self.assert_encode_decode(ulp, 'ULP-PDU', decoded, encoded) def test_not_support_decode_with_length(self): foo = asn1tools.compile_string( "Foo DEFINITIONS AUTOMATIC TAGS ::= " "BEGIN " "A ::= OCTET STRING " "END", 'oer') with self.assertRaises(NotImplementedError) as cm: foo.decode_with_length('A', b'\x01\x23\x45\x67\x89\xab\xcd\xef') self.assertEqual(str(cm.exception), "This codec does not support decode_with_length().") if __name__ == '__main__': unittest.main()
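The pattern repeated throughout test_per.py is: compile an ASN.1 module for the 'per' codec with asn1tools.compile_string (or compile_files/compile_dict), then call encode and decode on named types and compare against expected byte strings. A minimal sketch distilled from test_boolean above (the module text and expected bytes are taken directly from that test):

import asn1tools

foo = asn1tools.compile_string(
    "Foo DEFINITIONS AUTOMATIC TAGS ::= "
    "BEGIN "
    "A ::= BOOLEAN "
    "END",
    'per')

# Per test_boolean, a lone BOOLEAN encodes into the top bit of one byte.
assert foo.encode('A', True) == b'\x80'
assert foo.decode('A', b'\x80') == True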
mindepth_test.go
package binarytree_test

import (
	"testing"

	"github.com/wenjiax/go-algorithms/binarytree"
)

func TestMinDepth(t *testing.T) {
	// Given a binary tree, find its minimum depth.
	// The minimum depth is the number of nodes on the shortest path
	// from the root down to the nearest leaf node.
	// Note: a leaf is a node with no children.
	// Example:
	//
	//            33
	//          /    \
	//        16      50
	//       /  \    /  \
	//     13    18 34   58
	//       \  /  \    /  \
	//       15 17  25 51   66
	//             /  \  \
	//            19  27  55
	//
	// Its minimum depth is 3.
	tree := binarytree.NewBinarySearchTree()
	// root node
	tree.Insert(33)
	// left subtree
	tree.Insert(16)
	tree.Insert(13)
	tree.Insert(18)
	tree.Insert(15)
	tree.Insert(17)
	tree.Insert(25)
	tree.Insert(19)
	tree.Insert(27)
	// right subtree
	tree.Insert(50)
	tree.Insert(34)
	tree.Insert(58)
	tree.Insert(51)
	tree.Insert(66)
	tree.Insert(55)
	// minimum depth: 3
	t.Log(minDepthDFS(tree.Find(33)))
	// minimum depth: 3
	t.Log(minDepthBFS(tree.Find(33)))
}

// Find the minimum depth of a binary tree, DFS implementation.
func minDepthDFS(root *binarytree.TreeNode) int {
	if root == nil {
		return 0
	}
	if root.Left() == nil {
		return minDepthDFS(root.Right()) + 1
	}
	if root.Right() == nil {
		return minDepthDFS(root.Left()) + 1
	}
	left, right := minDepthDFS(root.Left()), minDepthDFS(root.Right())
	if left < right {
		return left + 1
	}
	return right + 1
}

// Find the minimum depth of a binary tree, BFS implementation.
func minDepthBFS(root *binarytree.TreeNode) int {
	if root == nil {
		return 0
	}
	var queue []*binarytree.TreeNode
	queue = append(queue, root)
	var level int
	for len(queue) > 0 {
		for i, l := 0, len(queue); i < l; i++ {
			p := queue[0]
			queue = queue[1:]
			if p.Left() == nil && p.Right() == nil {
				return level + 1
			}
			if p.Left() != nil {
				queue = append(queue, p.Left())
			}
			if p.Right() != nil {
				queue = append(queue, p.Right())
			}
		}
		level++
	}
	return level
}
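The nil-child guards in minDepthDFS are the subtle part: a node with exactly one child is not a leaf, so the minimum depth must run through the child that exists. A sketch of the same check, re-expressed in Python for brevity (TreeNode here is a hypothetical stand-in, not the binarytree package's type):

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def min_depth(root):
    if root is None:
        return 0
    # A one-child node is not a leaf: recurse only into the existing child.
    if root.left is None:
        return min_depth(root.right) + 1
    if root.right is None:
        return min_depth(root.left) + 1
    return min(min_depth(root.left), min_depth(root.right)) + 1

# Naively taking min over both sides would return 1 for this chain, because
# the missing left child contributes depth 0; the guards give the correct 2.
assert min_depth(TreeNode(1, right=TreeNode(2))) == 2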
kubemeta_test.go
package process_test import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/slok/kahoy/internal/log" "github.com/slok/kahoy/internal/model" "github.com/slok/kahoy/internal/resource/process" ) func newResource
(kubeAPIVersion, kubeType, ns, name string) model.Resource { return newCustomResource(kubeAPIVersion, kubeType, ns, name, nil, nil) } func newLabeledResource(name string, labels map[string]string) model.Resource { return newCustomResource("v1", "Pod", "testns", name, labels, nil) } func newAnnotatedResource(name string, annotations map[string]string) model.Resource { return newCustomResource("v1", "Pod", "testns", name, nil, annotations) } func newCustomResource(kubeAPIVersion, kubeType, ns, name string, labels, annotations map[string]string) model.Resource { type tm = map[string]interface{} objAnnotations := tm{} for k, v := range annotations { objAnnotations[k] = v } objLabels := tm{} for k, v := range labels { objLabels[k] = v } return model.Resource{ ID: name, K8sObject: &unstructured.Unstructured{ Object: tm{ "apiVersion": kubeAPIVersion, "kind": kubeType, "metadata": tm{ "name": name, "namespace": ns, "labels": objLabels, "annotations": objAnnotations, }, }, }, } } func TestExcludeKubeTypeProcessor(t *testing.T) { tests := map[string]struct { regexes []string resources []model.Resource expResources []model.Resource expErr bool }{ "No regexes should not filter anything.": { regexes: []string{}, resources: []model.Resource{ newResource("v1", "Pod", "test-ns", "test-name"), newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), }, expResources: []model.Resource{ newResource("v1", "Pod", "test-ns", "test-name"), newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), }, }, "Non-matching regexes should not filter resources.": { regexes: []string{ "Deployment", }, resources: []model.Resource{ newResource("v1", "Pod", "test-ns", "test-name"), newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), }, expResources: []model.Resource{ newResource("v1", "Pod", "test-ns", "test-name"), newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), }, }, "Matching regexes should filter resources (specific type).": { regexes: []string{ ".*/Pod", }, resources: []model.Resource{ newResource("v1", "Pod", "test-ns", "test-name"), newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), newResource("v1", "Pod", "test-ns", "test-name3"), newResource("v1", "Service", "test-ns", "test-name4"), }, expResources: []model.Resource{ newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), newResource("v1", "Service", "test-ns", "test-name4"), }, }, "Matching regexes should filter resources (specific group).": { regexes: []string{ "^v1/.*", }, resources: []model.Resource{ newResource("v1", "Pod", "test-ns", "test-name"), newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), newResource("v1", "Pod", "test-ns", "test-name3"), newResource("v1", "Service", "test-ns", "test-name4"), }, expResources: []model.Resource{ newResource("networking.k8s.io/v1beta1", "Ingress", "test-ns", "test-name2"), }, }, "Matching regexes should filter resources (specific type and group).": { regexes: []string{ "^apps/v1/StatefulSet$", }, resources: []model.Resource{ newResource("apps/v1", "StatefulSet", "test-ns", "test-name"), newResource("apps/v1", "StatefulSet", "test-ns", "test-name2"), newResource("apps/v1", "Deployment", "test-ns", "test-name3"), newResource("apps/v1", "Deployment", "test-ns", "test-name4"), }, expResources: []model.Resource{ newResource("apps/v1", "Deployment", "test-ns", "test-name3"), newResource("apps/v1", "Deployment", "test-ns", "test-name4"), }, }, } for name, test 
:= range tests { t.Run(name, func(t *testing.T) { assert := assert.New(t) require := require.New(t) proc, err := process.NewExcludeKubeTypeProcessor(test.regexes, log.Noop) require.NoError(err) gotResources, err := proc.Process(context.TODO(), test.resources) if test.expErr { assert.Error(err) } else if assert.NoError(err) { assert.Equal(test.expResources, gotResources) } }) } } func TestIncludeNamespaceProcessor(t *testing.T) { tests := map[string]struct { regexes []string resources []model.Resource expResources []model.Resource expErr bool }{ "No regexes should not filter anything.": { regexes: []string{}, resources: []model.Resource{ newResource("v1", "Pod", "test-ns1", "test-name"), newResource("v1", "Pod", "test-ns2", "test-name"), }, expResources: []model.Resource{ newResource("v1", "Pod", "test-ns1", "test-name"), newResource("v1", "Pod", "test-ns2", "test-name"), }, }, "Matching regexes should keep resources and exclude non-matching ones.": { regexes: []string{ "test-ns*", }, resources: []model.Resource{ newResource("v1", "Pod", "test-ns1", "test-name"), newResource("v1", "Pod", "test-ns2", "test-name"), newResource("v1", "Pod", "ns3", "test-name"), }, expResources: []model.Resource{ newResource("v1", "Pod", "test-ns1", "test-name"), newResource("v1", "Pod", "test-ns2", "test-name"), }, }, "If no resources match given regex no resources are returned.": { regexes: []string{ "test-ns*", }, resources: []model.Resource{ newResource("v1", "Pod", "ns1", "test-name"), newResource("v1", "Pod", "ns2", "test-name"), newResource("v1", "Pod", "ns3", "test-name"), }, expResources: []model.Resource{}, }, "If all resources match regexes, all resources are returned.": { regexes: []string{ "ns1", "ns2", }, resources: []model.Resource{ newResource("v1", "Pod", "ns1", "test-name"), newResource("v1", "Pod", "ns2", "test-name"), }, expResources: []model.Resource{ newResource("v1", "Pod", "ns1", "test-name"), newResource("v1", "Pod", "ns2", "test-name"), }, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { assert := assert.New(t) require := require.New(t) proc, err := process.NewIncludeNamespaceProcessor(test.regexes, log.Noop) require.NoError(err) gotResources, err := proc.Process(context.TODO(), test.resources) if test.expErr { assert.Error(err) } else if assert.NoError(err) { assert.Equal(test.expResources, gotResources) } }) } } func TestKubeLabelSelectorProcessor(t *testing.T) { tests := map[string]struct { selector string resources []model.Resource expResources []model.Resource expErr bool }{ "No selector should not filter anything.": { selector: "", resources: []model.Resource{ newLabeledResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newLabeledResource("r1", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), }, expResources: []model.Resource{ newLabeledResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newLabeledResource("r1", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), }, }, "Equality selector should filter the ones that don't have that label.": { selector: "k1=v1", resources: []model.Resource{ newLabeledResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newLabeledResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), }, expResources: []model.Resource{ newLabeledResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), }, }, "Not equality selector should filter the ones that have that label.": { selector: "k1!=v1", resources: []model.Resource{ newLabeledResource("r0", 
map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newLabeledResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), }, expResources: []model.Resource{ newLabeledResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), }, }, "Multiple selectors should filter correctly.": { selector: "k1=v1,k2 in (v21,v22),k3!=v3", resources: []model.Resource{ newLabeledResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newLabeledResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), newLabeledResource("r2", map[string]string{"k1": "v1", "k2": "v21", "k4": "v4"}), newLabeledResource("r3", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3", "k4": "v4"}), newLabeledResource("r4", map[string]string{"k1": "v1", "k4": "v4"}), newLabeledResource("r5", map[string]string{"k1": "v1", "k2": "v22", "k5": "v5"}), newLabeledResource("r6", map[string]string{"k1": "v1", "k2": "v23", "k5": "v5"}), }, expResources: []model.Resource{ newLabeledResource("r2", map[string]string{"k1": "v1", "k2": "v21", "k4": "v4"}), newLabeledResource("r5", map[string]string{"k1": "v1", "k2": "v22", "k5": "v5"}), }, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { assert := assert.New(t) require := require.New(t) proc, err := process.NewKubeLabelSelectorProcessor(test.selector, log.Noop) require.NoError(err) gotResources, err := proc.Process(context.TODO(), test.resources) if test.expErr { assert.Error(err) } else if assert.NoError(err) { assert.Equal(test.expResources, gotResources) } }) } } func TestKubeAnnotationSelectorProcessor(t *testing.T) { tests := map[string]struct { selector string resources []model.Resource expResources []model.Resource expErr bool }{ "No selector should not filter anything.": { selector: "", resources: []model.Resource{ newAnnotatedResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newAnnotatedResource("r1", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), }, expResources: []model.Resource{ newAnnotatedResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newAnnotatedResource("r1", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), }, }, "Equality selector should filter the ones that don't have that annotation.": { selector: "k1=v1", resources: []model.Resource{ newAnnotatedResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newAnnotatedResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), }, expResources: []model.Resource{ newAnnotatedResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), }, }, "Not equality selector should filter the ones that have that annotation.": { selector: "k1!=v1", resources: []model.Resource{ newAnnotatedResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newAnnotatedResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), }, expResources: []model.Resource{ newAnnotatedResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), }, }, "Multiple selectors should filter correctly.": { selector: "k1=v1,k2 in (v21,v22),k3!=v3", resources: []model.Resource{ newAnnotatedResource("r0", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}), newAnnotatedResource("r1", map[string]string{"k2": "v2", "k3": "v3"}), newAnnotatedResource("r2", map[string]string{"k1": "v1", "k2": "v21", "k4": "v4"}), newAnnotatedResource("r3", map[string]string{"k1": "v1", "k2": "v2", "k3": "v3", "k4": "v4"}), newAnnotatedResource("r4", map[string]string{"k1": "v1", "k4": "v4"}), newAnnotatedResource("r5", map[string]string{"k1": "v1", "k2": "v22",
"k5": "v5"}), newAnnotatedResource("r6", map[string]string{"k1": "v1", "k2": "v23", "k5": "v5"}), }, expResources: []model.Resource{ newAnnotatedResource("r2", map[string]string{"k1": "v1", "k2": "v21", "k4": "v4"}), newAnnotatedResource("r5", map[string]string{"k1": "v1", "k2": "v22", "k5": "v5"}), }, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { assert := assert.New(t) require := require.New(t) proc, err := process.NewKubeAnnotationSelectorProcessor(test.selector, log.Noop) require.NoError(err) gotResources, err := proc.Process(context.TODO(), test.resources) if test.expErr { assert.Error(err) } else if assert.NoError(err) { assert.Equal(test.expResources, gotResources) } }) } }
newResource
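The exclusion tests above pin down the processor's observable contract: each resource's apiVersion and kind are joined as "apiVersion/Kind" (for example "apps/v1/StatefulSet") and the resource is dropped when any exclusion regex matches that string. The real process.NewExcludeKubeTypeProcessor implementation is not part of this file; the following is only a minimal standalone sketch of that matching contract, using a hypothetical excludeByType helper over plain string ids instead of model.Resource, and omitting the logger the real constructor takes:

package main

import (
	"fmt"
	"regexp"
)

// excludeByType keeps only the ids (formatted "apiVersion/Kind") that match
// none of the exclusion regexes: the same contract the tests above exercise.
func excludeByType(regexes []string, ids []string) ([]string, error) {
	// Compile once up front so invalid patterns fail fast with an error
	// instead of surfacing at match time.
	compiled := make([]*regexp.Regexp, 0, len(regexes))
	for _, expr := range regexes {
		r, err := regexp.Compile(expr)
		if err != nil {
			return nil, err
		}
		compiled = append(compiled, r)
	}
	kept := make([]string, 0, len(ids))
	for _, id := range ids {
		excluded := false
		for _, r := range compiled {
			if r.MatchString(id) {
				excluded = true
				break
			}
		}
		if !excluded {
			kept = append(kept, id)
		}
	}
	return kept, nil
}

func main() {
	kept, _ := excludeByType(
		[]string{"^apps/v1/StatefulSet$"},
		[]string{"apps/v1/StatefulSet", "apps/v1/Deployment"},
	)
	fmt.Println(kept) // prints: [apps/v1/Deployment]
}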
types.ts
export class UsersList { static from(jsonArray: Array<Object>): UsersList { return new UsersList(jsonArray.map((jsonObj: any) => User.from(jsonObj))); } constructor(readonly users: User[]) { } } export class
{ static from(json: any): User { return new User( json['id'], json['name'], json['surname'], json['age'], json['email'], Address.from(json['address'][0]) ); } constructor(public id: number, public name: string, public surname: string, public age: string, public email: string, public address: Address) { } } export class Address { static from(json: any): Address { return new Address( json['city'], json['country'] ); } constructor(public city: string, public country: string) { } }
User
volume.go
package storage import ( "encoding/binary" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "os" "path" "strconv" "sync" ) const ( VolumeExtension = ".vol" MetaName = "meta.json" ) type Volume struct { Id VolumeId `json:"id"` Size uint64 `json:"size"` FileCount int `json:"file_count"` ReadOnly bool `json:"read_only"` Uptime uint64 `json:"uptime"` Status int `json:"status"` // status: normal or recovering (recovery in progress) mutex sync.Mutex dir string } func NewVolume(dirname string, id VolumeId) (*Volume, error) { v := &Volum
ume) String() string { return fmt.Sprintf("Id:%v, dir:%s", v.Id, v.dir) } func (v *Volume) GetInfo() VolumeInfo { v.mutex.Lock() defer v.mutex.Unlock() return VolumeInfo{ Id: v.Id, Size: v.Size, FileCount: v.FileCount, ReadOnly: v.ReadOnly, Uptime: v.Uptime, // TODO: status normal or recovering (recovery in progress) } } func (v *Volume) GetStat() (os.FileInfo, error) { return os.Stat(v.dir) } func (v *Volume) Directory() string { return v.dir } func (v *Volume) MetaPath() string { return path.Join(v.dir, MetaName) } func (v *Volume) Destroy() { v.mutex.Lock() defer v.mutex.Unlock() // TODO: first remove, then delete asynchronously; this must not fail _ = os.RemoveAll(v.dir) } func (v *Volume) Sync() error { v.mutex.Lock() defer v.mutex.Unlock() return v.dump(v.MetaPath()) } // Close cleanly shuts down this volume func (v *Volume) Close() { v.mutex.Lock() defer v.mutex.Unlock() _ = v.dump(v.MetaPath()) } func (v *Volume) SaveFile(fid FileId, fsize int, flag byte, r io.Reader) (*Needle, error) { fidstr := fid.String() fpath := path.Join(v.dir, fidstr) needle, err := WriteFile(fpath, fsize, flag, r) if err == nil { v.mutex.Lock() v.FileCount += 1 v.Size += uint64(fsize) + uint64(binary.Size(needle)) v.mutex.Unlock() } return needle, err } func (v *Volume) LoadFile(fid FileId, w http.ResponseWriter) error { fidstr := fid.String() fpath := path.Join(v.dir, fidstr) err := ReadFile(fpath, func(n *Needle, r io.Reader) error { w.Header().Set("Content-length", strconv.FormatInt(int64(n.DataSize), 10)) w.Header().Set("CRC32", strconv.FormatUint(uint64(n.Checksum), 16)) w.Header().Set("Flag", strconv.FormatUint(uint64(n.Flags), 16)) _, err := io.Copy(w, r) return err }) return err } func (v *Volume) DeleteFile(fid FileId) { fidstr := fid.String() fpath := path.Join(v.dir, fidstr) os.Remove(fpath) // TODO: delete asynchronously; the error is ignored for now } func (v *Volume) load(path string) error { blob, err := ioutil.ReadFile(path) if err != nil { return err } return json.Unmarshal(blob, v) } func (v *Volume) dump(path string) error { blob, err := json.Marshal(v) if err != nil { return err } newpath := path + "_new" if err = ioutil.WriteFile(newpath, blob, 0644); err != nil { return err } return os.Rename(newpath, path) }
e{ Id: id, Size: 0, FileCount: 0, ReadOnly: false, Uptime: 0, Status: 0, dir: path.Join(dirname, id.String()) + VolumeExtension, } // os.MkdirAll returns nil when the directory already exists, so the original os.IsExist check never fired and existing volume metadata was never loaded; probe the meta file instead. if err := os.MkdirAll(v.dir, 0755); err != nil { return nil, err } if _, err := os.Stat(v.MetaPath()); err == nil { return v, v.load(v.MetaPath()) } if err := v.dump(v.MetaPath()); err != nil { return nil, err } return v, nil } func (v *Vol
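One detail in volume.go worth calling out: dump writes the metadata to a sibling "_new" file and then renames it over meta.json, so a crash mid-write leaves either the old file or the new one, never a torn copy. A standalone sketch of that write-then-rename idiom (a hypothetical atomicWrite helper, not part of the storage package) follows; note that os.Rename replaces the destination atomically on POSIX filesystems, while Windows gives weaker guarantees:

package main

import (
	"io/ioutil"
	"os"
)

// atomicWrite writes data to a sibling temp file first, then renames the temp
// file over the target. On POSIX filesystems the rename is atomic, so readers
// see either the old content or the new content, never a partial write.
func atomicWrite(path string, data []byte) error {
	tmp := path + "_new"
	if err := ioutil.WriteFile(tmp, data, 0644); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	if err := atomicWrite("meta.json", []byte(`{"id":1}`)); err != nil {
		panic(err)
	}
}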
bank_new.go
// Code generated - DO NOT EDIT. // This file is a generated binding and any manual changes will be lost. package contract import ( "errors" "math/big" "strings" ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" ) // Reference imports to suppress errors if they are not otherwise used. var ( _ = big.NewInt _ = strings.NewReader _ = ethereum.NotFound _ = abi.U256 _ = bind.Bind _ = common.Big1 _ = types.BloomLookup _ = event.NewSubscription ) // BankABI is the input ABI used to generate the binding from. const BankABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"etherValue\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[],\"name\":\"kill\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwner\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"getCoinBalance\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"getBankBalance\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"getOwner\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"to\",\"type\":\"address\"},{\"name\":\"coinValue\",\"type\":\"uint256\"}],\"name\":\"transferCoin\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"coinValue\",\"type\":\"uint256\"}],\"name\":\"mint\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"to\",\"type\":\"address\"},{\"name\":\"etherValue\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[],\"name\":\"deposit\",\"outputs\":[],\"payable\":true,\"stateMutability\":\"payable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"coinValue\",\"type\":\"uint256\"}],\"name\":\"buy\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"payable\":true,\"stateMutability\":\"payable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"DepositEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"WithdrawEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"to\",\"type\":\"addre
ss\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"TransferEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"MintEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"BuyCoinEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"TransferCoinEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"newOwner\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"TransferOwnerEvent\",\"type\":\"event\"}]" // BankBin is the compiled bytecode used for deploying new contracts. //const BankBin = `0x608060405260008054600160a060020a03191633179055610a22806100256000396000f3fe6080604052600436106100ae5763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632e1a7d4d81146100b357806341c0e1b5146100df5780634fb2e45d146100f457806356fbd78f146101275780637b83b50b1461014e578063893d20e8146101635780638dde60fa14610194578063a0712d68146101cd578063a9059cbb146101f7578063d0e30db014610230578063d96a094a14610238575b600080fd5b3480156100bf57600080fd5b506100dd600480360360208110156100d657600080fd5b5035610262565b005b3480156100eb57600080fd5b506100dd610381565b34801561010057600080fd5b506100dd6004803603602081101561011757600080fd5b5035600160a060020a03166103e6565b34801561013357600080fd5b5061013c6104d1565b60408051918252519081900360200190f35b34801561015a57600080fd5b5061013c6104e4565b34801561016f57600080fd5b506101786104f7565b60408051600160a060020a039092168252519081900360200190f35b3480156101a057600080fd5b506100dd600480360360408110156101b757600080fd5b50600160a060020a038135169060200135610506565b3480156101d957600080fd5b506100dd600480360360208110156101f057600080fd5b5035610610565b34801561020357600080fd5b506100dd6004803603604081101561021a57600080fd5b50600160a060020a0381351690602001356106d6565b6100dd6107e0565b34801561024457600080fd5b506100dd6004803603602081101561025b57600080fd5b5035610838565b33600090815260016020526040902054670de0b6b3a76400008202908111156102fb576040805160e560020a62461bcd02815260206004820152602360248201527f596f75722065746865722062616c616e63657320617265206e6f7420656e6f7560448201527f67682e0000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b604051339082156108fc029083906000818181858888f19350505050158015610328573d6000803e3d6000fd5b5033600081815260016020908152604091829020805485900390558151858152429181019190915281517f5bb95829671915ece371da722f91d5371159095dcabf2f75cd6c53facb7e1bab929181900390910190a25050565b600054600160a060020a031633146103e3576040805160e560020a62461bcd02815260206004820152601260248201527f596f7520617265206e6f74206f776e65722e0000000000000000000000000000604482015290519081900360640190fd5b33ff5b600054600160a060020a03163314610448576040805160e560020a62461bcd02815260206004820152601260248201
527f596f7520617265206e6f74206f776e65722e0000000000000000000000000000604482015290519081900360640190fd5b600160a060020a038116151561045d57600080fd5b600054604080514281529051600160a060020a038085169316917f587a4fcff87b7be11c779eb502f8b2584f996387d8b8cda0e5113fef424f7316919081900360200190a36000805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0392909216919091179055565b3360009081526002602052604090205490565b3360009081526001602052604090205490565b600054600160a060020a031690565b33600090815260026020526040902054670de0b6b3a764000082029081111561059f576040805160e560020a62461bcd02815260206004820152602760248201527f596f75722062616e6b20636f696e2062616c616e63657320617265206e6f742060448201527f656e6f7567682e00000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b33600081815260026020908152604080832080548690039055600160a060020a038716808452928190208054860190558051868152429281019290925280519293927f941d755df54ad0234b406209d0c923107cabf6d4f1ce335b8ae5d89d6a28c2d29281900390910190a3505050565b600054600160a060020a03163314610672576040805160e560020a62461bcd02815260206004820152601260248201527f596f7520617265206e6f74206f776e65722e0000000000000000000000000000604482015290519081900360640190fd5b336000818152600260209081526040918290208054670de0b6b3a764000086029081019091558251858152429281019290925282519093927f8069ef4945469d029cc32e222031bccdc99b2eaaf4ee374cd268012f7ddee907928290030190a25050565b33600090815260016020526040902054670de0b6b3a764000082029081111561076f576040805160e560020a62461bcd02815260206004820152602360248201527f596f75722065746865722062616c616e63657320617265206e6f7420656e6f7560448201527f67682e0000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b33600081815260016020908152604080832080548690039055600160a060020a038716808452928190208054860190558051868152429281019290925280519293927fbabc8cd3bd6701ee99131f374fd2ab4ea66f48dc4e4182ed78fecb0502e44dd69281900390910190a3505050565b336000818152600160209081526040918290208054349081019091558251908152429181019190915281517fad40ae5dc69974ba932d08b0a608e89109412d41d04850f5196f144875ae2660929181900390910190a2565b60008054600160a060020a0316815260026020526040902054670de0b6b3a76400008202908111156108da576040805160e560020a62461bcd02815260206004820152602a60248201527f4f776e657227732062616e6b20636f696e2062616c616e63657320617265206e60448201527f6f7420656e6f7567682e00000000000000000000000000000000000000000000606482015290519081900360840190fd5b33600090815260016020526040902054811115610967576040805160e560020a62461bcd02815260206004820152602360248201527f596f75722065746865722062616c616e63657320617265206e6f7420656e6f7560448201527f67682e0000000000000000000000000000000000000000000000000000000000606482015290519081900360840190fd5b336000818152600160209081526040808320805486900390558254600160a060020a03908116845281842080548701905584845260028352818420805487019055835416835291829020805485900390558151858152429181019190915281517f4c5ad1aea676c1e1613de5416105424342b84655de046903409dea58418bedff929181900390910190a2505056fea165627a7a72305820063f5a6735fc1ccd7c898926ee5036708bc3e55ec9df1b24a7554dd1ede3ee0a0029` var BankBin string = "" func SetBankBin(solBin string) { BankBin = solBin } // DeployBank deploys a new Ethereum contract, binding an instance of Bank to it. 
func DeployBank(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Bank, error) { parsed, err := abi.JSON(strings.NewReader(BankABI)) if err != nil { return common.Address{}, nil, nil, err } if BankBin == "" { return common.Address{}, nil, nil, errors.New("BankBin is empty") } address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(BankBin), backend) if err != nil { return common.Address{}, nil, nil, err } return address, tx, &Bank{BankCaller: BankCaller{contract: contract}, BankTransactor: BankTransactor{contract: contract}, BankFilterer: BankFilterer{contract: contract}}, nil } // Bank is an auto generated Go binding around an Ethereum contract. type Bank struct { BankCaller // Read-only binding to the contract BankTransactor // Write-only binding to the contract BankFilterer // Log filterer for contract events } // BankCaller is an auto generated read-only Go binding around an Ethereum contract. type BankCaller struct { contract *bind.BoundContract // Generic contract wrapper for the low level calls } // BankTransactor is an auto generated write-only Go binding around an Ethereum contract. type BankTransactor struct { contract *bind.BoundContract // Generic contract wrapper for the low level calls } // BankFilterer is an auto generated log filtering Go binding around an Ethereum contract events. type BankFilterer struct { contract *bind.BoundContract // Generic contract wrapper for the low level calls } // BankSession is an auto generated Go binding around an Ethereum contract, // with pre-set call and transact options. type BankSession struct { Contract *Bank // Generic contract binding to set the session for CallOpts bind.CallOpts // Call options to use throughout this session TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session } // BankCallerSession is an auto generated read-only Go binding around an Ethereum contract, // with pre-set call options. type BankCallerSession struct { Contract *BankCaller // Generic contract caller binding to set the session for CallOpts bind.CallOpts // Call options to use throughout this session } // BankTransactorSession is an auto generated write-only Go binding around an Ethereum contract, // with pre-set transact options. type BankTransactorSession struct { Contract *BankTransactor // Generic contract transactor binding to set the session for TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session } // BankRaw is an auto generated low-level Go binding around an Ethereum contract. type BankRaw struct { Contract *Bank // Generic contract binding to access the raw methods on } // BankCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. type BankCallerRaw struct { Contract *BankCaller // Generic read-only contract binding to access the raw methods on } // BankTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. type BankTransactorRaw struct { Contract *BankTransactor // Generic write-only contract binding to access the raw methods on } // NewBank creates a new instance of Bank, bound to a specific deployed contract. 
func NewBank(address common.Address, backend bind.ContractBackend) (*Bank, error) { contract, err := bindBank(address, backend, backend, backend) if err != nil { return nil, err } return &Bank{BankCaller: BankCaller{contract: contract}, BankTransactor: BankTransactor{contract: contract}, BankFilterer: BankFilterer{contract: contract}}, nil } // NewBankCaller creates a new read-only instance of Bank, bound to a specific deployed contract. func NewBankCaller(address common.Address, caller bind.ContractCaller) (*BankCaller, error) { contract, err := bindBank(address, caller, nil, nil) if err != nil { return nil, err } return &BankCaller{contract: contract}, nil } // NewBankTransactor creates a new write-only instance of Bank, bound to a specific deployed contract. func NewBankTransactor(address common.Address, transactor bind.ContractTransactor) (*BankTransactor, error) { contract, err := bindBank(address, nil, transactor, nil) if err != nil { return nil, err } return &BankTransactor{contract: contract}, nil } // NewBankFilterer creates a new log filterer instance of Bank, bound to a specific deployed contract. func NewBankFilterer(address common.Address, filterer bind.ContractFilterer) (*BankFilterer, error) { contract, err := bindBank(address, nil, nil, filterer) if err != nil { return nil, err } return &BankFilterer{contract: contract}, nil } // bindBank binds a generic wrapper to an already deployed contract. func bindBank(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { parsed, err := abi.JSON(strings.NewReader(BankABI)) if err != nil { return nil, err } return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil } // Call invokes the (constant) contract method with params as input values and // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. func (_Bank *BankRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { return _Bank.Contract.BankCaller.contract.Call(opts, result, method, params...) } // Transfer initiates a plain transaction to move funds to the contract, calling // its default method if one is available. func (_Bank *BankRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { return _Bank.Contract.BankTransactor.contract.Transfer(opts) } // Transact invokes the (paid) contract method with params as input values. func (_Bank *BankRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { return _Bank.Contract.BankTransactor.contract.Transact(opts, method, params...) } // Call invokes the (constant) contract method with params as input values and // sets the output to result. The result type might be a single field for simple // returns, a slice of interfaces for anonymous returns and a struct for named // returns. func (_Bank *BankCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { return _Bank.Contract.contract.Call(opts, result, method, params...) } // Transfer initiates a plain transaction to move funds to the contract, calling // its default method if one is available. 
func (_Bank *BankTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { return _Bank.Contract.contract.Transfer(opts) } // Transact invokes the (paid) contract method with params as input values. func (_Bank *BankTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { return _Bank.Contract.contract.Transact(opts, method, params...) } // GetBankBalance is a free data retrieval call binding the contract method 0x7b83b50b. // // Solidity: function getBankBalance() constant returns(uint256) func (_Bank *BankCaller) GetBankBalance(opts *bind.CallOpts) (*big.Int, error) { var ( ret0 = new(*big.Int) ) out := ret0 err := _Bank.contract.Call(opts, out, "getBankBalance") return *ret0, err } // GetBankBalance is a free data retrieval call binding the contract method 0x7b83b50b. // // Solidity: function getBankBalance() constant returns(uint256) func (_Bank *BankSession) GetBankBalance() (*big.Int, error) { return _Bank.Contract.GetBankBalance(&_Bank.CallOpts) } // GetBankBalance is a free data retrieval call binding the contract method 0x7b83b50b. // // Solidity: function getBankBalance() constant returns(uint256) func (_Bank *BankCallerSession) GetBankBalance() (*big.Int, error) { return _Bank.Contract.GetBankBalance(&_Bank.CallOpts) } // GetCoinBalance is a free data retrieval call binding the contract method 0x56fbd78f. // // Solidity: function getCoinBalance() constant returns(uint256) func (_Bank *BankCaller) GetCoinBalance(opts *bind.CallOpts) (*big.Int, error) { var ( ret0 = new(*big.Int) ) out := ret0 err := _Bank.contract.Call(opts, out, "getCoinBalance") return *ret0, err } // GetCoinBalance is a free data retrieval call binding the contract method 0x56fbd78f. // // Solidity: function getCoinBalance() constant returns(uint256) func (_Bank *BankSession) GetCoinBalance() (*big.Int, error) { return _Bank.Contract.GetCoinBalance(&_Bank.CallOpts) } // GetCoinBalance is a free data retrieval call binding the contract method 0x56fbd78f. // // Solidity: function getCoinBalance() constant returns(uint256) func (_Bank *BankCallerSession) GetCoinBalance() (*big.Int, error) { return _Bank.Contract.GetCoinBalance(&_Bank.CallOpts) } // GetOwner is a free data retrieval call binding the contract method 0x893d20e8. // // Solidity: function getOwner() constant returns(address) func (_Bank *BankCaller) GetOwner(opts *bind.CallOpts) (common.Address, error) { var ( ret0 = new(common.Address) ) out := ret0 err := _Bank.contract.Call(opts, out, "getOwner") return *ret0, err } // GetOwner is a free data retrieval call binding the contract method 0x893d20e8. // // Solidity: function getOwner() constant returns(address) func (_Bank *BankSession) GetOwner() (common.Address, error) { return _Bank.Contract.GetOwner(&_Bank.CallOpts) } // GetOwner is a free data retrieval call binding the contract method 0x893d20e8. // // Solidity: function getOwner() constant returns(address) func (_Bank *BankCallerSession) GetOwner() (common.Address, error) { return _Bank.Contract.GetOwner(&_Bank.CallOpts) } // Buy is a paid mutator transaction binding the contract method 0xd96a094a. // // Solidity: function buy(coinValue uint256) returns() func (_Bank *BankTransactor) Buy(opts *bind.TransactOpts, coinValue *big.Int) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "buy", coinValue) } // Buy is a paid mutator transaction binding the contract method 0xd96a094a. 
// // Solidity: function buy(coinValue uint256) returns() func (_Bank *BankSession) Buy(coinValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Buy(&_Bank.TransactOpts, coinValue) } // Buy is a paid mutator transaction binding the contract method 0xd96a094a. // // Solidity: function buy(coinValue uint256) returns() func (_Bank *BankTransactorSession) Buy(coinValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Buy(&_Bank.TransactOpts, coinValue) } // Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. // // Solidity: function deposit() returns() func (_Bank *BankTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "deposit") } // Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. // // Solidity: function deposit() returns() func (_Bank *BankSession) Deposit() (*types.Transaction, error) { return _Bank.Contract.Deposit(&_Bank.TransactOpts) } // Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. // // Solidity: function deposit() returns() func (_Bank *BankTransactorSession) Deposit() (*types.Transaction, error) { return _Bank.Contract.Deposit(&_Bank.TransactOpts) } // Kill is a paid mutator transaction binding the contract method 0x41c0e1b5. // // Solidity: function kill() returns() func (_Bank *BankTransactor) Kill(opts *bind.TransactOpts) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "kill") } // Kill is a paid mutator transaction binding the contract method 0x41c0e1b5. // // Solidity: function kill() returns() func (_Bank *BankSession) Kill() (*types.Transaction, error) { return _Bank.Contract.Kill(&_Bank.TransactOpts) } // Kill is a paid mutator transaction binding the contract method 0x41c0e1b5. // // Solidity: function kill() returns() func (_Bank *BankTransactorSession) Kill() (*types.Transaction, error) { return _Bank.Contract.Kill(&_Bank.TransactOpts) } // Mint is a paid mutator transaction binding the contract method 0xa0712d68. // // Solidity: function mint(coinValue uint256) returns() func (_Bank *BankTransactor) Mint(opts *bind.TransactOpts, coinValue *big.Int) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "mint", coinValue) } // Mint is a paid mutator transaction binding the contract method 0xa0712d68. // // Solidity: function mint(coinValue uint256) returns() func (_Bank *BankSession) Mint(coinValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Mint(&_Bank.TransactOpts, coinValue) } // Mint is a paid mutator transaction binding the contract method 0xa0712d68. // // Solidity: function mint(coinValue uint256) returns() func (_Bank *BankTransactorSession) Mint(coinValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Mint(&_Bank.TransactOpts, coinValue) } // Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. // // Solidity: function transfer(to address, etherValue uint256) returns() func (_Bank *BankTransactor) Transfer(opts *bind.TransactOpts, to common.Address, etherValue *big.Int) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "transfer", to, etherValue) } // Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. 
// // Solidity: function transfer(to address, etherValue uint256) returns() func (_Bank *BankSession) Transfer(to common.Address, etherValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Transfer(&_Bank.TransactOpts, to, etherValue) } // Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. // // Solidity: function transfer(to address, etherValue uint256) returns() func (_Bank *BankTransactorSession) Transfer(to common.Address, etherValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Transfer(&_Bank.TransactOpts, to, etherValue) } // TransferCoin is a paid mutator transaction binding the contract method 0x8dde60fa. // // Solidity: function transferCoin(to address, coinValue uint256) returns() func (_Bank *BankTransactor) TransferCoin(opts *bind.TransactOpts, to common.Address, coinValue *big.Int) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "transferCoin", to, coinValue) } // TransferCoin is a paid mutator transaction binding the contract method 0x8dde60fa. // // Solidity: function transferCoin(to address, coinValue uint256) returns() func (_Bank *BankSession) TransferCoin(to common.Address, coinValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.TransferCoin(&_Bank.TransactOpts, to, coinValue) } // TransferCoin is a paid mutator transaction binding the contract method 0x8dde60fa. // // Solidity: function transferCoin(to address, coinValue uint256) returns() func (_Bank *BankTransactorSession) TransferCoin(to common.Address, coinValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.TransferCoin(&_Bank.TransactOpts, to, coinValue) } // TransferOwner is a paid mutator transaction binding the contract method 0x4fb2e45d. // // Solidity: function transferOwner(newOwner address) returns() func (_Bank *BankTransactor) TransferOwner(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "transferOwner", newOwner) } // TransferOwner is a paid mutator transaction binding the contract method 0x4fb2e45d. // // Solidity: function transferOwner(newOwner address) returns() func (_Bank *BankSession) TransferOwner(newOwner common.Address) (*types.Transaction, error) { return _Bank.Contract.TransferOwner(&_Bank.TransactOpts, newOwner) } // TransferOwner is a paid mutator transaction binding the contract method 0x4fb2e45d. // // Solidity: function transferOwner(newOwner address) returns() func (_Bank *BankTransactorSession) TransferOwner(newOwner common.Address) (*types.Transaction, error) { return _Bank.Contract.TransferOwner(&_Bank.TransactOpts, newOwner) } // Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. // // Solidity: function withdraw(etherValue uint256) returns() func (_Bank *BankTransactor) Withdraw(opts *bind.TransactOpts, etherValue *big.Int) (*types.Transaction, error) { return _Bank.contract.Transact(opts, "withdraw", etherValue) } // Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. // // Solidity: function withdraw(etherValue uint256) returns() func (_Bank *BankSession) Withdraw(etherValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Withdraw(&_Bank.TransactOpts, etherValue) } // Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. 
// // Solidity: function withdraw(etherValue uint256) returns() func (_Bank *BankTransactorSession) Withdraw(etherValue *big.Int) (*types.Transaction, error) { return _Bank.Contract.Withdraw(&_Bank.TransactOpts, etherValue) } // BankBuyCoinEventIterator is returned from FilterBuyCoinEvent and is used to iterate over the raw logs and unpacked data for BuyCoinEvent events raised by the Bank contract. type BankBuyCoinEventIterator struct { Event *BankBuyCoinEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. func (it *BankBuyCoinEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false } // If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs: it.Event = new(BankBuyCoinEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankBuyCoinEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankBuyCoinEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankBuyCoinEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankBuyCoinEvent represents a BuyCoinEvent event raised by the Bank contract. type BankBuyCoinEvent struct { From common.Address Value *big.Int Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterBuyCoinEvent is a free log retrieval operation binding the contract event 0x4c5ad1aea676c1e1613de5416105424342b84655de046903409dea58418bedff. // // Solidity: e BuyCoinEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) FilterBuyCoinEvent(opts *bind.FilterOpts, from []common.Address) (*BankBuyCoinEventIterator, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "BuyCoinEvent", fromRule) if err != nil { return nil, err } return &BankBuyCoinEventIterator{contract: _Bank.contract, event: "BuyCoinEvent", logs: logs, sub: sub}, nil } // WatchBuyCoinEvent is a free log subscription operation binding the contract event 0x4c5ad1aea676c1e1613de5416105424342b84655de046903409dea58418bedff. 
// // Solidity: e BuyCoinEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) WatchBuyCoinEvent(opts *bind.WatchOpts, sink chan<- *BankBuyCoinEvent, from []common.Address) (event.Subscription, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "BuyCoinEvent", fromRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankBuyCoinEvent) if err := _Bank.contract.UnpackLog(event, "BuyCoinEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil } // BankDepositEventIterator is returned from FilterDepositEvent and is used to iterate over the raw logs and unpacked data for DepositEvent events raised by the Bank contract. type BankDepositEventIterator struct { Event *BankDepositEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. func (it *BankDepositEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false }
it.Event = new(BankDepositEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankDepositEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankDepositEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankDepositEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankDepositEvent represents a DepositEvent event raised by the Bank contract. type BankDepositEvent struct { From common.Address Value *big.Int Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterDepositEvent is a free log retrieval operation binding the contract event 0xad40ae5dc69974ba932d08b0a608e89109412d41d04850f5196f144875ae2660. // // Solidity: e DepositEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) FilterDepositEvent(opts *bind.FilterOpts, from []common.Address) (*BankDepositEventIterator, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "DepositEvent", fromRule) if err != nil { return nil, err } return &BankDepositEventIterator{contract: _Bank.contract, event: "DepositEvent", logs: logs, sub: sub}, nil } // WatchDepositEvent is a free log subscription operation binding the contract event 0xad40ae5dc69974ba932d08b0a608e89109412d41d04850f5196f144875ae2660. // // Solidity: e DepositEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) WatchDepositEvent(opts *bind.WatchOpts, sink chan<- *BankDepositEvent, from []common.Address) (event.Subscription, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "DepositEvent", fromRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankDepositEvent) if err := _Bank.contract.UnpackLog(event, "DepositEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil } // BankMintEventIterator is returned from FilterMintEvent and is used to iterate over the raw logs and unpacked data for MintEvent events raised by the Bank contract. 
type BankMintEventIterator struct { Event *BankMintEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. func (it *BankMintEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false } // If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs: it.Event = new(BankMintEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankMintEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankMintEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankMintEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankMintEvent represents a MintEvent event raised by the Bank contract. type BankMintEvent struct { From common.Address Value *big.Int Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterMintEvent is a free log retrieval operation binding the contract event 0x8069ef4945469d029cc32e222031bccdc99b2eaaf4ee374cd268012f7ddee907. // // Solidity: e MintEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) FilterMintEvent(opts *bind.FilterOpts, from []common.Address) (*BankMintEventIterator, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "MintEvent", fromRule) if err != nil { return nil, err } return &BankMintEventIterator{contract: _Bank.contract, event: "MintEvent", logs: logs, sub: sub}, nil } // WatchMintEvent is a free log subscription operation binding the contract event 0x8069ef4945469d029cc32e222031bccdc99b2eaaf4ee374cd268012f7ddee907. 
// // Solidity: e MintEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) WatchMintEvent(opts *bind.WatchOpts, sink chan<- *BankMintEvent, from []common.Address) (event.Subscription, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "MintEvent", fromRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankMintEvent) if err := _Bank.contract.UnpackLog(event, "MintEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil } // BankTransferCoinEventIterator is returned from FilterTransferCoinEvent and is used to iterate over the raw logs and unpacked data for TransferCoinEvent events raised by the Bank contract. type BankTransferCoinEventIterator struct { Event *BankTransferCoinEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. func (it *BankTransferCoinEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false } // If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs: it.Event = new(BankTransferCoinEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankTransferCoinEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankTransferCoinEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankTransferCoinEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankTransferCoinEvent represents a TransferCoinEvent event raised by the Bank contract. type BankTransferCoinEvent struct { From common.Address To common.Address Value *big.Int Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterTransferCoinEvent is a free log retrieval operation binding the contract event 0x941d755df54ad0234b406209d0c923107cabf6d4f1ce335b8ae5d89d6a28c2d2. 
// // Solidity: e TransferCoinEvent(from indexed address, to indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) FilterTransferCoinEvent(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BankTransferCoinEventIterator, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } var toRule []interface{} for _, toItem := range to { toRule = append(toRule, toItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "TransferCoinEvent", fromRule, toRule) if err != nil { return nil, err } return &BankTransferCoinEventIterator{contract: _Bank.contract, event: "TransferCoinEvent", logs: logs, sub: sub}, nil } // WatchTransferCoinEvent is a free log subscription operation binding the contract event 0x941d755df54ad0234b406209d0c923107cabf6d4f1ce335b8ae5d89d6a28c2d2. // // Solidity: e TransferCoinEvent(from indexed address, to indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) WatchTransferCoinEvent(opts *bind.WatchOpts, sink chan<- *BankTransferCoinEvent, from []common.Address, to []common.Address) (event.Subscription, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } var toRule []interface{} for _, toItem := range to { toRule = append(toRule, toItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "TransferCoinEvent", fromRule, toRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankTransferCoinEvent) if err := _Bank.contract.UnpackLog(event, "TransferCoinEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil } // BankTransferEventIterator is returned from FilterTransferEvent and is used to iterate over the raw logs and unpacked data for TransferEvent events raised by the Bank contract. type BankTransferEventIterator struct { Event *BankTransferEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. 
func (it *BankTransferEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false } // If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs: it.Event = new(BankTransferEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankTransferEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankTransferEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankTransferEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankTransferEvent represents a TransferEvent event raised by the Bank contract. type BankTransferEvent struct { From common.Address To common.Address Value *big.Int Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterTransferEvent is a free log retrieval operation binding the contract event 0xbabc8cd3bd6701ee99131f374fd2ab4ea66f48dc4e4182ed78fecb0502e44dd6. // // Solidity: e TransferEvent(from indexed address, to indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) FilterTransferEvent(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BankTransferEventIterator, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } var toRule []interface{} for _, toItem := range to { toRule = append(toRule, toItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "TransferEvent", fromRule, toRule) if err != nil { return nil, err } return &BankTransferEventIterator{contract: _Bank.contract, event: "TransferEvent", logs: logs, sub: sub}, nil } // WatchTransferEvent is a free log subscription operation binding the contract event 0xbabc8cd3bd6701ee99131f374fd2ab4ea66f48dc4e4182ed78fecb0502e44dd6. 
// // Solidity: e TransferEvent(from indexed address, to indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) WatchTransferEvent(opts *bind.WatchOpts, sink chan<- *BankTransferEvent, from []common.Address, to []common.Address) (event.Subscription, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } var toRule []interface{} for _, toItem := range to { toRule = append(toRule, toItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "TransferEvent", fromRule, toRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankTransferEvent) if err := _Bank.contract.UnpackLog(event, "TransferEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil } // BankTransferOwnerEventIterator is returned from FilterTransferOwnerEvent and is used to iterate over the raw logs and unpacked data for TransferOwnerEvent events raised by the Bank contract. type BankTransferOwnerEventIterator struct { Event *BankTransferOwnerEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. func (it *BankTransferOwnerEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false } // If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs: it.Event = new(BankTransferOwnerEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankTransferOwnerEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankTransferOwnerEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankTransferOwnerEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankTransferOwnerEvent represents a TransferOwnerEvent event raised by the Bank contract. 
type BankTransferOwnerEvent struct { OldOwner common.Address NewOwner common.Address Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterTransferOwnerEvent is a free log retrieval operation binding the contract event 0x587a4fcff87b7be11c779eb502f8b2584f996387d8b8cda0e5113fef424f7316. // // Solidity: e TransferOwnerEvent(oldOwner indexed address, newOwner indexed address, timestamp uint256) func (_Bank *BankFilterer) FilterTransferOwnerEvent(opts *bind.FilterOpts, oldOwner []common.Address, newOwner []common.Address) (*BankTransferOwnerEventIterator, error) { var oldOwnerRule []interface{} for _, oldOwnerItem := range oldOwner { oldOwnerRule = append(oldOwnerRule, oldOwnerItem) } var newOwnerRule []interface{} for _, newOwnerItem := range newOwner { newOwnerRule = append(newOwnerRule, newOwnerItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "TransferOwnerEvent", oldOwnerRule, newOwnerRule) if err != nil { return nil, err } return &BankTransferOwnerEventIterator{contract: _Bank.contract, event: "TransferOwnerEvent", logs: logs, sub: sub}, nil } // WatchTransferOwnerEvent is a free log subscription operation binding the contract event 0x587a4fcff87b7be11c779eb502f8b2584f996387d8b8cda0e5113fef424f7316. // // Solidity: e TransferOwnerEvent(oldOwner indexed address, newOwner indexed address, timestamp uint256) func (_Bank *BankFilterer) WatchTransferOwnerEvent(opts *bind.WatchOpts, sink chan<- *BankTransferOwnerEvent, oldOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { var oldOwnerRule []interface{} for _, oldOwnerItem := range oldOwner { oldOwnerRule = append(oldOwnerRule, oldOwnerItem) } var newOwnerRule []interface{} for _, newOwnerItem := range newOwner { newOwnerRule = append(newOwnerRule, newOwnerItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "TransferOwnerEvent", oldOwnerRule, newOwnerRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankTransferOwnerEvent) if err := _Bank.contract.UnpackLog(event, "TransferOwnerEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil } // BankWithdrawEventIterator is returned from FilterWithdrawEvent and is used to iterate over the raw logs and unpacked data for WithdrawEvent events raised by the Bank contract. type BankWithdrawEventIterator struct { Event *BankWithdrawEvent // Event containing the contract specifics and raw log contract *bind.BoundContract // Generic contract to use for unpacking event data event string // Event name to use for unpacking event data logs chan types.Log // Log channel receiving the found contract events sub ethereum.Subscription // Subscription for errors, completion and termination done bool // Whether the subscription completed delivering logs fail error // Occurred error to stop iteration } // Next advances the iterator to the subsequent event, returning whether there // are any more events found. In case of a retrieval or parsing error, false is // returned and Error() can be queried for the exact failure. 
func (it *BankWithdrawEventIterator) Next() bool { // If the iterator failed, stop iterating if it.fail != nil { return false } // If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs: it.Event = new(BankWithdrawEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true default: return false } } // Iterator still in progress, wait for either a data or an error event select { case log := <-it.logs: it.Event = new(BankWithdrawEvent) if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { it.fail = err return false } it.Event.Raw = log return true case err := <-it.sub.Err(): it.done = true it.fail = err return it.Next() } } // Error returns any retrieval or parsing error occurred during filtering. func (it *BankWithdrawEventIterator) Error() error { return it.fail } // Close terminates the iteration process, releasing any pending underlying // resources. func (it *BankWithdrawEventIterator) Close() error { it.sub.Unsubscribe() return nil } // BankWithdrawEvent represents a WithdrawEvent event raised by the Bank contract. type BankWithdrawEvent struct { From common.Address Value *big.Int Timestamp *big.Int Raw types.Log // Blockchain specific contextual infos } // FilterWithdrawEvent is a free log retrieval operation binding the contract event 0x5bb95829671915ece371da722f91d5371159095dcabf2f75cd6c53facb7e1bab. // // Solidity: e WithdrawEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) FilterWithdrawEvent(opts *bind.FilterOpts, from []common.Address) (*BankWithdrawEventIterator, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.FilterLogs(opts, "WithdrawEvent", fromRule) if err != nil { return nil, err } return &BankWithdrawEventIterator{contract: _Bank.contract, event: "WithdrawEvent", logs: logs, sub: sub}, nil } // WatchWithdrawEvent is a free log subscription operation binding the contract event 0x5bb95829671915ece371da722f91d5371159095dcabf2f75cd6c53facb7e1bab. // // Solidity: e WithdrawEvent(from indexed address, value uint256, timestamp uint256) func (_Bank *BankFilterer) WatchWithdrawEvent(opts *bind.WatchOpts, sink chan<- *BankWithdrawEvent, from []common.Address) (event.Subscription, error) { var fromRule []interface{} for _, fromItem := range from { fromRule = append(fromRule, fromItem) } logs, sub, err := _Bank.contract.WatchLogs(opts, "WithdrawEvent", fromRule) if err != nil { return nil, err } return event.NewSubscription(func(quit <-chan struct{}) error { defer sub.Unsubscribe() for { select { case log := <-logs: // New log arrived, parse the event and forward to the user event := new(BankWithdrawEvent) if err := _Bank.contract.UnpackLog(event, "WithdrawEvent", log); err != nil { return err } event.Raw = log select { case sink <- event: case err := <-sub.Err(): return err case <-quit: return nil } case err := <-sub.Err(): return err case <-quit: return nil } } }), nil }
// If the iterator completed, deliver directly whatever's available if it.done { select { case log := <-it.logs:
common.py
from contextlib import contextmanager from itertools import count from jeepney import HeaderFields, Message, MessageFlag, MessageType class MessageFilters: def __init__(self): self.filters = {} self.filter_ids = count() def matches(self, message): for handle in self.filters.values(): if handle.rule.matches(message): yield handle class FilterHandle: def __init__(self, filters: MessageFilters, rule, queue): self._filters = filters self._filter_id = next(filters.filter_ids) self.rule = rule self.queue = queue self._filters.filters[self._filter_id] = self def close(self): del self._filters.filters[self._filter_id] def __enter__(self): return self.queue def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False class ReplyMatcher: def __init__(self): self._futures = {} @contextmanager def catch(self, serial, future): """Context manager to capture a reply for the given serial number""" self._futures[serial] = future try: yield future finally: del self._futures[serial] def dispatch(self, msg): """Dispatch an incoming message which may be a reply Returns True if a task was waiting for it, otherwise False. """ rep_serial = msg.header.fields.get(HeaderFields.reply_serial, -1) if rep_serial in self._futures: self._futures[rep_serial].set_result(msg) return True else: return False def drop_all(self, exc: Exception = None): """Throw an error in any task still waiting for a reply""" if exc is None: exc = RouterClosed("D-Bus router closed before reply arrived") futures, self._futures = self._futures, {}
for fut in futures.values(): fut.set_exception(exc) class RouterClosed(Exception): """Raised in tasks waiting for a reply when the router is closed This will also be raised if the receiver task crashes, so tasks are not stuck waiting for a reply that can never come. The router object will not be usable after this is raised. """ pass def check_replyable(msg: Message): """Raise an error if we wouldn't expect a reply for msg""" if msg.header.message_type != MessageType.method_call: raise TypeError("Only method call messages have replies " f"(not {msg.header.message_type})") if MessageFlag.no_reply_expected & msg.header.flags: raise ValueError("This message has the no_reply_expected flag set")
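A minimal usage sketch of the ReplyMatcher defined above (the incoming message here is a hand-rolled stub, not jeepney's real Message type; only HeaderFields is taken from jeepney, and ReplyMatcher is assumed to be in scope from the module above): a future is registered under a serial with catch(), and dispatch() resolves it when a message carrying that reply_serial arrives.

import asyncio
from types import SimpleNamespace

from jeepney import HeaderFields

async def demo():
    matcher = ReplyMatcher()
    fut = asyncio.get_running_loop().create_future()
    # Stub standing in for an incoming D-Bus message with reply_serial == 7
    reply = SimpleNamespace(header=SimpleNamespace(fields={HeaderFields.reply_serial: 7}))
    with matcher.catch(7, fut):
        assert matcher.dispatch(reply)   # True: a task was waiting on serial 7
        assert (await fut) is reply      # the future resolved to the reply

asyncio.run(demo())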
areaChart.options.ts
/* * Copyright 2020 ZUP IT SERVICOS EM TECNOLOGIA E INOVACAO SA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { getTheme } from 'core/utils/themes'; const theme = getTheme(); export default { chart: { id: 'monitoringChart', background: 'transparent', stacked: false, zoom: { enabled: false } }, colors: theme.metrics.chart.Comparison, theme: { mode: 'dark' }, grid: { borderColor: theme.circleGroupMetrics.chart.gridColor, show: true, yaxis: { lines: { show: true } }, padding: { left: 10 } }, legend: { show: false }, xaxis: { type: 'datetime', labels: { datetimeUTC: false
}, markers: { size: 0.1, strokeColors: 'transparent' }, stroke: { curve: 'smooth' }, tooltip: { x: { format: 'dd MMM • HH:mm' } } };
}
issue-41110.rs
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-wasm32-bare compiled with panic=abort by default // check that we don't emit multiple drop flags when they are not needed. fn
() { let x = S.other(S.id()); } // no_mangle to make sure this gets instantiated even in an executable. #[no_mangle] pub fn test() { let u = S; let mut v = S; drop(v); v = u; } struct S; impl Drop for S { fn drop(&mut self) { } } impl S { fn id(self) -> Self { self } fn other(self, s: Self) {} } // END RUST SOURCE // START rustc.main.ElaborateDrops.after.mir // let mut _0: (); // scope 1 { // let _1: (); // } // ... // let mut _2: S; // let mut _3: S; // let mut _4: S; // let mut _5: bool; // bb0: { // END rustc.main.ElaborateDrops.after.mir // START rustc.test.ElaborateDrops.after.mir // let mut _0: (); // ... // let _1: S; // ... // let mut _2: S; // ... // let mut _3: (); // let mut _4: S; // let mut _5: S; // let mut _6: bool; // bb0: { // END rustc.test.ElaborateDrops.after.mir
main
double_dqn_model.py
""" Double DQN """ import argparse from collections import OrderedDict from typing import Tuple import pytorch_lightning as pl import torch from pl_bolts.losses.rl import double_dqn_loss from pl_bolts.models.rl.dqn_model import DQN class DoubleDQN(DQN): """ Double Deep Q-network (DDQN) PyTorch Lightning implementation of `Double DQN <https://arxiv.org/pdf/1509.06461.pdf>`_ Paper authors: Hado van Hasselt, Arthur Guez, David Silver Model implemented by: - `Donal Byrne <https://github.com/djbyrne>` Example: >>> from pl_bolts.models.rl.double_dqn_model import DoubleDQN ... >>> model = DoubleDQN("PongNoFrameskip-v4") Train:: trainer = Trainer() trainer.fit(model) Args: env: gym environment tag gpus: number of gpus being used eps_start: starting value of epsilon for the epsilon-greedy exploration eps_end: final value of epsilon for the epsilon-greedy exploration eps_last_frame: the final frame in for the decrease of epsilon. At this frame espilon = eps_end sync_rate: the number of iterations between syncing up the target network with the train network gamma: discount factor lr: learning rate batch_size: size of minibatch pulled from the DataLoader replay_size: total capacity of the replay buffer warm_start_size: how many random steps through the environment to be carried out at the start of training to fill the buffer with a starting point sample_len: the number of samples to pull from the dataset iterator and feed to the DataLoader Note: This example is based on https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-Second-Edition/blob/master/Chapter08/03_dqn_double.py Note: Currently only supports CPU and single GPU training with `distributed_backend=dp` """ def
(self, batch: Tuple[torch.Tensor, torch.Tensor], _) -> OrderedDict: """ Carries out a single step through the environment to update the replay buffer. Then calculates loss based on the minibatch received Args: batch: current mini batch of replay data _: batch number, not used Returns: Training loss and log metrics """ # calculates training loss loss = double_dqn_loss(batch, self.net, self.target_net) if self.trainer.use_dp or self.trainer.use_ddp2: loss = loss.unsqueeze(0) # Hard update of target network (full weight copy every sync_rate steps) if self.global_step % self.sync_rate == 0: self.target_net.load_state_dict(self.net.state_dict()) log = { "total_reward": self.total_rewards[-1], "avg_reward": self.avg_rewards, "train_loss": loss, # "episodes": self.total_episode_steps, } status = { "steps": self.global_step, "avg_reward": self.avg_rewards, "total_reward": self.total_rewards[-1], "episodes": self.done_episodes, # "episode_steps": self.episode_steps, "epsilon": self.agent.epsilon, } return OrderedDict( { "loss": loss, "avg_reward": self.avg_rewards, "log": log, "progress_bar": status, } ) def cli_main(): parser = argparse.ArgumentParser(add_help=False) # trainer args parser = pl.Trainer.add_argparse_args(parser) # model args parser = DoubleDQN.add_model_specific_args(parser) args = parser.parse_args() model = DoubleDQN(**args.__dict__) trainer = pl.Trainer.from_argparse_args(args) trainer.fit(model) if __name__ == '__main__': cli_main()
training_step
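The loss itself comes from pl_bolts.losses.rl.double_dqn_loss; for clarity, here is an independent sketch of the Double DQN target from the van Hasselt et al. paper in plain PyTorch (the batch layout and names are illustrative assumptions, not pl_bolts' actual signature): the online network selects the next action, while the target network evaluates it.

import torch
import torch.nn.functional as F

def double_dqn_loss_sketch(batch, net, target_net, gamma=0.99):
    states, actions, rewards, dones, next_states = batch  # assumed batch layout
    # Q(s, a) from the online network for the actions actually taken
    q_values = net(states).gather(1, actions.long().unsqueeze(-1)).squeeze(-1)
    with torch.no_grad():
        # Double DQN: the online net *selects* the argmax action ...
        next_actions = net(next_states).argmax(dim=1, keepdim=True)
        # ... while the target net *evaluates* it, reducing overestimation
        next_q = target_net(next_states).gather(1, next_actions).squeeze(-1)
        target = rewards + gamma * next_q * (1 - dones.float())
    return F.mse_loss(q_values, target)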
total_routes.rs
use super::*; use rosomaxa::prelude::*; /// An objective function which controls the total number of routes. pub struct TotalRoutes { is_minimization: bool, } impl Default for TotalRoutes { fn default() -> Self { Self { is_minimization: true } } } impl TotalRoutes { /// Creates an instance of `TotalRoutes` with fleet minimization as a target. pub fn new_minimized() -> Self { Self { is_minimization: true } } /// Creates an instance of `TotalRoutes` with fleet maximization as a target. pub fn new_maximized() -> Self { Self { is_minimization: false } } } impl Objective for TotalRoutes { type Solution = InsertionContext; fn total_order(&self, a: &Self::Solution, b: &Self::Solution) -> Ordering { let fitness_a = self.fitness(a); let fitness_b = self.fitness(b); let (fitness_a, fitness_b) = if self.is_minimization { (fitness_a, fitness_b) } else { (-1. * fitness_a, -1. * fitness_b) }; compare_floats(fitness_a, fitness_b) } fn fitness(&self, solution: &Self::Solution) -> f64
}
{ solution.solution.routes.len() as f64 }
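The minimize/maximize switch above is just a sign flip applied before comparison; the same trick, sketched in Python for illustration (not rosomaxa's API):

def total_order(fitness_a, fitness_b, is_minimization=True):
    # Negating both fitness values turns a minimizing comparator into a
    # maximizing one without duplicating the comparison logic.
    if not is_minimization:
        fitness_a, fitness_b = -fitness_a, -fitness_b
    return (fitness_a > fitness_b) - (fitness_a < fitness_b)  # -1, 0 or 1

assert total_order(2.0, 3.0) == -1                        # fewer routes orders first
assert total_order(2.0, 3.0, is_minimization=False) == 1  # more routes orders first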
generic_amd64_go116.go
// +build go1.15,!go1.17 /* * Copyright 2021 ByteDance Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package decoder import ( `encoding/json` `fmt` `reflect` `strconv` `github.com/bytedance/sonic/internal/jit` `github.com/bytedance/sonic/internal/native` `github.com/bytedance/sonic/internal/native/types` `github.com/twitchyliquid64/golang-asm/obj` `github.com/twitchyliquid64/golang-asm/obj/x86` ) /** Crucial Registers: * * ST(BX) : ro, decoder stack * DF(R10) : ro, decoder flags * EP(R11) : wo, error pointer * IP(R12) : ro, input pointer * IL(R13) : ro, input length * IC(R14) : rw, input cursor * VP(R15) : ro, value pointer (to an interface{}) */ const ( _VD_args = 8 // 8 bytes for passing arguments to this functions _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions _VD_saves = 40 // 40 bytes for saving the registers before CALL instructions _VD_locals = 56 // 56 bytes for local variables ) const ( _VD_offs = _VD_fargs + _VD_saves + _VD_locals _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer ) var ( _VAR_ss = _VAR_ss_Vt _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves) ) var ( _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8) _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16) _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24) _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32) _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40) _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48) ) type _ValueDecoder struct { jit.BaseAssembler } func (self *_ValueDecoder) build() uintptr { self.Init(self.compile) return *(*uintptr)(self.LoadWithFaker("decode_value", _VD_size, _VD_args, _Decoder_Generic_Shadow)) } /** Function Calling Helpers **/ func (self *_ValueDecoder) save(r ...obj.Addr) { for i, v := range r { if i > _VD_saves / 8 - 1 { panic("too many registers to save") } else { self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8)) } } } func (self *_ValueDecoder) load(r ...obj.Addr) { for i, v := range r { if i > _VD_saves / 8 - 1 { panic("too many registers to load") } else { self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v) } } } func (self *_ValueDecoder) call(fn obj.Addr) { self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX self.Rjmp("CALL", _AX) // CALL AX } func (self *_ValueDecoder) call_go(fn obj.Addr) { self.save(_REG_go...) // SAVE $REG_go self.call(fn) // CALL ${fn} self.load(_REG_go...) 
// LOAD $REG_go } /** Decoder Assembler **/ const ( _S_val = iota + 1 _S_arr _S_arr_0 _S_obj _S_obj_x _S_obj_delim ) const ( _S_omask = (1 << _S_obj) | (1 << _S_obj_x) _S_vmask = (1 << _S_val) | (1 << _S_arr_0) ) const ( _A_init_len = 1 _A_init_cap = 16 ) const ( _ST_Sp = 0 _ST_Vt = _PtrBytes _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) ) var ( _V_true = jit.Imm(int64(pbool(true))) _V_false = jit.Imm(int64(pbool(false))) _F_value = jit.Imm(int64(native.S_value)) ) var ( _V_max = jit.Imm(int64(types.V_MAX)) _E_eof = jit.Imm(int64(types.ERR_EOF)) _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) ) var ( _F_convTslice = jit.Func(convTslice) _F_convTstring = jit.Func(convTstring) _F_invalid_vtype = jit.Func(invalid_vtype) ) var ( _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) _T_bool = jit.Type(reflect.TypeOf(false)) _T_int64 = jit.Type(reflect.TypeOf(int64(0))) _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) _T_string = jit.Type(reflect.TypeOf("")) _T_number = jit.Type(reflect.TypeOf(json.Number(""))) _T_float64 = jit.Type(reflect.TypeOf(float64(0))) ) var _R_tab = map[int]string { '[': "_decode_V_ARRAY", '{': "_decode_V_OBJECT", ':': "_decode_V_KEY_SEP", ',': "_decode_V_ELEM_SEP", ']': "_decode_V_ARRAY_END", '}': "_decode_V_OBJECT_END", } func (self *_ValueDecoder) compile() { self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP) self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP /* initialize the state machine */ self.Emit("XORL", _CX, _CX) // XORL CX, CX self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df /* initialize digital buffer first */ self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf /* add ST offset */ self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] self.Sjmp("JMP" , "_next") // JMP _next /* set the value from previous round */ self.Link("_set_value") // _set_value: self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error self.Emit("XORL" , _SI, _SI) // XORL SI, SI self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) /* check for value stack */ self.Link("_next") // _next: self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX self.Sjmp("JS" , "_return") // JS _return /* fast path: test up to 4 characters manually */ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX self.Emit("MOVQ" , 
jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' self.Sjmp("JA" , "_decode_fast") // JA _decode_fast self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC /* at least 1 to 3 spaces */ for i := 0; i < 3; i++ { self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' self.Sjmp("JA" , "_decode_fast") // JA _decode_fast self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC } /* at least 4 spaces */ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX /* fast path: use lookup table to select decoder */ self.Link("_decode_fast") // _decode_fast: self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI self.Sref("_decode_tab", 4) // .... &_decode_tab self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX self.Sjmp("JZ" , "_decode_native") // JZ _decode_native self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX self.Rjmp("JMP" , _AX) // JMP AX /* decode with native decoder */ self.Link("_decode_native") // _decode_native: self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX self.Emit("MOVL", jit.Imm(1), _R8) // MOVL $1, R8 self.call(_F_value) // CALL value self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC /* check for errors */ self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX self.Sjmp("JS" , "_parsing_error") // JS _parsing_error self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype /* jump table selector */ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI self.Sref("_switch_table", 4) // .... &_switch_table self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX self.Rjmp("JMP" , _AX) // JMP AX /** V_EOF **/ self.Link("_decode_V_EOF") // _decode_V_EOF: self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP self.Sjmp("JMP" , "_error") // JMP _error /** V_NULL **/ self.Link("_decode_V_NULL") // _decode_V_NULL: self.Emit("XORL", _R8, _R8) // XORL R8, R8 self.Emit("XORL", _R9, _R9) // XORL R9, R9 self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /** V_TRUE **/ self.Link("_decode_V_TRUE") // _decode_V_TRUE: self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 // TODO: maybe modified by users? 
self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /** V_FALSE **/ self.Link("_decode_V_FALSE") // _decode_V_FALSE: self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /** V_ARRAY **/ self.Link("_decode_V_ARRAY") // _decode_V_ARRAY self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char /* create a new array */ self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) self.call_go(_F_makeslice) // CALL_GO runtime.makeslice self.Emit("MOVQ", jit.Ptr(_SP, 24), _DX) // MOVQ 24(SP), DX /* pack into an interface */ self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) self.call_go(_F_convTslice) // CALL_GO runtime.convTslice self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) // MOVQ 24(SP), R8 /* replace current state with an array */ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) /* add a new slot for the first element */ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] self.Sjmp("JMP" , "_next") // JMP _next /** V_OBJECT **/ self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj, ST.Vt[CX] self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI) self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) self.Sjmp("JMP" , "_next") // JMP _next /** 
V_STRING **/ self.Link("_decode_V_STRING") // _decode_V_STRING: self.Emit("XORL", _DX, _DX) // XORL DX, DX self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX /* check for escapes */ self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 self.Sjmp("JNE" , "_unquote") // JNE _unquote self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 /* strings with no escape sequences */ self.Link("_noescape") // _noescape: self.Emit("MOVL", jit.Imm(_S_omask), _DI) // MOVL _S_omask, DI self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI self.Sjmp("JC" , "_object_key") // JC _object_key /* check for pre-packed strings, avoid 1 allocation */ self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str self.Emit("MOVQ" , _R8, jit.Ptr(_SP, 0)) // MOVQ R8, (SP) self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) self.call_go(_F_convTstring) // CALL_GO runtime.convTstring self.Emit("MOVQ" , jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 /* packed string already in R9 */ self.Link("_packed_str") // _packed_str: self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /* the string is an object key, get the map */ self.Link("_object_key") self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI /* add a new delimiter */ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] /* add a new slot int the map */ self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP) self.Emit("MOVQ", _R8, jit.Ptr(_SP, 16)) // MOVQ R9, 16(SP) self.Emit("MOVQ", _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr self.Emit("MOVQ", jit.Ptr(_SP, 32), _AX) // MOVQ 32(SP), AX /* add to the pointer stack */ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] self.Sjmp("JMP" , "_next") // JMP _next /* allocate memory to store the string header and unquoted result */ self.Link("_unquote") // _unquote: self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX self.Emit("MOVQ", _T_byte, _CX) // MOVQ _T_byte, CX self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) self.Emit("MOVB", jit.Imm(0), jit.Ptr(_SP, 16)) // MOVB $0, 16(SP) self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc self.Emit("MOVQ", jit.Ptr(_SP, 24), _R9) // MOVQ 24(SP), R9 /* prepare the unquoting parameters */ self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI self.Emit("NEGQ" , _CX) // NEGQ 
CX self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX self.Emit("XORL" , _R8, _R8) // XORL R8, R8 self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv self.Emit("SETCC", _R8) // SETCC R8 self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 /* unquote the string, with R9 been preserved */ self.save(_R9) // SAVE R9 self.call(_F_unquote) // CALL unquote self.load(_R9) // LOAD R9 /* check for errors */ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX self.Sjmp("JS" , "_unquote_error") // JS _unquote_error self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8 self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) self.Sjmp("JMP" , "_noescape") // JMP _noescape /** V_DOUBLE **/ self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE: self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df self.Sjmp("JC" , "_use_number") // JC _use_number self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 self.Sjmp("JMP" , "_use_float64") // JMP _use_float64 /** V_INTEGER **/ self.Link("_decode_V_INTEGER") // _decode_V_INTEGER: self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df self.Sjmp("JC" , "_use_number") // JC _use_number self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df self.Sjmp("JC" , "_use_int64") // JC _use_int64 self.Emit("MOVQ" , _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX self.Emit("CVTSQ2SD", _AX, _X0) // CVTSQ2SD AX, X0 /* represent numbers as `float64` */ self.Link("_use_float64") // _use_float64: self.Emit("MOVSD", _X0, jit.Ptr(_SP, 0)) // MOVSD X0, (SP) self.call_go(_F_convT64) // CALL_GO runtime.convT64 self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8 self.Emit("MOVQ" , jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /* represent numbers as `json.Number` */ self.Link("_use_number") // _use_number self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP) self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) self.call_go(_F_convTstring) // CALL_GO runtime.convTstring self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8 self.Emit("MOVQ", jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /* represent numbers as `int64` */ self.Link("_use_int64") // _use_int64: self.Emit("MOVQ", _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) self.call_go(_F_convT64) // CALL_GO runtime.convT64 self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 self.Emit("MOVQ", jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI self.Sjmp("JMP" , "_set_value") // JMP _set_value /** V_KEY_SEP **/ self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: // self.Byte(0xcc) self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, 
_S_obj_delim self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1] self.Sjmp("JMP" , "_next") // JMP _next /** V_ELEM_SEP **/ self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: self.Emit("MOVQ" , jit.Imm(_S_obj), _AX) // MOVQ _S_obj, AX self.Emit("MOVQ" , jit.Imm(_S_obj_x), _DX) // MOVQ _S_obj_x, DX self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("CMPXCHGQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vt)) // CMPXCHGQ DX, ST.Vt[CX] self.Sjmp("JZ" , "_next") // JZ _next self.Emit("CMPQ" , _AX, jit.Imm(_S_arr)) // CMPQ _AX, _S_arr self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char /* arrays */ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) self.Sjmp("JAE" , "_array_more") // JAE _array_more /* add a slot for the new element */ self.Link("_array_append") // _array_append: self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} self.Sjmp("JAE" , "_stack_overflow") self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} self.Sjmp("JMP" , "_next") // JMP _next /** V_ARRAY_END **/ self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: self.Emit("XORL", _DX, _DX) // XORL DX, DX self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 self.Sjmp("JE" , "_first_item") // JE _first_item self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] self.Sjmp("JMP" , "_next") // JMP _next /* first element of an array */ self.Link("_first_item") // _first_item: self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1] self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) self.Sjmp("JMP" , "_next") // JMP _next /** V_OBJECT_END **/ self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END: self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX self.Emit("CMPQ", _AX, jit.Imm(_S_obj)) // CMPQ AX, _S_obj self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char self.Emit("XORL", _AX, _AX) // XORL AX, AX 
self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX] self.Sjmp("JMP" , "_next") // JMP _next /* return from decoder */ self.Link("_return") // _return: self.Emit("XORL", _EP, _EP) // XORL EP, EP self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0] self.Link("_epilogue") // _epilogue: self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP self.Emit("RET") // RET /* array expand */ self.Link("_array_more") // _array_more: self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX self.Emit("MOVOU", jit.Ptr(_SI, 0), _X0) // MOVOU (SI), X0 self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DX) // MOVQ 16(SI), DX self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 24)) // MOVQ DX, 24(SP) self.Emit("SHLQ" , jit.Imm(1), _DX) // SHLQ $1, DX self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 32)) // MOVQ DX, 32(SP) self.call_go(_F_growslice) // CALL_GO runtime.growslice self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVOU 40(SP), DI self.Emit("MOVQ" , jit.Ptr(_SP, 48), _DX) // MOVOU 48(SP), DX self.Emit("MOVQ" , jit.Ptr(_SP, 56), _AX) // MOVQ 56(SP), AX /* update the slice */ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX) self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI) self.Sjmp("JMP" , "_array_append") // JMP _array_append /* error handlers */ self.Link("_stack_overflow") self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP self.Sjmp("JMP" , "_error") // JMP _error self.Link("_vtype_error") // _vtype_error: self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP self.Sjmp("JMP" , "_error") // JMP _error self.Link("_invalid_char") // _invalid_char: self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP self.Sjmp("JMP" , "_error") // JMP _error self.Link("_unquote_error") // _unquote_error: self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC self.Link("_parsing_error") // _parsing_error: self.Emit("NEGQ" , _AX) // NEGQ AX self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP self.Link("_error") // _error: self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) self.Sjmp("JMP" , "_epilogue") // JMP _epilogue /* invalid value type, never returns */ self.Link("_invalid_vtype") self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) self.call(_F_invalid_vtype) // CALL invalid_type self.Emit("UD2") // UD2 /* switch jump table */ self.Link("_switch_table") // _switch_table: self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 
self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 /* fast character lookup table */ self.Link("_decode_tab") // _decode_tab: self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 /* generate rest of the tabs */ for i := 1; i < 256; i++ { if to, ok := _R_tab[i]; ok { self.Sref(to, -int64(i) * 4) } else { self.Byte(0x00, 0x00, 0x00, 0x00) } } } func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) { self.Emit("MOVQ", _V_writeBarrier, _R10) self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") if saveDI { self.save(_DI) } self.Emit("LEAQ", rec, _DI) self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX self.Rjmp("CALL", _R10) if saveDI { self.load(_DI) } self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") self.Emit("MOVQ", _AX, rec) self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") } func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) { if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { panic("rec contains AX!") } self.Emit("MOVQ", _V_writeBarrier, _R10) self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") self.Emit("MOVQ", ptr, _AX) if saveDI { self.save(_DI) } self.Emit("LEAQ", rec, _DI) self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX self.Rjmp("CALL", _R10) if saveDI { self.load(_DI) } self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") self.Emit("MOVQ", ptr, rec) self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") } /** Generic Decoder **/ var ( _subr_decode_value = new(_ValueDecoder).build() ) //go:nosplit func
(vt types.ValueType) { throw(fmt.Sprintf("invalid value type: %d", vt)) }
invalid_vtype
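What the assembler above builds is a two-level, table-driven dispatcher: a 256-entry character table (_decode_tab, populated from _R_tab) routes structural tokens directly, and everything else falls through to the native value parser, whose result type then indexes _switch_table. A rough Python sketch of the same shape (purely illustrative; none of this is sonic's actual API):

def make_char_table(handlers):
    # 256-entry lookup table: None means "no fast path, use the native parser"
    table = [None] * 256
    for ch, fn in handlers.items():
        table[ord(ch)] = fn
    return table

decode_tab = make_char_table({
    '[': lambda: 'V_ARRAY',     '{': lambda: 'V_OBJECT',
    ':': lambda: 'V_KEY_SEP',   ',': lambda: 'V_ELEM_SEP',
    ']': lambda: 'V_ARRAY_END', '}': lambda: 'V_OBJECT_END',
})

def dispatch(ch):
    handler = decode_tab[ord(ch)]
    if handler is not None:
        return handler()       # fast path: direct jump per structural character
    return 'decode_native'     # slow path: full value parser, then the switch table

assert dispatch('[') == 'V_ARRAY'
assert dispatch('7') == 'decode_native'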
v1.rs
use caveat::{Caveat, CaveatBuilder}; use error::MacaroonError; use serialization::macaroon_builder::MacaroonBuilder; use std::str; use ByteString; use Macaroon; use Result; // Version 1 fields const LOCATION: &str = "location"; const IDENTIFIER: &str = "identifier"; const SIGNATURE: &str = "signature"; const CID: &str = "cid"; const VID: &str = "vid"; const CL: &str = "cl"; const HEADER_SIZE: usize = 4; fn serialize_as_packet<'r>(tag: &'r str, value: &'r [u8]) -> Vec<u8> { let mut packet: Vec<u8> = Vec::new(); let size = HEADER_SIZE + 2 + tag.len() + value.len(); packet.extend(packet_header(size)); packet.extend_from_slice(tag.as_bytes()); packet.extend_from_slice(b" "); packet.extend_from_slice(value); packet.extend_from_slice(b"\n"); packet } fn to_hex_char(value: u8) -> u8 { let hex = format!("{:1x}", value); hex.as_bytes()[0] } fn packet_header(size: usize) -> Vec<u8> { vec![ to_hex_char(((size >> 12) & 15) as u8), to_hex_char(((size >> 8) & 15) as u8), to_hex_char(((size >> 4) & 15) as u8), to_hex_char((size & 15) as u8), ] } pub fn serialize(macaroon: &Macaroon) -> Result<Vec<u8>> { let mut serialized: Vec<u8> = Vec::new(); if let Some(ref location) = macaroon.location() { serialized.extend(serialize_as_packet(LOCATION, location.as_bytes())); }; serialized.extend(serialize_as_packet(IDENTIFIER, &macaroon.identifier().0)); for c in macaroon.caveats() { match c { Caveat::FirstParty(fp) => { serialized.extend(serialize_as_packet(CID, &fp.predicate().0)); } Caveat::ThirdParty(tp) => { serialized.extend(serialize_as_packet(CID, &tp.id().0)); serialized.extend(serialize_as_packet(VID, &tp.verifier_id().0)); serialized.extend(serialize_as_packet(CL, tp.location().as_bytes())) } } } serialized.extend(serialize_as_packet(SIGNATURE, &macaroon.signature())); Ok(base64::encode_config(&serialized, base64::URL_SAFE) .as_bytes() .to_vec()) } fn base64_decode(s: &str) -> Result<Vec<u8>> { Ok(base64::decode_config(s, base64::URL_SAFE)?) } struct Packet { key: String, value: Vec<u8>, } fn deserialize_as_packets(data: &[u8], mut packets: Vec<Packet>) -> Result<Vec<Packet>> { if data.is_empty() { return Ok(packets); } let hex: &str = str::from_utf8(&data[..4])?; let size: usize = usize::from_str_radix(hex, 16)?; let packet_data = &data[4..size]; let index = split_index(packet_data)?; let (key_slice, value_slice) = packet_data.split_at(index); packets.push(Packet { key: String::from_utf8(key_slice.to_vec())?, // skip beginning space and terminating \n value: value_slice[1..value_slice.len() - 1].to_vec(), }); deserialize_as_packets(&data[size..], packets) } fn split_index(packet: &[u8]) -> Result<usize> { match packet.iter().position(|&r| r == b' ') { Some(index) => Ok(index), None => Err(MacaroonError::DeserializationError(String::from( "Key/value error", ))), } } pub fn deserialize(base64: &[u8]) -> Result<Macaroon> { let data = base64_decode(&String::from_utf8(base64.to_vec())?)?; let mut builder: MacaroonBuilder = MacaroonBuilder::new(); let mut caveat_builder: CaveatBuilder = CaveatBuilder::new(); for packet in deserialize_as_packets(data.as_slice(), Vec::new())? 
{ match packet.key.as_str() { LOCATION => { builder.set_location(&String::from_utf8(packet.value)?); } IDENTIFIER => { builder.set_identifier(ByteString(packet.value)); } SIGNATURE => { if caveat_builder.has_id() { builder.add_caveat(caveat_builder.build()?); caveat_builder = CaveatBuilder::new(); } if packet.value.len() != 32 { error!( "deserialize_v1: Deserialization error - signature length is {}", packet.value.len() ); return Err(MacaroonError::DeserializationError(String::from( "Illegal signature \ length in \ packet", ))); } builder.set_signature(&packet.value); } CID => { if caveat_builder.has_id() { builder.add_caveat(caveat_builder.build()?); caveat_builder = CaveatBuilder::new(); caveat_builder.add_id(ByteString(packet.value)); } else { caveat_builder.add_id(ByteString(packet.value)); } } VID => { caveat_builder.add_verifier_id(ByteString(packet.value)); } CL => caveat_builder.add_location(String::from_utf8(packet.value)?), _ => { return Err(MacaroonError::DeserializationError(String::from( "Unknown key", ))) } }; } builder.build() } #[cfg(test)] mod tests { use ByteString; use Caveat; use Macaroon; use MacaroonKey; #[test] fn test_deserialize() { let mut serialized = "MDAyMWxvY2F0aW9uIGh0dHA6Ly9leGFtcGxlLm9yZy8KMDAxNWlkZW50aWZpZXIga2V5aWQKMDAyZnNpZ25hdHVyZSB83ueSURxbxvUoSFgF3-myTnheKOKpkwH51xHGCeOO9wo"; let mut signature: MacaroonKey = [ 124, 222, 231, 146, 81, 28, 91, 198, 245, 40, 72, 88, 5, 223, 233, 178, 78, 120, 94, 40, 226, 169, 147, 1, 249, 215, 17, 198, 9, 227, 142, 247, ] .into(); let macaroon = super::deserialize(&serialized.as_bytes().to_vec()).unwrap(); assert!(macaroon.location().is_some()); assert_eq!("http://example.org/", &macaroon.location().unwrap()); assert_eq!(ByteString::from("keyid"), macaroon.identifier()); assert_eq!(signature, macaroon.signature()); serialized = "MDAyMWxvY2F0aW9uIGh0dHA6Ly9leGFtcGxlLm9yZy8KMDAxNWlkZW50aWZpZXIga2V5aWQKMDAxZGNpZCBhY2NvdW50ID0gMzczNTkyODU1OQowMDJmc2lnbmF0dXJlIPVIB_bcbt-Ivw9zBrOCJWKjYlM9v3M5umF2XaS9JZ2HCg"; signature = [ 245, 72, 7, 246, 220, 110, 223, 136, 191, 15, 115, 6, 179, 130, 37, 98, 163, 98, 83, 61, 191, 115, 57, 186, 97, 118, 93, 164, 189, 37, 157, 135, ] .into(); let macaroon = super::deserialize(&serialized.as_bytes().to_vec()).unwrap(); assert!(macaroon.location().is_some()); assert_eq!("http://example.org/", &macaroon.location().unwrap()); assert_eq!(ByteString::from("keyid"), macaroon.identifier()); assert_eq!(1, macaroon.caveats().len()); let predicate = match &macaroon.caveats()[0] { Caveat::FirstParty(fp) => fp.predicate(), _ => ByteString::default(), }; assert_eq!(ByteString::from("account = 3735928559"), predicate); assert_eq!(signature, macaroon.signature()); } #[test] fn
() { let serialized = "MDAyMWxvY2F0aW9uIGh0dHA6Ly9leGFtcGxlLm9yZy8KMDAxNWlkZW50aWZpZXIga2V5aWQKMDAxZGNpZCBhY2NvdW50ID0gMzczNTkyODU1OQowMDE1Y2lkIHVzZXIgPSBhbGljZQowMDJmc2lnbmF0dXJlIEvpZ80eoMaya69qSpTumwWxWIbaC6hejEKpPI0OEl78Cg"; let signature: MacaroonKey = [ 75, 233, 103, 205, 30, 160, 198, 178, 107, 175, 106, 74, 148, 238, 155, 5, 177, 88, 134, 218, 11, 168, 94, 140, 66, 169, 60, 141, 14, 18, 94, 252, ] .into(); let macaroon = super::deserialize(&serialized.as_bytes().to_vec()).unwrap(); assert!(macaroon.location().is_some()); assert_eq!("http://example.org/", &macaroon.location().unwrap()); assert_eq!(ByteString::from("keyid"), macaroon.identifier()); assert_eq!(signature, macaroon.signature()); assert_eq!(2, macaroon.caveats().len()); let predicate = match &macaroon.caveats()[0] { Caveat::FirstParty(fp) => fp.predicate(), _ => ByteString::default(), }; assert_eq!(ByteString::from("account = 3735928559"), predicate); let predicate = match &macaroon.caveats()[1] { Caveat::FirstParty(fp) => fp.predicate(), _ => ByteString::default(), }; assert_eq!(ByteString::from("user = alice"), predicate); } #[test] fn test_serialize_deserialize() { let mut macaroon: Macaroon = Macaroon::create( Some("http://example.org/".into()), &"my key".into(), "keyid".into(), ) .unwrap(); macaroon.add_first_party_caveat("account = 3735928559".into()); macaroon.add_first_party_caveat("user = alice".into()); macaroon.add_third_party_caveat( "https://auth.mybank.com", &"caveat key".into(), "caveat".into(), ); let serialized = macaroon.serialize(super::super::Format::V1).unwrap(); let deserialized = Macaroon::deserialize(&serialized).unwrap(); assert_eq!(macaroon, deserialized); } }
test_deserialize_two_caveats
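The V1 wire format above is a sequence of length-prefixed packets: a 4-digit lowercase-hex length covering the header itself, the tag, one space, the value and a trailing newline. A Python sketch of serialize_as_packet (not the crate's code), checked against the "location" packet embedded in the base64 test vectors above:

def serialize_as_packet(tag: str, value: bytes) -> bytes:
    # 4 (hex header) + tag + 1 (space) + value + 1 (newline)
    size = 4 + len(tag) + 1 + len(value) + 1
    return b"%04x" % size + tag.encode() + b" " + value + b"\n"

# 0x0021 == 33 == 4 + 8 + 1 + 19 + 1; matches the decoded test vectors above
assert serialize_as_packet("location", b"http://example.org/") == \
    b"0021location http://example.org/\n"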
test_lock.py
import pytest from etcdb import OperationalError from etcdb.lock import Lock, ReadLock, WriteLock def test_readers(etcdb_connection): cur = etcdb_connection.cursor() cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)') lock = ReadLock(etcdb_connection.client, 'foo', 'bar') lock.acquire(ttl=0) readers = lock.readers() lock.release() assert len(readers) == 1 readers = lock.readers() assert len(readers) == 0 lock.acquire(ttl=0) l2 = ReadLock(etcdb_connection.client, 'foo', 'bar') l2.acquire(ttl=0) readers = lock.readers() assert len(readers) == 2 def
(etcdb_connection): cur = etcdb_connection.cursor() cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)') lock = WriteLock(etcdb_connection.client, 'foo', 'bar') lock.acquire(ttl=0) writers = lock.writers() assert len(writers) == 1 lock.release() writers = lock.writers() assert len(writers) == 0 lock.acquire(ttl=0) l2 = WriteLock(etcdb_connection.client, 'foo', 'bar') with pytest.raises(OperationalError): l2.acquire() def test_attributes(etcdb_connection): cur = etcdb_connection.cursor() cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)') lock = WriteLock(etcdb_connection.client, 'foo', 'bar') lock.acquire(author='author foo', reason='reason foo') assert lock.author == 'author foo' assert lock.reason == 'reason foo' assert type(lock.created_at) == int assert lock.created_at > 0
test_writers
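A hypothetical usage sketch of the lock API these tests exercise (semantics as the assertions imply: read locks are shared, write locks are exclusive and a second writer raises OperationalError; the ttl argument is assumed to expire the lock after that many seconds, and etcdb_connection stands in for the fixture used above):

from etcdb.lock import ReadLock

lock = ReadLock(etcdb_connection.client, 'foo', 'bar')  # db 'foo', table 'bar'
lock.acquire(ttl=30)  # assumed: auto-expires after 30 seconds if never released
try:
    pass  # read from foo.bar; other readers may hold the lock concurrently
finally:
    lock.release()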
agent_timeout_test.go
package agent import ( "context" "io/ioutil" "os" "strings" "testing"
"github.com/evergreen-ci/evergreen" "github.com/evergreen-ci/evergreen/rest/client" "github.com/mongodb/jasper" "github.com/stretchr/testify/suite" ) type TimeoutSuite struct { suite.Suite a *Agent mockCommunicator *client.Mock tmpFile *os.File tmpFileName string tmpDirName string } func TestTimeoutSuite(t *testing.T) { suite.Run(t, new(TimeoutSuite)) } func (s *TimeoutSuite) SetupTest() { s.a = &Agent{ opts: Options{ HostID: "host", HostSecret: "secret", StatusPort: 2286, LogPrefix: evergreen.LocalLoggingOverride, }, comm: client.NewMock("url"), } s.mockCommunicator = s.a.comm.(*client.Mock) var err error s.tmpDirName, err = ioutil.TempDir("", "agent-timeout-suite-") s.Require().NoError(err) s.tmpFile, err = ioutil.TempFile(s.tmpDirName, "timeout") s.Require().NoError(err) s.tmpFileName = s.tmpFile.Name() s.mockCommunicator.TimeoutFilename = s.tmpFileName s.Require().NoError(s.tmpFile.Close()) s.a.jasper, err = jasper.NewLocalManager(false) s.Require().NoError(err) } func (s *TimeoutSuite) TearDownTest() { s.Require().NoError(os.Remove(s.tmpFileName)) s.Require().NoError(os.RemoveAll(s.tmpDirName)) } // TestExecTimeoutProject tests exec_timeout_secs set on a project. // exec_timeout_secs has an effect only on a project or a task. func (s *TimeoutSuite) TestExecTimeoutProject() { taskID := "exec_timeout_project" taskSecret := "mock_task_secret" tc := &taskContext{ task: client.TaskData{ ID: taskID, Secret: taskSecret, }, runGroupSetup: true, } // Windows may not have finished deleting task directories when // os.RemoveAll returns. Setting TaskExecution in this suite causes the // tests in this suite to create differently-named task directories. s.mockCommunicator.TaskExecution = 0 ctx, cancel := context.WithCancel(context.Background()) defer cancel() s.NoError(s.a.resetLogging(ctx, tc)) defer s.a.removeTaskDirectory(tc) _, err := s.a.runTask(ctx, cancel, tc) s.NoError(err) messages := s.mockCommunicator.GetMockMessages() s.Len(messages, 1) foundSuccessLogMessage := false foundShellLogMessage := false foundTimeoutMessage := false for _, msg := range messages[taskID] { if msg.Message == "Task completed - FAILURE." { foundSuccessLogMessage = true } if strings.HasPrefix(msg.Message, "Hit exec timeout (1s)") { foundTimeoutMessage = true } if strings.HasPrefix(msg.Message, "Running task-timeout commands") { foundShellLogMessage = true } if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") { foundShellLogMessage = true } } s.True(foundSuccessLogMessage) s.True(foundShellLogMessage) s.True(foundTimeoutMessage) detail := s.mockCommunicator.GetEndTaskDetail() s.Equal(evergreen.TaskFailed, detail.Status) s.Equal("test", detail.Type) s.Contains(detail.Description, "shell.exec") s.True(detail.TimedOut) data, err := ioutil.ReadFile(s.tmpFileName) s.Require().NoError(err) s.Equal("timeout test message", strings.Trim(string(data), "\r\n")) taskData := s.mockCommunicator.EndTaskResult.TaskData s.Equal(taskID, taskData.ID) s.Equal(taskSecret, taskData.Secret) } // TestExecTimeoutTask tests exec_timeout_secs set on a task. exec_timeout_secs // has an effect only on a project or a task. func (s *TimeoutSuite) TestExecTimeoutTask() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() taskID := "exec_timeout_task" taskSecret := "mock_task_secret" tc := &taskContext{ task: client.TaskData{ ID: taskID, Secret: taskSecret, }, runGroupSetup: true, } // Windows may not have finished deleting task directories when // os.RemoveAll returns. 
Setting TaskExecution in this suite causes the // tests in this suite to create differently-named task directories. s.mockCommunicator.TaskExecution = 1 s.NoError(s.a.resetLogging(ctx, tc)) defer s.a.removeTaskDirectory(tc) _, err := s.a.runTask(ctx, cancel, tc) s.NoError(err) messages := s.mockCommunicator.GetMockMessages() s.Len(messages, 1) foundSuccessLogMessage := false foundShellLogMessage := false foundTimeoutMessage := false for _, msg := range messages[taskID] { if msg.Message == "Task completed - FAILURE." { foundSuccessLogMessage = true } if strings.HasPrefix(msg.Message, "Hit exec timeout (1s)") { foundTimeoutMessage = true } if strings.HasPrefix(msg.Message, "Running task-timeout commands") { foundShellLogMessage = true } if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") { foundShellLogMessage = true } } s.True(foundSuccessLogMessage) s.True(foundShellLogMessage) s.True(foundTimeoutMessage) detail := s.mockCommunicator.GetEndTaskDetail() s.Equal(evergreen.TaskFailed, detail.Status) s.Equal("test", detail.Type) s.Contains(detail.Description, "shell.exec") s.True(detail.TimedOut) data, err := ioutil.ReadFile(s.tmpFileName) s.Require().NoError(err) s.Equal("timeout test message", strings.Trim(string(data), "\r\n")) taskData := s.mockCommunicator.EndTaskResult.TaskData s.Equal(taskID, taskData.ID) s.Equal(taskSecret, taskData.Secret) } // TestIdleTimeoutFunc tests timeout_secs set in a function. func (s *TimeoutSuite) TestIdleTimeoutFunc() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() taskID := "idle_timeout_func" taskSecret := "mock_task_secret" tc := &taskContext{ task: client.TaskData{ ID: taskID, Secret: taskSecret, }, runGroupSetup: true, } // Windows may not have finished deleting task directories when // os.RemoveAll returns. Setting TaskExecution in this suite causes the // tests in this suite to create differently-named task directories. s.mockCommunicator.TaskExecution = 2 s.NoError(s.a.resetLogging(ctx, tc)) defer s.a.removeTaskDirectory(tc) _, err := s.a.runTask(ctx, cancel, tc) s.NoError(err) messages := s.mockCommunicator.GetMockMessages() s.Len(messages, 1) foundSuccessLogMessage := false foundShellLogMessage := false foundTimeoutMessage := false for _, msg := range messages[taskID] { if msg.Message == "Task completed - FAILURE." { foundSuccessLogMessage = true } if strings.HasPrefix(msg.Message, "Hit idle timeout (no message on stdout for more than 1s)") { foundTimeoutMessage = true } if strings.HasPrefix(msg.Message, "Running task-timeout commands") { foundShellLogMessage = true } if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") { foundShellLogMessage = true } } s.True(foundSuccessLogMessage) s.True(foundShellLogMessage) s.True(foundTimeoutMessage) detail := s.mockCommunicator.GetEndTaskDetail() s.Equal(evergreen.TaskFailed, detail.Status) s.Equal("test", detail.Type) s.Contains(detail.Description, "shell.exec") s.True(detail.TimedOut) data, err := ioutil.ReadFile(s.tmpFileName) s.Require().NoError(err) s.Equal("timeout test message", strings.Trim(string(data), "\r\n")) taskData := s.mockCommunicator.EndTaskResult.TaskData s.Equal(taskID, taskData.ID) s.Equal(taskSecret, taskData.Secret) } // TestIdleTimeoutCommand tests timeout_secs set on a command.
func (s *TimeoutSuite) TestIdleTimeoutCommand() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() taskID := "idle_timeout_task" taskSecret := "mock_task_secret" tc := &taskContext{ task: client.TaskData{ ID: taskID, Secret: taskSecret, }, runGroupSetup: true, } // Windows may not have finished deleting task directories when // os.RemoveAll returns. Setting TaskExecution in this suite causes the // tests in this suite to create differently-named task directories. s.mockCommunicator.TaskExecution = 3 s.NoError(s.a.resetLogging(ctx, tc)) defer s.a.removeTaskDirectory(tc) _, err := s.a.runTask(ctx, cancel, tc) s.NoError(err) messages := s.mockCommunicator.GetMockMessages() s.Len(messages, 1) foundSuccessLogMessage := false foundShellLogMessage := false foundTimeoutMessage := false for _, msg := range messages[taskID] { if msg.Message == "Task completed - FAILURE." { foundSuccessLogMessage = true } if strings.HasPrefix(msg.Message, "Hit idle timeout (no message on stdout for more than 1s)") { foundTimeoutMessage = true } if strings.HasPrefix(msg.Message, "Running task-timeout commands") { foundShellLogMessage = true } if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") { foundShellLogMessage = true } } s.True(foundSuccessLogMessage) s.True(foundShellLogMessage) s.True(foundTimeoutMessage) detail := s.mockCommunicator.GetEndTaskDetail() s.Equal(evergreen.TaskFailed, detail.Status) s.Equal("test", detail.Type) s.Contains(detail.Description, "shell.exec") s.True(detail.TimedOut) data, err := ioutil.ReadFile(s.tmpFileName) s.Require().NoError(err) s.Equal("timeout test message", strings.Trim(string(data), "\r\n")) taskData := s.mockCommunicator.EndTaskResult.TaskData s.Equal(taskID, taskData.ID) s.Equal(taskSecret, taskData.Secret) } // TestDynamicIdleTimeout tests that the `update.timeout` command sets timeout_secs. func (s *TimeoutSuite) TestDynamicIdleTimeout() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() taskID := "dynamic_idle_timeout_task" taskSecret := "mock_task_secret" tc := &taskContext{ task: client.TaskData{ ID: taskID, Secret: taskSecret, }, runGroupSetup: true, } // Windows may not have finished deleting task directories when // os.RemoveAll returns. Setting TaskExecution in this suite causes the // tests in this suite to create differently-named task directories. s.mockCommunicator.TaskExecution = 3 s.NoError(s.a.resetLogging(ctx, tc)) defer s.a.removeTaskDirectory(tc) _, err := s.a.runTask(ctx, cancel, tc) s.NoError(err) messages := s.mockCommunicator.GetMockMessages() s.Len(messages, 1) foundSuccessLogMessage := false foundShellLogMessage := false foundTimeoutMessage := false for _, msg := range messages[taskID] { if msg.Message == "Task completed - FAILURE." 
{ foundSuccessLogMessage = true } if strings.HasPrefix(msg.Message, "Hit idle timeout (no message on stdout for more than 2s)") { foundTimeoutMessage = true } if strings.HasPrefix(msg.Message, "Running task-timeout commands") { foundShellLogMessage = true } if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") { foundShellLogMessage = true } } s.True(foundSuccessLogMessage) s.True(foundShellLogMessage) s.True(foundTimeoutMessage) detail := s.mockCommunicator.GetEndTaskDetail() s.Equal(evergreen.TaskFailed, detail.Status) s.Equal("test", detail.Type) s.Contains(detail.Description, "shell.exec") s.True(detail.TimedOut) data, err := ioutil.ReadFile(s.tmpFileName) s.Require().NoError(err) s.Equal("timeout test message", strings.Trim(string(data), "\r\n")) taskData := s.mockCommunicator.EndTaskResult.TaskData s.Equal(taskID, taskData.ID) s.Equal(taskSecret, taskData.Secret) } // TestDynamicExecTimeoutTask tests that the `update.timeout` command sets exec_timeout_secs. func (s *TimeoutSuite) TestDynamicExecTimeoutTask() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() taskID := "dynamic_exec_timeout_task" taskSecret := "mock_task_secret" tc := &taskContext{ task: client.TaskData{ ID: taskID, Secret: taskSecret, }, runGroupSetup: true, } // Windows may not have finished deleting task directories when // os.RemoveAll returns. Setting TaskExecution in this suite causes the // tests in this suite to create differently-named task directories. s.mockCommunicator.TaskExecution = 1 s.NoError(s.a.resetLogging(ctx, tc)) defer s.a.removeTaskDirectory(tc) _, err := s.a.runTask(ctx, cancel, tc) s.NoError(err) messages := s.mockCommunicator.GetMockMessages() s.Len(messages, 1) foundSuccessLogMessage := false foundShellLogMessage := false foundTimeoutMessage := false for _, msg := range messages[taskID] { if msg.Message == "Task completed - FAILURE." { foundSuccessLogMessage = true } if strings.HasPrefix(msg.Message, "Hit exec timeout (2s)") { foundTimeoutMessage = true } if strings.HasPrefix(msg.Message, "Running task-timeout commands") { foundShellLogMessage = true } if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") { foundShellLogMessage = true } } s.True(foundSuccessLogMessage) s.True(foundShellLogMessage) s.True(foundTimeoutMessage) detail := s.mockCommunicator.GetEndTaskDetail() s.Equal(evergreen.TaskFailed, detail.Status) s.Equal("test", detail.Type) s.Contains(detail.Description, "shell.exec") s.True(detail.TimedOut) data, err := ioutil.ReadFile(s.tmpFileName) s.Require().NoError(err) s.Equal("timeout test message", strings.Trim(string(data), "\r\n")) taskData := s.mockCommunicator.EndTaskResult.TaskData s.Equal(taskID, taskData.ID) s.Equal(taskSecret, taskData.Secret) }
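// The six tests above repeat the same log-scanning assertions. A suite helper
// like the sketch below could consolidate them; it is a suggestion under stated
// assumptions, not part of the original suite. The timeoutMsg parameter is
// hypothetical and would carry each test's expected "Hit ... timeout" prefix.
func (s *TimeoutSuite) checkTimeoutLogs(taskID, timeoutMsg string) {
	messages := s.mockCommunicator.GetMockMessages()
	s.Len(messages, 1)

	foundFailureLogMessage := false
	foundShellLogMessage := false
	foundTimeoutMessage := false
	for _, msg := range messages[taskID] {
		if msg.Message == "Task completed - FAILURE." {
			foundFailureLogMessage = true
		}
		if strings.HasPrefix(msg.Message, timeoutMsg) {
			foundTimeoutMessage = true
		}
		if strings.HasPrefix(msg.Message, "Running task-timeout commands") {
			foundShellLogMessage = true
		}
		if strings.HasPrefix(msg.Message, "Finished 'shell.exec' in \"timeout\"") {
			foundShellLogMessage = true
		}
	}
	s.True(foundFailureLogMessage)
	s.True(foundShellLogMessage)
	s.True(foundTimeoutMessage)
}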
shotdetect.py
# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List

import numpy as np

from ..base import BaseVideoPreprocessor
from ..helper import compute_descriptor, compare_descriptor, detect_peak_boundary, compare_ecr
from ..io_utils import video as video_util
from ...proto import gnes_pb2, array2blob


class ShotDetectPreprocessor(BaseVideoPreprocessor):
    store_args_kwargs = True

    def __init__(self,
                 frame_size: str = '192:168',
                 descriptor: str = 'block_hsv_histogram',
                 distance_metric: str = 'bhattacharya',
                 detect_method: str = 'threshold',
                 frame_rate: int = 10,
                 frame_num: int = -1,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.frame_size = frame_size
        self.descriptor = descriptor
        self.distance_metric = distance_metric
        self.detect_method = detect_method
        self.frame_rate = frame_rate
        self.frame_num = frame_num
        self._detector_kwargs = kwargs

    def detect_shots(self, frames: 'np.ndarray') -> List[List['np.ndarray']]:
        descriptors = []
        for frame in frames:
            descriptor = compute_descriptor(
                frame, method=self.descriptor, **self._detector_kwargs)
            descriptors.append(descriptor)

        # compute distances between frames
        if self.distance_metric == 'edge_change_ration':
            dists = compare_ecr(descriptors)
        else:
            dists = [
                compare_descriptor(pair[0], pair[1], self.distance_metric)
                for pair in zip(descriptors[:-1], descriptors[1:])
            ]

        shot_bounds = detect_peak_boundary(dists, self.detect_method)

        shots = []
        for ci in range(0, len(shot_bounds) - 1):
            shots.append(frames[shot_bounds[ci]:shot_bounds[ci + 1]])

        return shots

    def apply(self, doc: 'gnes_pb2.Document') -> None:
        super().apply(doc)
        if doc.raw_bytes:
            all_frames = video_util.capture_frames(
                input_data=doc.raw_bytes,
                scale=self.frame_size,
                fps=self.frame_rate,
                vframes=self.frame_num)
            num_frames = len(all_frames)
            assert num_frames > 0
            shots = self.detect_shots(all_frames)
            for ci, frames in enumerate(shots):
                c = doc.chunks.add()
                c.doc_id = doc.doc_id
                # chunk_data = np.concatenate(frames, axis=0)
                chunk_data = np.array(frames)
                c.blob.CopyFrom(array2blob(chunk_data))
                c.offset = ci
                c.weight = len(frames) / num_frames
        else:
            self.logger.error('bad document: "raw_bytes" is empty!')
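
# A minimal usage sketch, assuming the module is imported from within its
# package (the relative imports above prevent running it standalone) and that
# 'video.mp4' is a hypothetical local file whose raw bytes hold an encoded video:
#
#     prep = ShotDetectPreprocessor(frame_rate=5)
#     doc = gnes_pb2.Document()
#     doc.raw_bytes = open('video.mp4', 'rb').read()
#     prep.apply(doc)
#     # doc.chunks now holds one chunk per detected shot; each chunk's weight
#     # is that shot's share of the total frame count.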
kvrpcpb.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: kvrpcpb.proto package kvrpcpb import ( "fmt" "io" "math" proto "github.com/golang/protobuf/proto" _ "github.com/gogo/protobuf/gogoproto" errorpb "github.com/pingcap/kvproto/pkg/errorpb" metapb "github.com/pingcap/kvproto/pkg/metapb" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type CommandPri int32 const ( CommandPri_Normal CommandPri = 0 CommandPri_Low CommandPri = 1 CommandPri_High CommandPri = 2 ) var CommandPri_name = map[int32]string{ 0: "Normal", 1: "Low", 2: "High", } var CommandPri_value = map[string]int32{ "Normal": 0, "Low": 1, "High": 2, } func (x CommandPri) String() string { return proto.EnumName(CommandPri_name, int32(x)) } func (CommandPri) EnumDescriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{0} } type IsolationLevel int32 const ( IsolationLevel_SI IsolationLevel = 0 IsolationLevel_RC IsolationLevel = 1 ) var IsolationLevel_name = map[int32]string{ 0: "SI", 1: "RC", } var IsolationLevel_value = map[string]int32{ "SI": 0, "RC": 1, } func (x IsolationLevel) String() string { return proto.EnumName(IsolationLevel_name, int32(x)) } func (IsolationLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{1} } type Op int32 const ( Op_Put Op = 0 Op_Del Op = 1 Op_Lock Op = 2 Op_Rollback Op = 3 // The insert operation has a constraint that the key should not exist beforehand.
Op_Insert Op = 4 Op_PessimisticLock Op = 5 ) var Op_name = map[int32]string{ 0: "Put", 1: "Del", 2: "Lock", 3: "Rollback", 4: "Insert", 5: "PessimisticLock", } var Op_value = map[string]int32{ "Put": 0, "Del": 1, "Lock": 2, "Rollback": 3, "Insert": 4, "PessimisticLock": 5, } func (x Op) String() string { return proto.EnumName(Op_name, int32(x)) } func (Op) EnumDescriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{2} } type Assertion int32 const ( Assertion_None Assertion = 0 Assertion_Exist Assertion = 1 Assertion_NotExist Assertion = 2 ) var Assertion_name = map[int32]string{ 0: "None", 1: "Exist", 2: "NotExist", } var Assertion_value = map[string]int32{ "None": 0, "Exist": 1, "NotExist": 2, } func (x Assertion) String() string { return proto.EnumName(Assertion_name, int32(x)) } func (Assertion) EnumDescriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{3} } type Action int32 const ( Action_NoAction Action = 0 Action_TTLExpireRollback Action = 1 Action_LockNotExistRollback Action = 2 Action_MinCommitTSPushed Action = 3 ) var Action_name = map[int32]string{ 0: "NoAction", 1: "TTLExpireRollback", 2: "LockNotExistRollback", 3: "MinCommitTSPushed", } var Action_value = map[string]int32{ "NoAction": 0, "TTLExpireRollback": 1, "LockNotExistRollback": 2, "MinCommitTSPushed": 3, } func (x Action) String() string { return proto.EnumName(Action_name, int32(x)) } func (Action) EnumDescriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{4} } type LockInfo struct { PrimaryLock []byte `protobuf:"bytes,1,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"` LockVersion uint64 `protobuf:"varint,2,opt,name=lock_version,json=lockVersion,proto3" json:"lock_version,omitempty"` Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` LockTtl uint64 `protobuf:"varint,4,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` // How many keys this transaction involved. 
TxnSize uint64 `protobuf:"varint,5,opt,name=txn_size,json=txnSize,proto3" json:"txn_size,omitempty"` LockType Op `protobuf:"varint,6,opt,name=lock_type,json=lockType,proto3,enum=kvrpcpb.Op" json:"lock_type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LockInfo) Reset() { *m = LockInfo{} } func (m *LockInfo) String() string { return proto.CompactTextString(m) } func (*LockInfo) ProtoMessage() {} func (*LockInfo) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{0} } func (m *LockInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LockInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LockInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *LockInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_LockInfo.Merge(dst, src) } func (m *LockInfo) XXX_Size() int { return m.Size() } func (m *LockInfo) XXX_DiscardUnknown() { xxx_messageInfo_LockInfo.DiscardUnknown(m) } var xxx_messageInfo_LockInfo proto.InternalMessageInfo func (m *LockInfo) GetPrimaryLock() []byte { if m != nil { return m.PrimaryLock } return nil } func (m *LockInfo) GetLockVersion() uint64 { if m != nil { return m.LockVersion } return 0 } func (m *LockInfo) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *LockInfo) GetLockTtl() uint64 { if m != nil { return m.LockTtl } return 0 } func (m *LockInfo) GetTxnSize() uint64 { if m != nil { return m.TxnSize } return 0 } func (m *LockInfo) GetLockType() Op { if m != nil { return m.LockType } return Op_Put } type AlreadyExist struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *AlreadyExist) Reset() { *m = AlreadyExist{} } func (m *AlreadyExist) String() string { return proto.CompactTextString(m) } func (*AlreadyExist) ProtoMessage() {} func (*AlreadyExist) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{1} } func (m *AlreadyExist) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *AlreadyExist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_AlreadyExist.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *AlreadyExist) XXX_Merge(src proto.Message) { xxx_messageInfo_AlreadyExist.Merge(dst, src) } func (m *AlreadyExist) XXX_Size() int { return m.Size() } func (m *AlreadyExist) XXX_DiscardUnknown() { xxx_messageInfo_AlreadyExist.DiscardUnknown(m) } var xxx_messageInfo_AlreadyExist proto.InternalMessageInfo func (m *AlreadyExist) GetKey() []byte { if m != nil { return m.Key } return nil } type KeyError struct { Locked *LockInfo `protobuf:"bytes,1,opt,name=locked" json:"locked,omitempty"` Retryable string `protobuf:"bytes,2,opt,name=retryable,proto3" json:"retryable,omitempty"` Abort string `protobuf:"bytes,3,opt,name=abort,proto3" json:"abort,omitempty"` Conflict *WriteConflict `protobuf:"bytes,4,opt,name=conflict" json:"conflict,omitempty"` AlreadyExist *AlreadyExist `protobuf:"bytes,5,opt,name=already_exist,json=alreadyExist" json:"already_exist,omitempty"` Deadlock *Deadlock `protobuf:"bytes,6,opt,name=deadlock" json:"deadlock,omitempty"` 
CommitTsExpired *CommitTsExpired `protobuf:"bytes,7,opt,name=commit_ts_expired,json=commitTsExpired" json:"commit_ts_expired,omitempty"` TxnNotFound *TxnNotFound `protobuf:"bytes,8,opt,name=txn_not_found,json=txnNotFound" json:"txn_not_found,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *KeyError) Reset() { *m = KeyError{} } func (m *KeyError) String() string { return proto.CompactTextString(m) } func (*KeyError) ProtoMessage() {} func (*KeyError) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{2} } func (m *KeyError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *KeyError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_KeyError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *KeyError) XXX_Merge(src proto.Message) { xxx_messageInfo_KeyError.Merge(dst, src) } func (m *KeyError) XXX_Size() int { return m.Size() } func (m *KeyError) XXX_DiscardUnknown() { xxx_messageInfo_KeyError.DiscardUnknown(m) } var xxx_messageInfo_KeyError proto.InternalMessageInfo func (m *KeyError) GetLocked() *LockInfo { if m != nil { return m.Locked } return nil } func (m *KeyError) GetRetryable() string { if m != nil { return m.Retryable } return "" } func (m *KeyError) GetAbort() string { if m != nil { return m.Abort } return "" } func (m *KeyError) GetConflict() *WriteConflict { if m != nil { return m.Conflict } return nil } func (m *KeyError) GetAlreadyExist() *AlreadyExist { if m != nil { return m.AlreadyExist } return nil } func (m *KeyError) GetDeadlock() *Deadlock { if m != nil { return m.Deadlock } return nil } func (m *KeyError) GetCommitTsExpired() *CommitTsExpired { if m != nil { return m.CommitTsExpired } return nil } func (m *KeyError) GetTxnNotFound() *TxnNotFound { if m != nil { return m.TxnNotFound } return nil } type WriteConflict struct { StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` ConflictTs uint64 `protobuf:"varint,2,opt,name=conflict_ts,json=conflictTs,proto3" json:"conflict_ts,omitempty"` Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` Primary []byte `protobuf:"bytes,4,opt,name=primary,proto3" json:"primary,omitempty"` ConflictCommitTs uint64 `protobuf:"varint,5,opt,name=conflict_commit_ts,json=conflictCommitTs,proto3" json:"conflict_commit_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *WriteConflict) Reset() { *m = WriteConflict{} } func (m *WriteConflict) String() string { return proto.CompactTextString(m) } func (*WriteConflict) ProtoMessage() {} func (*WriteConflict) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{3} } func (m *WriteConflict) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *WriteConflict) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_WriteConflict.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *WriteConflict) XXX_Merge(src proto.Message) { xxx_messageInfo_WriteConflict.Merge(dst, src) } func (m *WriteConflict) XXX_Size() int { return m.Size() } func (m *WriteConflict) XXX_DiscardUnknown() { 
xxx_messageInfo_WriteConflict.DiscardUnknown(m) } var xxx_messageInfo_WriteConflict proto.InternalMessageInfo func (m *WriteConflict) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } func (m *WriteConflict) GetConflictTs() uint64 { if m != nil { return m.ConflictTs } return 0 } func (m *WriteConflict) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *WriteConflict) GetPrimary() []byte { if m != nil { return m.Primary } return nil } func (m *WriteConflict) GetConflictCommitTs() uint64 { if m != nil { return m.ConflictCommitTs } return 0 } type Deadlock struct { LockTs uint64 `protobuf:"varint,1,opt,name=lock_ts,json=lockTs,proto3" json:"lock_ts,omitempty"` LockKey []byte `protobuf:"bytes,2,opt,name=lock_key,json=lockKey,proto3" json:"lock_key,omitempty"` DeadlockKeyHash uint64 `protobuf:"varint,3,opt,name=deadlock_key_hash,json=deadlockKeyHash,proto3" json:"deadlock_key_hash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Deadlock) Reset() { *m = Deadlock{} } func (m *Deadlock) String() string { return proto.CompactTextString(m) } func (*Deadlock) ProtoMessage() {} func (*Deadlock) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{4} } func (m *Deadlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Deadlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Deadlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *Deadlock) XXX_Merge(src proto.Message) { xxx_messageInfo_Deadlock.Merge(dst, src) } func (m *Deadlock) XXX_Size() int { return m.Size() } func (m *Deadlock) XXX_DiscardUnknown() { xxx_messageInfo_Deadlock.DiscardUnknown(m) } var xxx_messageInfo_Deadlock proto.InternalMessageInfo func (m *Deadlock) GetLockTs() uint64 { if m != nil { return m.LockTs } return 0 } func (m *Deadlock) GetLockKey() []byte { if m != nil { return m.LockKey } return nil } func (m *Deadlock) GetDeadlockKeyHash() uint64 { if m != nil { return m.DeadlockKeyHash } return 0 } type CommitTsExpired struct { StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` AttemptedCommitTs uint64 `protobuf:"varint,2,opt,name=attempted_commit_ts,json=attemptedCommitTs,proto3" json:"attempted_commit_ts,omitempty"` Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` MinCommitTs uint64 `protobuf:"varint,4,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CommitTsExpired) Reset() { *m = CommitTsExpired{} } func (m *CommitTsExpired) String() string { return proto.CompactTextString(m) } func (*CommitTsExpired) ProtoMessage() {} func (*CommitTsExpired) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{5} } func (m *CommitTsExpired) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CommitTsExpired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CommitTsExpired.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CommitTsExpired) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitTsExpired.Merge(dst, src) } func 
(m *CommitTsExpired) XXX_Size() int { return m.Size() } func (m *CommitTsExpired) XXX_DiscardUnknown() { xxx_messageInfo_CommitTsExpired.DiscardUnknown(m) } var xxx_messageInfo_CommitTsExpired proto.InternalMessageInfo func (m *CommitTsExpired) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } func (m *CommitTsExpired) GetAttemptedCommitTs() uint64 { if m != nil { return m.AttemptedCommitTs } return 0 } func (m *CommitTsExpired) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *CommitTsExpired) GetMinCommitTs() uint64 { if m != nil { return m.MinCommitTs } return 0 } type TxnNotFound struct { StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` PrimaryKey []byte `protobuf:"bytes,2,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *TxnNotFound) Reset() { *m = TxnNotFound{} } func (m *TxnNotFound) String() string { return proto.CompactTextString(m) } func (*TxnNotFound) ProtoMessage() {} func (*TxnNotFound) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{6} } func (m *TxnNotFound) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TxnNotFound) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TxnNotFound.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *TxnNotFound) XXX_Merge(src proto.Message) { xxx_messageInfo_TxnNotFound.Merge(dst, src) } func (m *TxnNotFound) XXX_Size() int { return m.Size() } func (m *TxnNotFound) XXX_DiscardUnknown() { xxx_messageInfo_TxnNotFound.DiscardUnknown(m) } var xxx_messageInfo_TxnNotFound proto.InternalMessageInfo func (m *TxnNotFound) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } func (m *TxnNotFound) GetPrimaryKey() []byte { if m != nil { return m.PrimaryKey } return nil } type Context struct { RegionId uint64 `protobuf:"varint,1,opt,name=region_id,json=regionId,proto3" json:"region_id,omitempty"` RegionEpoch *metapb.RegionEpoch `protobuf:"bytes,2,opt,name=region_epoch,json=regionEpoch" json:"region_epoch,omitempty"` Peer *metapb.Peer `protobuf:"bytes,3,opt,name=peer" json:"peer,omitempty"` Term uint64 `protobuf:"varint,5,opt,name=term,proto3" json:"term,omitempty"` Priority CommandPri `protobuf:"varint,6,opt,name=priority,proto3,enum=kvrpcpb.CommandPri" json:"priority,omitempty"` IsolationLevel IsolationLevel `protobuf:"varint,7,opt,name=isolation_level,json=isolationLevel,proto3,enum=kvrpcpb.IsolationLevel" json:"isolation_level,omitempty"` NotFillCache bool `protobuf:"varint,8,opt,name=not_fill_cache,json=notFillCache,proto3" json:"not_fill_cache,omitempty"` SyncLog bool `protobuf:"varint,9,opt,name=sync_log,json=syncLog,proto3" json:"sync_log,omitempty"` HandleTime bool `protobuf:"varint,10,opt,name=handle_time,json=handleTime,proto3" json:"handle_time,omitempty"` ScanDetail bool `protobuf:"varint,11,opt,name=scan_detail,json=scanDetail,proto3" json:"scan_detail,omitempty"` ReplicaRead bool `protobuf:"varint,12,opt,name=replica_read,json=replicaRead,proto3" json:"replica_read,omitempty"` ResolvedLocks []uint64 `protobuf:"varint,13,rep,packed,name=resolved_locks,json=resolvedLocks" json:"resolved_locks,omitempty"` MaxExecutionDurationMs uint64 
`protobuf:"varint,14,opt,name=max_execution_duration_ms,json=maxExecutionDurationMs,proto3" json:"max_execution_duration_ms,omitempty"` // After a region applys to `applied_index`, we can get a // snapshot for the region even if the peer is follower. AppliedIndex uint64 `protobuf:"varint,15,opt,name=applied_index,json=appliedIndex,proto3" json:"applied_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Context) Reset() { *m = Context{} } func (m *Context) String() string { return proto.CompactTextString(m) } func (*Context) ProtoMessage() {} func (*Context) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{7} } func (m *Context) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Context) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Context.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *Context) XXX_Merge(src proto.Message) { xxx_messageInfo_Context.Merge(dst, src) } func (m *Context) XXX_Size() int { return m.Size() } func (m *Context) XXX_DiscardUnknown() { xxx_messageInfo_Context.DiscardUnknown(m) } var xxx_messageInfo_Context proto.InternalMessageInfo func (m *Context) GetRegionId() uint64 { if m != nil { return m.RegionId } return 0 } func (m *Context) GetRegionEpoch() *metapb.RegionEpoch { if m != nil { return m.RegionEpoch } return nil } func (m *Context) GetPeer() *metapb.Peer { if m != nil { return m.Peer } return nil } func (m *Context) GetTerm() uint64 { if m != nil { return m.Term } return 0 } func (m *Context) GetPriority() CommandPri { if m != nil { return m.Priority } return CommandPri_Normal } func (m *Context) GetIsolationLevel() IsolationLevel { if m != nil { return m.IsolationLevel } return IsolationLevel_SI } func (m *Context) GetNotFillCache() bool { if m != nil { return m.NotFillCache } return false } func (m *Context) GetSyncLog() bool { if m != nil { return m.SyncLog } return false } func (m *Context) GetHandleTime() bool { if m != nil { return m.HandleTime } return false } func (m *Context) GetScanDetail() bool { if m != nil { return m.ScanDetail } return false } func (m *Context) GetReplicaRead() bool { if m != nil { return m.ReplicaRead } return false } func (m *Context) GetResolvedLocks() []uint64 { if m != nil { return m.ResolvedLocks } return nil } func (m *Context) GetMaxExecutionDurationMs() uint64 { if m != nil { return m.MaxExecutionDurationMs } return 0 } func (m *Context) GetAppliedIndex() uint64 { if m != nil { return m.AppliedIndex } return 0 } type HandleTime struct { WaitMs int64 `protobuf:"varint,1,opt,name=wait_ms,json=waitMs,proto3" json:"wait_ms,omitempty"` ProcessMs int64 `protobuf:"varint,2,opt,name=process_ms,json=processMs,proto3" json:"process_ms,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HandleTime) Reset() { *m = HandleTime{} } func (m *HandleTime) String() string { return proto.CompactTextString(m) } func (*HandleTime) ProtoMessage() {} func (*HandleTime) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{8} } func (m *HandleTime) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *HandleTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_HandleTime.Marshal(b, m, 
deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *HandleTime) XXX_Merge(src proto.Message) { xxx_messageInfo_HandleTime.Merge(dst, src) } func (m *HandleTime) XXX_Size() int { return m.Size() } func (m *HandleTime) XXX_DiscardUnknown() { xxx_messageInfo_HandleTime.DiscardUnknown(m) } var xxx_messageInfo_HandleTime proto.InternalMessageInfo func (m *HandleTime) GetWaitMs() int64 { if m != nil { return m.WaitMs } return 0 } func (m *HandleTime) GetProcessMs() int64 { if m != nil { return m.ProcessMs } return 0 } type ScanInfo struct { Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` Processed int64 `protobuf:"varint,2,opt,name=processed,proto3" json:"processed,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanInfo) Reset() { *m = ScanInfo{} } func (m *ScanInfo) String() string { return proto.CompactTextString(m) } func (*ScanInfo) ProtoMessage() {} func (*ScanInfo) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{9} } func (m *ScanInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScanInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ScanInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ScanInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanInfo.Merge(dst, src) } func (m *ScanInfo) XXX_Size() int { return m.Size() } func (m *ScanInfo) XXX_DiscardUnknown() { xxx_messageInfo_ScanInfo.DiscardUnknown(m) } var xxx_messageInfo_ScanInfo proto.InternalMessageInfo func (m *ScanInfo) GetTotal() int64 { if m != nil { return m.Total } return 0 } func (m *ScanInfo) GetProcessed() int64 { if m != nil { return m.Processed } return 0 } type ScanDetail struct { Write *ScanInfo `protobuf:"bytes,1,opt,name=write" json:"write,omitempty"` Lock *ScanInfo `protobuf:"bytes,2,opt,name=lock" json:"lock,omitempty"` Data *ScanInfo `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanDetail) Reset() { *m = ScanDetail{} } func (m *ScanDetail) String() string { return proto.CompactTextString(m) } func (*ScanDetail) ProtoMessage() {} func (*ScanDetail) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{10} } func (m *ScanDetail) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScanDetail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ScanDetail.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ScanDetail) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanDetail.Merge(dst, src) } func (m *ScanDetail) XXX_Size() int { return m.Size() } func (m *ScanDetail) XXX_DiscardUnknown() { xxx_messageInfo_ScanDetail.DiscardUnknown(m) } var xxx_messageInfo_ScanDetail proto.InternalMessageInfo func (m *ScanDetail) GetWrite() *ScanInfo { if m != nil { return m.Write } return nil } func (m *ScanDetail) GetLock() *ScanInfo { if m != nil { return m.Lock } return nil } func (m *ScanDetail) GetData() *ScanInfo { if m != nil { return m.Data } return nil } type ExecDetails struct { HandleTime *HandleTime 
`protobuf:"bytes,1,opt,name=handle_time,json=handleTime" json:"handle_time,omitempty"` ScanDetail *ScanDetail `protobuf:"bytes,2,opt,name=scan_detail,json=scanDetail" json:"scan_detail,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ExecDetails) Reset() { *m = ExecDetails{} } func (m *ExecDetails) String() string { return proto.CompactTextString(m) } func (*ExecDetails) ProtoMessage() {} func (*ExecDetails) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{11} } func (m *ExecDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ExecDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ExecDetails.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ExecDetails) XXX_Merge(src proto.Message) { xxx_messageInfo_ExecDetails.Merge(dst, src) } func (m *ExecDetails) XXX_Size() int { return m.Size() } func (m *ExecDetails) XXX_DiscardUnknown() { xxx_messageInfo_ExecDetails.DiscardUnknown(m) } var xxx_messageInfo_ExecDetails proto.InternalMessageInfo func (m *ExecDetails) GetHandleTime() *HandleTime { if m != nil { return m.HandleTime } return nil } func (m *ExecDetails) GetScanDetail() *ScanDetail { if m != nil { return m.ScanDetail } return nil } type GetRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{12} } func (m *GetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *GetRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetRequest.Merge(dst, src) } func (m *GetRequest) XXX_Size() int { return m.Size() } func (m *GetRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetRequest.DiscardUnknown(m) } var xxx_messageInfo_GetRequest proto.InternalMessageInfo func (m *GetRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *GetRequest) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *GetRequest) GetVersion() uint64 { if m != nil { return m.Version } return 0 } type GetResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetResponse) Reset() { *m = 
GetResponse{} } func (m *GetResponse) String() string { return proto.CompactTextString(m) } func (*GetResponse) ProtoMessage() {} func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{13} } func (m *GetResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *GetResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetResponse.Merge(dst, src) } func (m *GetResponse) XXX_Size() int { return m.Size() } func (m *GetResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetResponse.DiscardUnknown(m) } var xxx_messageInfo_GetResponse proto.InternalMessageInfo func (m *GetResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *GetResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *GetResponse) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *GetResponse) GetNotFound() bool { if m != nil { return m.NotFound } return false } type ScanRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` KeyOnly bool `protobuf:"varint,5,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"` Reverse bool `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"` // For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key; // and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key. 
EndKey []byte `protobuf:"bytes,7,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanRequest) Reset() { *m = ScanRequest{} } func (m *ScanRequest) String() string { return proto.CompactTextString(m) } func (*ScanRequest) ProtoMessage() {} func (*ScanRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{14} } func (m *ScanRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ScanRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ScanRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanRequest.Merge(dst, src) } func (m *ScanRequest) XXX_Size() int { return m.Size() } func (m *ScanRequest) XXX_DiscardUnknown() { xxx_messageInfo_ScanRequest.DiscardUnknown(m) } var xxx_messageInfo_ScanRequest proto.InternalMessageInfo func (m *ScanRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *ScanRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *ScanRequest) GetLimit() uint32 { if m != nil { return m.Limit } return 0 } func (m *ScanRequest) GetVersion() uint64 { if m != nil { return m.Version } return 0 } func (m *ScanRequest) GetKeyOnly() bool { if m != nil { return m.KeyOnly } return false } func (m *ScanRequest) GetReverse() bool { if m != nil { return m.Reverse } return false } func (m *ScanRequest) GetEndKey() []byte { if m != nil { return m.EndKey } return nil } type KvPair struct { Error *KeyError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *KvPair) Reset() { *m = KvPair{} } func (m *KvPair) String() string { return proto.CompactTextString(m) } func (*KvPair) ProtoMessage() {} func (*KvPair) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{15} } func (m *KvPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *KvPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_KvPair.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *KvPair) XXX_Merge(src proto.Message) { xxx_messageInfo_KvPair.Merge(dst, src) } func (m *KvPair) XXX_Size() int { return m.Size() } func (m *KvPair) XXX_DiscardUnknown() { xxx_messageInfo_KvPair.DiscardUnknown(m) } var xxx_messageInfo_KvPair proto.InternalMessageInfo func (m *KvPair) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *KvPair) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *KvPair) GetValue() []byte { if m != nil { return m.Value } return nil } type ScanResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` 
} func (m *ScanResponse) Reset() { *m = ScanResponse{} } func (m *ScanResponse) String() string { return proto.CompactTextString(m) } func (*ScanResponse) ProtoMessage() {} func (*ScanResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{16} } func (m *ScanResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ScanResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ScanResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanResponse.Merge(dst, src) } func (m *ScanResponse) XXX_Size() int { return m.Size() } func (m *ScanResponse) XXX_DiscardUnknown() { xxx_messageInfo_ScanResponse.DiscardUnknown(m) } var xxx_messageInfo_ScanResponse proto.InternalMessageInfo func (m *ScanResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *ScanResponse) GetPairs() []*KvPair { if m != nil { return m.Pairs } return nil } type Mutation struct { Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=kvrpcpb.Op" json:"op,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` Assertion Assertion `protobuf:"varint,4,opt,name=assertion,proto3,enum=kvrpcpb.Assertion" json:"assertion,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Mutation) Reset() { *m = Mutation{} } func (m *Mutation) String() string { return proto.CompactTextString(m) } func (*Mutation) ProtoMessage() {} func (*Mutation) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{17} } func (m *Mutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Mutation.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *Mutation) XXX_Merge(src proto.Message) { xxx_messageInfo_Mutation.Merge(dst, src) } func (m *Mutation) XXX_Size() int { return m.Size() } func (m *Mutation) XXX_DiscardUnknown() { xxx_messageInfo_Mutation.DiscardUnknown(m) } var xxx_messageInfo_Mutation proto.InternalMessageInfo func (m *Mutation) GetOp() Op { if m != nil { return m.Op } return Op_Put } func (m *Mutation) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *Mutation) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *Mutation) GetAssertion() Assertion { if m != nil { return m.Assertion } return Assertion_None } type PrewriteRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"` // primary_lock_key PrimaryLock []byte `protobuf:"bytes,3,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"` StartVersion uint64 `protobuf:"varint,4,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` LockTtl uint64 `protobuf:"varint,5,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` SkipConstraintCheck bool `protobuf:"varint,6,opt,name=skip_constraint_check,json=skipConstraintCheck,proto3" 
json:"skip_constraint_check,omitempty"` // For pessimistic transaction, some mutations don't need to be locked, for example, non-unique index key. IsPessimisticLock []bool `protobuf:"varint,7,rep,packed,name=is_pessimistic_lock,json=isPessimisticLock" json:"is_pessimistic_lock,omitempty"` // How many keys this transaction involved. TxnSize uint64 `protobuf:"varint,8,opt,name=txn_size,json=txnSize,proto3" json:"txn_size,omitempty"` // Use for pessimistic transaction, used to check if a conflict lock is already committed. ForUpdateTs uint64 `protobuf:"varint,9,opt,name=for_update_ts,json=forUpdateTs,proto3" json:"for_update_ts,omitempty"` // If min_commit_ts > 0, this is large transaction proto, the final commit_ts // would be infered from min_commit_ts. MinCommitTs uint64 `protobuf:"varint,10,opt,name=min_commit_ts,json=minCommitTs,proto3" json:"min_commit_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PrewriteRequest) Reset() { *m = PrewriteRequest{} } func (m *PrewriteRequest) String() string { return proto.CompactTextString(m) } func (*PrewriteRequest) ProtoMessage() {} func (*PrewriteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{18} } func (m *PrewriteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PrewriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PrewriteRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PrewriteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PrewriteRequest.Merge(dst, src) } func (m *PrewriteRequest) XXX_Size() int { return m.Size() } func (m *PrewriteRequest) XXX_DiscardUnknown() { xxx_messageInfo_PrewriteRequest.DiscardUnknown(m) } var xxx_messageInfo_PrewriteRequest proto.InternalMessageInfo func (m *PrewriteRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *PrewriteRequest) GetMutations() []*Mutation { if m != nil { return m.Mutations } return nil } func (m *PrewriteRequest) GetPrimaryLock() []byte { if m != nil { return m.PrimaryLock } return nil } func (m *PrewriteRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *PrewriteRequest) GetLockTtl() uint64 { if m != nil { return m.LockTtl } return 0 } func (m *PrewriteRequest) GetSkipConstraintCheck() bool { if m != nil { return m.SkipConstraintCheck } return false } func (m *PrewriteRequest) GetIsPessimisticLock() []bool { if m != nil { return m.IsPessimisticLock } return nil } func (m *PrewriteRequest) GetTxnSize() uint64 { if m != nil { return m.TxnSize } return 0 } func (m *PrewriteRequest) GetForUpdateTs() uint64 { if m != nil { return m.ForUpdateTs } return 0 } func (m *PrewriteRequest) GetMinCommitTs() uint64 { if m != nil { return m.MinCommitTs } return 0 } type PrewriteResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PrewriteResponse) Reset() { *m = PrewriteResponse{} } func (m *PrewriteResponse) String() string { return proto.CompactTextString(m) } func (*PrewriteResponse) ProtoMessage() {} func (*PrewriteResponse) 
Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{19} } func (m *PrewriteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PrewriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PrewriteResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PrewriteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PrewriteResponse.Merge(dst, src) } func (m *PrewriteResponse) XXX_Size() int { return m.Size() } func (m *PrewriteResponse) XXX_DiscardUnknown() { xxx_messageInfo_PrewriteResponse.DiscardUnknown(m) } var xxx_messageInfo_PrewriteResponse proto.InternalMessageInfo func (m *PrewriteResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *PrewriteResponse) GetErrors() []*KeyError { if m != nil { return m.Errors } return nil } // Used to update the lock_ttl of a large transaction to prevent it from being killed. type TxnHeartBeatRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` PrimaryLock []byte `protobuf:"bytes,2,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"` StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` AdviseLockTtl uint64 `protobuf:"varint,4,opt,name=advise_lock_ttl,json=adviseLockTtl,proto3" json:"advise_lock_ttl,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *TxnHeartBeatRequest) Reset() { *m = TxnHeartBeatRequest{} } func (m *TxnHeartBeatRequest) String() string { return proto.CompactTextString(m) } func (*TxnHeartBeatRequest) ProtoMessage() {} func (*TxnHeartBeatRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{20} } func (m *TxnHeartBeatRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TxnHeartBeatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TxnHeartBeatRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *TxnHeartBeatRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_TxnHeartBeatRequest.Merge(dst, src) } func (m *TxnHeartBeatRequest) XXX_Size() int { return m.Size() } func (m *TxnHeartBeatRequest) XXX_DiscardUnknown() { xxx_messageInfo_TxnHeartBeatRequest.DiscardUnknown(m) } var xxx_messageInfo_TxnHeartBeatRequest proto.InternalMessageInfo func (m *TxnHeartBeatRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *TxnHeartBeatRequest) GetPrimaryLock() []byte { if m != nil { return m.PrimaryLock } return nil } func (m *TxnHeartBeatRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *TxnHeartBeatRequest) GetAdviseLockTtl() uint64 { if m != nil { return m.AdviseLockTtl } return 0 } type TxnHeartBeatResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` LockTtl uint64 `protobuf:"varint,3,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` } func (m *TxnHeartBeatResponse) Reset() { *m = TxnHeartBeatResponse{} } func (m *TxnHeartBeatResponse) String() string { return proto.CompactTextString(m) } func (*TxnHeartBeatResponse) ProtoMessage() {} func (*TxnHeartBeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{21} } func (m *TxnHeartBeatResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TxnHeartBeatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TxnHeartBeatResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *TxnHeartBeatResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_TxnHeartBeatResponse.Merge(dst, src) } func (m *TxnHeartBeatResponse) XXX_Size() int { return m.Size() } func (m *TxnHeartBeatResponse) XXX_DiscardUnknown() { xxx_messageInfo_TxnHeartBeatResponse.DiscardUnknown(m) } var xxx_messageInfo_TxnHeartBeatResponse proto.InternalMessageInfo func (m *TxnHeartBeatResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *TxnHeartBeatResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *TxnHeartBeatResponse) GetLockTtl() uint64 { if m != nil { return m.LockTtl } return 0 } type PessimisticLockRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` // In this case the Op of the mutation must be Lock. Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"` PrimaryLock []byte `protobuf:"bytes,3,opt,name=primary_lock,json=primaryLock,proto3" json:"primary_lock,omitempty"` StartVersion uint64 `protobuf:"varint,4,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` LockTtl uint64 `protobuf:"varint,5,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` ForUpdateTs uint64 `protobuf:"varint,6,opt,name=for_update_ts,json=forUpdateTs,proto3" json:"for_update_ts,omitempty"` // If the request is the first lock request, we don't need to detect deadlock. IsFirstLock bool `protobuf:"varint,7,opt,name=is_first_lock,json=isFirstLock,proto3" json:"is_first_lock,omitempty"` // Time to wait, in milliseconds, for the lock to be released when encountering locks. // 0 means using the default timeout in TiKV. Negative means no wait. WaitTimeout int64 `protobuf:"varint,8,opt,name=wait_timeout,json=waitTimeout,proto3" json:"wait_timeout,omitempty"` // If it is true, TiKV will acquire the pessimistic lock regardless of write conflicts // and return the latest value. It is only supported for a single mutation. Force bool `protobuf:"varint,9,opt,name=force,proto3" json:"force,omitempty"` // If it is true, TiKV will return the values of the keys if there is no error, so TiDB can cache the values for // later reads in the same transaction. // When 'force' is set to true, this field is ignored.
ReturnValues bool `protobuf:"varint,10,opt,name=return_values,json=returnValues,proto3" json:"return_values,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PessimisticLockRequest) Reset() { *m = PessimisticLockRequest{} } func (m *PessimisticLockRequest) String() string { return proto.CompactTextString(m) } func (*PessimisticLockRequest) ProtoMessage() {} func (*PessimisticLockRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{22} } func (m *PessimisticLockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PessimisticLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PessimisticLockRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PessimisticLockRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PessimisticLockRequest.Merge(dst, src) } func (m *PessimisticLockRequest) XXX_Size() int { return m.Size() } func (m *PessimisticLockRequest) XXX_DiscardUnknown() { xxx_messageInfo_PessimisticLockRequest.DiscardUnknown(m) } var xxx_messageInfo_PessimisticLockRequest proto.InternalMessageInfo func (m *PessimisticLockRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *PessimisticLockRequest) GetMutations() []*Mutation { if m != nil { return m.Mutations } return nil } func (m *PessimisticLockRequest) GetPrimaryLock() []byte { if m != nil { return m.PrimaryLock } return nil } func (m *PessimisticLockRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *PessimisticLockRequest) GetLockTtl() uint64 { if m != nil { return m.LockTtl } return 0 } func (m *PessimisticLockRequest) GetForUpdateTs() uint64 { if m != nil { return m.ForUpdateTs } return 0 } func (m *PessimisticLockRequest) GetIsFirstLock() bool { if m != nil { return m.IsFirstLock } return false } func (m *PessimisticLockRequest) GetWaitTimeout() int64 { if m != nil { return m.WaitTimeout } return 0 } func (m *PessimisticLockRequest) GetForce() bool { if m != nil { return m.Force } return false } func (m *PessimisticLockRequest) GetReturnValues() bool { if m != nil { return m.ReturnValues } return false } type PessimisticLockResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"` // It carries the latest value and its commit ts if force in PessimisticLockRequest is true. CommitTs uint64 `protobuf:"varint,3,opt,name=commit_ts,json=commitTs,proto3" json:"commit_ts,omitempty"` Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` // The values are set if 'return_values' is true in the request and there is no error. // If 'force' is true, this field is not used.
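// NOTE: illustrative sketch, not part of the generated code. Interpreting a
// PessimisticLockResponse held in a hypothetical variable resp.
//
//	if resp.RegionError != nil {
//		// stale region info: refresh the region cache and retry
//	}
//	for _, keyErr := range resp.Errors {
//		// per-key lock errors, e.g. write conflicts or locks held by others
//		_ = keyErr
//	}
//	if len(resp.Values) > 0 {
//		// values returned because return_values was set in the request
//	}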
Values [][]byte `protobuf:"bytes,5,rep,name=values" json:"values,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PessimisticLockResponse) Reset() { *m = PessimisticLockResponse{} } func (m *PessimisticLockResponse) String() string { return proto.CompactTextString(m) } func (*PessimisticLockResponse) ProtoMessage() {} func (*PessimisticLockResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{23} } func (m *PessimisticLockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PessimisticLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PessimisticLockResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PessimisticLockResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PessimisticLockResponse.Merge(dst, src) } func (m *PessimisticLockResponse) XXX_Size() int { return m.Size() } func (m *PessimisticLockResponse) XXX_DiscardUnknown() { xxx_messageInfo_PessimisticLockResponse.DiscardUnknown(m) } var xxx_messageInfo_PessimisticLockResponse proto.InternalMessageInfo func (m *PessimisticLockResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *PessimisticLockResponse) GetErrors() []*KeyError { if m != nil { return m.Errors } return nil } func (m *PessimisticLockResponse) GetCommitTs() uint64 { if m != nil { return m.CommitTs } return 0 } func (m *PessimisticLockResponse) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *PessimisticLockResponse) GetValues() [][]byte { if m != nil { return m.Values } return nil } type PessimisticRollbackRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` ForUpdateTs uint64 `protobuf:"varint,3,opt,name=for_update_ts,json=forUpdateTs,proto3" json:"for_update_ts,omitempty"` Keys [][]byte `protobuf:"bytes,4,rep,name=keys" json:"keys,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PessimisticRollbackRequest) Reset() { *m = PessimisticRollbackRequest{} } func (m *PessimisticRollbackRequest) String() string { return proto.CompactTextString(m) } func (*PessimisticRollbackRequest) ProtoMessage() {} func (*PessimisticRollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{24} } func (m *PessimisticRollbackRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PessimisticRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PessimisticRollbackRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PessimisticRollbackRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PessimisticRollbackRequest.Merge(dst, src) } func (m *PessimisticRollbackRequest) XXX_Size() int { return m.Size() } func (m *PessimisticRollbackRequest) XXX_DiscardUnknown() { xxx_messageInfo_PessimisticRollbackRequest.DiscardUnknown(m) } var xxx_messageInfo_PessimisticRollbackRequest proto.InternalMessageInfo func (m *PessimisticRollbackRequest) GetContext() 
*Context { if m != nil { return m.Context } return nil } func (m *PessimisticRollbackRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *PessimisticRollbackRequest) GetForUpdateTs() uint64 { if m != nil { return m.ForUpdateTs } return 0 } func (m *PessimisticRollbackRequest) GetKeys() [][]byte { if m != nil { return m.Keys } return nil } type PessimisticRollbackResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Errors []*KeyError `protobuf:"bytes,2,rep,name=errors" json:"errors,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PessimisticRollbackResponse) Reset() { *m = PessimisticRollbackResponse{} } func (m *PessimisticRollbackResponse) String() string { return proto.CompactTextString(m) } func (*PessimisticRollbackResponse) ProtoMessage() {} func (*PessimisticRollbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{25} } func (m *PessimisticRollbackResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PessimisticRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PessimisticRollbackResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PessimisticRollbackResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PessimisticRollbackResponse.Merge(dst, src) } func (m *PessimisticRollbackResponse) XXX_Size() int { return m.Size() } func (m *PessimisticRollbackResponse) XXX_DiscardUnknown() { xxx_messageInfo_PessimisticRollbackResponse.DiscardUnknown(m) } var xxx_messageInfo_PessimisticRollbackResponse proto.InternalMessageInfo func (m *PessimisticRollbackResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *PessimisticRollbackResponse) GetErrors() []*KeyError { if m != nil { return m.Errors } return nil } type CommitRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` Keys [][]byte `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"` CommitVersion uint64 `protobuf:"varint,4,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CommitRequest) Reset() { *m = CommitRequest{} } func (m *CommitRequest) String() string { return proto.CompactTextString(m) } func (*CommitRequest) ProtoMessage() {} func (*CommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{26} } func (m *CommitRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CommitRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CommitRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitRequest.Merge(dst, src) } func (m *CommitRequest) XXX_Size() int { return m.Size() } func (m *CommitRequest) XXX_DiscardUnknown() { xxx_messageInfo_CommitRequest.DiscardUnknown(m) 
} var xxx_messageInfo_CommitRequest proto.InternalMessageInfo func (m *CommitRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *CommitRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *CommitRequest) GetKeys() [][]byte { if m != nil { return m.Keys } return nil } func (m *CommitRequest) GetCommitVersion() uint64 { if m != nil { return m.CommitVersion } return 0 } type CommitResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CommitResponse) Reset() { *m = CommitResponse{} } func (m *CommitResponse) String() string { return proto.CompactTextString(m) } func (*CommitResponse) ProtoMessage() {} func (*CommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{27} } func (m *CommitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CommitResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CommitResponse.Merge(dst, src) } func (m *CommitResponse) XXX_Size() int { return m.Size() } func (m *CommitResponse) XXX_DiscardUnknown() { xxx_messageInfo_CommitResponse.DiscardUnknown(m) } var xxx_messageInfo_CommitResponse proto.InternalMessageInfo func (m *CommitResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *CommitResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *CommitResponse) GetCommitVersion() uint64 { if m != nil { return m.CommitVersion } return 0 } type ImportRequest struct { Mutations []*Mutation `protobuf:"bytes,1,rep,name=mutations" json:"mutations,omitempty"` CommitVersion uint64 `protobuf:"varint,2,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ImportRequest) Reset() { *m = ImportRequest{} } func (m *ImportRequest) String() string { return proto.CompactTextString(m) } func (*ImportRequest) ProtoMessage() {} func (*ImportRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{28} } func (m *ImportRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ImportRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ImportRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ImportRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ImportRequest.Merge(dst, src) } func (m *ImportRequest) XXX_Size() int { return m.Size() } func (m *ImportRequest) XXX_DiscardUnknown() { xxx_messageInfo_ImportRequest.DiscardUnknown(m) } var xxx_messageInfo_ImportRequest proto.InternalMessageInfo func (m *ImportRequest) GetMutations() []*Mutation { if m 
!= nil { return m.Mutations } return nil } func (m *ImportRequest) GetCommitVersion() uint64 { if m != nil { return m.CommitVersion } return 0 } type ImportResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ImportResponse) Reset() { *m = ImportResponse{} } func (m *ImportResponse) String() string { return proto.CompactTextString(m) } func (*ImportResponse) ProtoMessage() {} func (*ImportResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{29} } func (m *ImportResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ImportResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ImportResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ImportResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ImportResponse.Merge(dst, src) } func (m *ImportResponse) XXX_Size() int { return m.Size() } func (m *ImportResponse) XXX_DiscardUnknown() { xxx_messageInfo_ImportResponse.DiscardUnknown(m) } var xxx_messageInfo_ImportResponse proto.InternalMessageInfo func (m *ImportResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *ImportResponse) GetError() string { if m != nil { return m.Error } return "" } type BatchRollbackRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` Keys [][]byte `protobuf:"bytes,3,rep,name=keys" json:"keys,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *BatchRollbackRequest) Reset() { *m = BatchRollbackRequest{} } func (m *BatchRollbackRequest) String() string { return proto.CompactTextString(m) } func (*BatchRollbackRequest) ProtoMessage() {} func (*BatchRollbackRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{30} } func (m *BatchRollbackRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BatchRollbackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_BatchRollbackRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *BatchRollbackRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BatchRollbackRequest.Merge(dst, src) } func (m *BatchRollbackRequest) XXX_Size() int { return m.Size() } func (m *BatchRollbackRequest) XXX_DiscardUnknown() { xxx_messageInfo_BatchRollbackRequest.DiscardUnknown(m) } var xxx_messageInfo_BatchRollbackRequest proto.InternalMessageInfo func (m *BatchRollbackRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *BatchRollbackRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *BatchRollbackRequest) GetKeys() [][]byte { if m != nil { return m.Keys } return nil } type BatchRollbackResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" 
json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *BatchRollbackResponse) Reset() { *m = BatchRollbackResponse{} } func (m *BatchRollbackResponse) String() string { return proto.CompactTextString(m) } func (*BatchRollbackResponse) ProtoMessage() {} func (*BatchRollbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{31} } func (m *BatchRollbackResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BatchRollbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_BatchRollbackResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *BatchRollbackResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_BatchRollbackResponse.Merge(dst, src) } func (m *BatchRollbackResponse) XXX_Size() int { return m.Size() } func (m *BatchRollbackResponse) XXX_DiscardUnknown() { xxx_messageInfo_BatchRollbackResponse.DiscardUnknown(m) } var xxx_messageInfo_BatchRollbackResponse proto.InternalMessageInfo func (m *BatchRollbackResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *BatchRollbackResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } // CheckTxnStatusRequest checks the status of a transaction. // If the transaction is rolled back/committed, return the result; // If the TTL of the transaction is exhausted, abort that transaction and return rolled back; // Otherwise, return the TTL information. // CheckTxnStatusRequest may also push forward the minCommitTS of a large transaction.
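// NOTE: illustrative sketch, not part of the generated code. Querying the
// status of a possibly-abandoned transaction whose lock was encountered by a
// reader. The names ctx, primaryKey, lockTS, callerTS and currentTS are
// hypothetical placeholders.
//
//	req := &CheckTxnStatusRequest{
//		Context:            ctx,
//		PrimaryKey:         primaryKey, // primary key of the lock's transaction
//		LockTs:             lockTS,     // start_ts of the transaction holding the lock
//		CallerStartTs:      callerTS,   // start_ts of the reader that hit the lock
//		CurrentTs:          currentTS,  // used to decide whether the TTL is exhausted
//		RollbackIfNotExist: true,
//	}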
type CheckTxnStatusRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` PrimaryKey []byte `protobuf:"bytes,2,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"` LockTs uint64 `protobuf:"varint,3,opt,name=lock_ts,json=lockTs,proto3" json:"lock_ts,omitempty"` CallerStartTs uint64 `protobuf:"varint,4,opt,name=caller_start_ts,json=callerStartTs,proto3" json:"caller_start_ts,omitempty"` CurrentTs uint64 `protobuf:"varint,5,opt,name=current_ts,json=currentTs,proto3" json:"current_ts,omitempty"` RollbackIfNotExist bool `protobuf:"varint,6,opt,name=rollback_if_not_exist,json=rollbackIfNotExist,proto3" json:"rollback_if_not_exist,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CheckTxnStatusRequest) Reset() { *m = CheckTxnStatusRequest{} } func (m *CheckTxnStatusRequest) String() string { return proto.CompactTextString(m) } func (*CheckTxnStatusRequest) ProtoMessage() {} func (*CheckTxnStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{32} } func (m *CheckTxnStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckTxnStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckTxnStatusRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CheckTxnStatusRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckTxnStatusRequest.Merge(dst, src) } func (m *CheckTxnStatusRequest) XXX_Size() int { return m.Size() } func (m *CheckTxnStatusRequest) XXX_DiscardUnknown() { xxx_messageInfo_CheckTxnStatusRequest.DiscardUnknown(m) } var xxx_messageInfo_CheckTxnStatusRequest proto.InternalMessageInfo func (m *CheckTxnStatusRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *CheckTxnStatusRequest) GetPrimaryKey() []byte { if m != nil { return m.PrimaryKey } return nil } func (m *CheckTxnStatusRequest) GetLockTs() uint64 { if m != nil { return m.LockTs } return 0 } func (m *CheckTxnStatusRequest) GetCallerStartTs() uint64 { if m != nil { return m.CallerStartTs } return 0 } func (m *CheckTxnStatusRequest) GetCurrentTs() uint64 { if m != nil { return m.CurrentTs } return 0 } func (m *CheckTxnStatusRequest) GetRollbackIfNotExist() bool { if m != nil { return m.RollbackIfNotExist } return false } type CheckTxnStatusResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` // Three kinds of txn status: // locked: lock_ttl > 0 // committed: commit_version > 0 // rolled back: lock_ttl = 0 && commit_version = 0 LockTtl uint64 `protobuf:"varint,3,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` CommitVersion uint64 `protobuf:"varint,4,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` // The action performed by CheckTxnStatus. // If the action is rollback, set the rollback reason. // If the minCommitTS is pushed, set the push flag.
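// NOTE: illustrative sketch, not part of the generated code. Decoding the
// three transaction states from a CheckTxnStatusResponse held in a
// hypothetical variable resp, following the rules in the comment above.
//
//	switch {
//	case resp.LockTtl > 0:
//		// still locked and alive
//	case resp.CommitVersion > 0:
//		// committed at resp.CommitVersion
//	default:
//		// rolled back: lock_ttl == 0 && commit_version == 0
//	}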
Action Action `protobuf:"varint,5,opt,name=action,proto3,enum=kvrpcpb.Action" json:"action,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CheckTxnStatusResponse) Reset() { *m = CheckTxnStatusResponse{} } func (m *CheckTxnStatusResponse) String() string { return proto.CompactTextString(m) } func (*CheckTxnStatusResponse) ProtoMessage() {} func (*CheckTxnStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{33} } func (m *CheckTxnStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckTxnStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckTxnStatusResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CheckTxnStatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckTxnStatusResponse.Merge(dst, src) } func (m *CheckTxnStatusResponse) XXX_Size() int { return m.Size() } func (m *CheckTxnStatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_CheckTxnStatusResponse.DiscardUnknown(m) } var xxx_messageInfo_CheckTxnStatusResponse proto.InternalMessageInfo func (m *CheckTxnStatusResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *CheckTxnStatusResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *CheckTxnStatusResponse) GetLockTtl() uint64 { if m != nil { return m.LockTtl } return 0 } func (m *CheckTxnStatusResponse) GetCommitVersion() uint64 { if m != nil { return m.CommitVersion } return 0 } func (m *CheckTxnStatusResponse) GetAction() Action { if m != nil { return m.Action } return Action_NoAction } type CleanupRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` StartVersion uint64 `protobuf:"varint,3,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` CurrentTs uint64 `protobuf:"varint,4,opt,name=current_ts,json=currentTs,proto3" json:"current_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CleanupRequest) Reset() { *m = CleanupRequest{} } func (m *CleanupRequest) String() string { return proto.CompactTextString(m) } func (*CleanupRequest) ProtoMessage() {} func (*CleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{34} } func (m *CleanupRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CleanupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CleanupRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CleanupRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CleanupRequest.Merge(dst, src) } func (m *CleanupRequest) XXX_Size() int { return m.Size() } func (m *CleanupRequest) XXX_DiscardUnknown() { xxx_messageInfo_CleanupRequest.DiscardUnknown(m) } var xxx_messageInfo_CleanupRequest proto.InternalMessageInfo func (m *CleanupRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *CleanupRequest) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *CleanupRequest) 
GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *CleanupRequest) GetCurrentTs() uint64 { if m != nil { return m.CurrentTs } return 0 } type CleanupResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CleanupResponse) Reset() { *m = CleanupResponse{} } func (m *CleanupResponse) String() string { return proto.CompactTextString(m) } func (*CleanupResponse) ProtoMessage() {} func (*CleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{35} } func (m *CleanupResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CleanupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CleanupResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CleanupResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CleanupResponse.Merge(dst, src) } func (m *CleanupResponse) XXX_Size() int { return m.Size() } func (m *CleanupResponse) XXX_DiscardUnknown() { xxx_messageInfo_CleanupResponse.DiscardUnknown(m) } var xxx_messageInfo_CleanupResponse proto.InternalMessageInfo func (m *CleanupResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *CleanupResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *CleanupResponse) GetCommitVersion() uint64 { if m != nil { return m.CommitVersion } return 0 } type BatchGetRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"` Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *BatchGetRequest) Reset() { *m = BatchGetRequest{} } func (m *BatchGetRequest) String() string { return proto.CompactTextString(m) } func (*BatchGetRequest) ProtoMessage() {} func (*BatchGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{36} } func (m *BatchGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BatchGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_BatchGetRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *BatchGetRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_BatchGetRequest.Merge(dst, src) } func (m *BatchGetRequest) XXX_Size() int { return m.Size() } func (m *BatchGetRequest) XXX_DiscardUnknown() { xxx_messageInfo_BatchGetRequest.DiscardUnknown(m) } var xxx_messageInfo_BatchGetRequest proto.InternalMessageInfo func (m *BatchGetRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *BatchGetRequest) GetKeys() [][]byte { if m != nil { return m.Keys } return nil } func (m *BatchGetRequest) GetVersion() uint64 { if m != nil { return 
m.Version } return 0 } type BatchGetResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *BatchGetResponse) Reset() { *m = BatchGetResponse{} } func (m *BatchGetResponse) String() string { return proto.CompactTextString(m) } func (*BatchGetResponse) ProtoMessage() {} func (*BatchGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{37} } func (m *BatchGetResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *BatchGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_BatchGetResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *BatchGetResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_BatchGetResponse.Merge(dst, src) } func (m *BatchGetResponse) XXX_Size() int { return m.Size() } func (m *BatchGetResponse) XXX_DiscardUnknown() { xxx_messageInfo_BatchGetResponse.DiscardUnknown(m) } var xxx_messageInfo_BatchGetResponse proto.InternalMessageInfo func (m *BatchGetResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *BatchGetResponse) GetPairs() []*KvPair { if m != nil { return m.Pairs } return nil } type ScanLockRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` MaxVersion uint64 `protobuf:"varint,2,opt,name=max_version,json=maxVersion,proto3" json:"max_version,omitempty"` StartKey []byte `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanLockRequest) Reset() { *m = ScanLockRequest{} } func (m *ScanLockRequest) String() string { return proto.CompactTextString(m) } func (*ScanLockRequest) ProtoMessage() {} func (*ScanLockRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{38} } func (m *ScanLockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScanLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ScanLockRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ScanLockRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanLockRequest.Merge(dst, src) } func (m *ScanLockRequest) XXX_Size() int { return m.Size() } func (m *ScanLockRequest) XXX_DiscardUnknown() { xxx_messageInfo_ScanLockRequest.DiscardUnknown(m) } var xxx_messageInfo_ScanLockRequest proto.InternalMessageInfo func (m *ScanLockRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *ScanLockRequest) GetMaxVersion() uint64 { if m != nil { return m.MaxVersion } return 0 } func (m *ScanLockRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *ScanLockRequest) GetLimit() uint32 { if m != nil { return m.Limit } return 0 } type ScanLockResponse struct { RegionError *errorpb.Error 
`protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` Locks []*LockInfo `protobuf:"bytes,3,rep,name=locks" json:"locks,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ScanLockResponse) Reset() { *m = ScanLockResponse{} } func (m *ScanLockResponse) String() string { return proto.CompactTextString(m) } func (*ScanLockResponse) ProtoMessage() {} func (*ScanLockResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{39} } func (m *ScanLockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ScanLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ScanLockResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ScanLockResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ScanLockResponse.Merge(dst, src) } func (m *ScanLockResponse) XXX_Size() int { return m.Size() } func (m *ScanLockResponse) XXX_DiscardUnknown() { xxx_messageInfo_ScanLockResponse.DiscardUnknown(m) } var xxx_messageInfo_ScanLockResponse proto.InternalMessageInfo func (m *ScanLockResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *ScanLockResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } func (m *ScanLockResponse) GetLocks() []*LockInfo { if m != nil { return m.Locks } return nil } type TxnInfo struct { Txn uint64 `protobuf:"varint,1,opt,name=txn,proto3" json:"txn,omitempty"` Status uint64 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *TxnInfo) Reset() { *m = TxnInfo{} } func (m *TxnInfo) String() string { return proto.CompactTextString(m) } func (*TxnInfo) ProtoMessage() {} func (*TxnInfo) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{40} } func (m *TxnInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TxnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TxnInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *TxnInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_TxnInfo.Merge(dst, src) } func (m *TxnInfo) XXX_Size() int { return m.Size() } func (m *TxnInfo) XXX_DiscardUnknown() { xxx_messageInfo_TxnInfo.DiscardUnknown(m) } var xxx_messageInfo_TxnInfo proto.InternalMessageInfo func (m *TxnInfo) GetTxn() uint64 { if m != nil { return m.Txn } return 0 } func (m *TxnInfo) GetStatus() uint64 { if m != nil { return m.Status } return 0 } type ResolveLockRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartVersion uint64 `protobuf:"varint,2,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` // If the txn is rolled back, do not set it. CommitVersion uint64 `protobuf:"varint,3,opt,name=commit_version,json=commitVersion,proto3" json:"commit_version,omitempty"` TxnInfos []*TxnInfo `protobuf:"bytes,4,rep,name=txn_infos,json=txnInfos" json:"txn_infos,omitempty"` // Only resolve specified keys. 
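// NOTE: illustrative sketch, not part of the generated code. Resolving the
// locks left by a committed transaction; leaving CommitVersion at 0 instead
// tells TiKV the transaction was rolled back. The names ctx, startTS and
// commitTS are hypothetical placeholders.
//
//	req := &ResolveLockRequest{
//		Context:       ctx,
//		StartVersion:  startTS,
//		CommitVersion: commitTS, // 0 means the transaction was rolled back
//		// Keys restricts resolution to the listed keys; leave it empty to
//		// resolve every lock of the transaction in this region.
//	}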
Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ResolveLockRequest) Reset() { *m = ResolveLockRequest{} } func (m *ResolveLockRequest) String() string { return proto.CompactTextString(m) } func (*ResolveLockRequest) ProtoMessage() {} func (*ResolveLockRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{41} } func (m *ResolveLockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ResolveLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ResolveLockRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ResolveLockRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ResolveLockRequest.Merge(dst, src) } func (m *ResolveLockRequest) XXX_Size() int { return m.Size() } func (m *ResolveLockRequest) XXX_DiscardUnknown() { xxx_messageInfo_ResolveLockRequest.DiscardUnknown(m) } var xxx_messageInfo_ResolveLockRequest proto.InternalMessageInfo func (m *ResolveLockRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *ResolveLockRequest) GetStartVersion() uint64 { if m != nil { return m.StartVersion } return 0 } func (m *ResolveLockRequest) GetCommitVersion() uint64 { if m != nil { return m.CommitVersion } return 0 } func (m *ResolveLockRequest) GetTxnInfos() []*TxnInfo { if m != nil { return m.TxnInfos } return nil } func (m *ResolveLockRequest) GetKeys() [][]byte { if m != nil { return m.Keys } return nil } type ResolveLockResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ResolveLockResponse) Reset() { *m = ResolveLockResponse{} } func (m *ResolveLockResponse) String() string { return proto.CompactTextString(m) } func (*ResolveLockResponse) ProtoMessage() {} func (*ResolveLockResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{42} } func (m *ResolveLockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ResolveLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ResolveLockResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ResolveLockResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ResolveLockResponse.Merge(dst, src) } func (m *ResolveLockResponse) XXX_Size() int { return m.Size() } func (m *ResolveLockResponse) XXX_DiscardUnknown() { xxx_messageInfo_ResolveLockResponse.DiscardUnknown(m) } var xxx_messageInfo_ResolveLockResponse proto.InternalMessageInfo func (m *ResolveLockResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *ResolveLockResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } type GCRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` SafePoint uint64 `protobuf:"varint,2,opt,name=safe_point,json=safePoint,proto3" json:"safe_point,omitempty"` XXX_NoUnkeyedLiteral 
struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GCRequest) Reset() { *m = GCRequest{} } func (m *GCRequest) String() string { return proto.CompactTextString(m) } func (*GCRequest) ProtoMessage() {} func (*GCRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{43} } func (m *GCRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GCRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GCRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *GCRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GCRequest.Merge(dst, src) } func (m *GCRequest) XXX_Size() int { return m.Size() } func (m *GCRequest) XXX_DiscardUnknown() { xxx_messageInfo_GCRequest.DiscardUnknown(m) } var xxx_messageInfo_GCRequest proto.InternalMessageInfo func (m *GCRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *GCRequest) GetSafePoint() uint64 { if m != nil { return m.SafePoint } return 0 } type GCResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error *KeyError `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GCResponse) Reset() { *m = GCResponse{} } func (m *GCResponse) String() string { return proto.CompactTextString(m) } func (*GCResponse) ProtoMessage() {} func (*GCResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{44} } func (m *GCResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GCResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GCResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *GCResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GCResponse.Merge(dst, src) } func (m *GCResponse) XXX_Size() int { return m.Size() } func (m *GCResponse) XXX_DiscardUnknown() { xxx_messageInfo_GCResponse.DiscardUnknown(m) } var xxx_messageInfo_GCResponse proto.InternalMessageInfo func (m *GCResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *GCResponse) GetError() *KeyError { if m != nil { return m.Error } return nil } type RawGetRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawGetRequest) Reset() { *m = RawGetRequest{} } func (m *RawGetRequest) String() string { return proto.CompactTextString(m) } func (*RawGetRequest) ProtoMessage() {} func (*RawGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{45} } func (m *RawGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawGetRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := 
m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawGetRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawGetRequest.Merge(dst, src) } func (m *RawGetRequest) XXX_Size() int { return m.Size() } func (m *RawGetRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawGetRequest.DiscardUnknown(m) } var xxx_messageInfo_RawGetRequest proto.InternalMessageInfo func (m *RawGetRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawGetRequest) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *RawGetRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawGetResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` NotFound bool `protobuf:"varint,4,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawGetResponse) Reset() { *m = RawGetResponse{} } func (m *RawGetResponse) String() string { return proto.CompactTextString(m) } func (*RawGetResponse) ProtoMessage() {} func (*RawGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{46} } func (m *RawGetResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawGetResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawGetResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawGetResponse.Merge(dst, src) } func (m *RawGetResponse) XXX_Size() int { return m.Size() } func (m *RawGetResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawGetResponse.DiscardUnknown(m) } var xxx_messageInfo_RawGetResponse proto.InternalMessageInfo func (m *RawGetResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawGetResponse) GetError() string { if m != nil { return m.Error } return "" } func (m *RawGetResponse) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *RawGetResponse) GetNotFound() bool { if m != nil { return m.NotFound } return false } type RawPutRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` Cf string `protobuf:"bytes,4,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawPutRequest) Reset() { *m = RawPutRequest{} } func (m *RawPutRequest) String() string { return proto.CompactTextString(m) } func (*RawPutRequest) ProtoMessage() {} func (*RawPutRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{47} } func (m *RawPutRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawPutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawPutRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if 
err != nil { return nil, err } return b[:n], nil } } func (dst *RawPutRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawPutRequest.Merge(dst, src) } func (m *RawPutRequest) XXX_Size() int { return m.Size() } func (m *RawPutRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawPutRequest.DiscardUnknown(m) } var xxx_messageInfo_RawPutRequest proto.InternalMessageInfo func (m *RawPutRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawPutRequest) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *RawPutRequest) GetValue() []byte { if m != nil { return m.Value } return nil } func (m *RawPutRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawPutResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawPutResponse) Reset() { *m = RawPutResponse{} } func (m *RawPutResponse) String() string { return proto.CompactTextString(m) } func (*RawPutResponse) ProtoMessage() {} func (*RawPutResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{48} } func (m *RawPutResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawPutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawPutResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawPutResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawPutResponse.Merge(dst, src) } func (m *RawPutResponse) XXX_Size() int { return m.Size() } func (m *RawPutResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawPutResponse.DiscardUnknown(m) } var xxx_messageInfo_RawPutResponse proto.InternalMessageInfo func (m *RawPutResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawPutResponse) GetError() string { if m != nil { return m.Error } return "" } type RawBatchPutRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"` Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchPutRequest) Reset() { *m = RawBatchPutRequest{} } func (m *RawBatchPutRequest) String() string { return proto.CompactTextString(m) } func (*RawBatchPutRequest) ProtoMessage() {} func (*RawBatchPutRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{49} } func (m *RawBatchPutRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchPutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchPutRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchPutRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchPutRequest.Merge(dst, src) } func (m *RawBatchPutRequest) XXX_Size() int { return m.Size() } func (m *RawBatchPutRequest) XXX_DiscardUnknown() { 
xxx_messageInfo_RawBatchPutRequest.DiscardUnknown(m) } var xxx_messageInfo_RawBatchPutRequest proto.InternalMessageInfo func (m *RawBatchPutRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawBatchPutRequest) GetPairs() []*KvPair { if m != nil { return m.Pairs } return nil } func (m *RawBatchPutRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawBatchPutResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchPutResponse) Reset() { *m = RawBatchPutResponse{} } func (m *RawBatchPutResponse) String() string { return proto.CompactTextString(m) } func (*RawBatchPutResponse) ProtoMessage() {} func (*RawBatchPutResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{50} } func (m *RawBatchPutResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchPutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchPutResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchPutResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchPutResponse.Merge(dst, src) } func (m *RawBatchPutResponse) XXX_Size() int { return m.Size() } func (m *RawBatchPutResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchPutResponse.DiscardUnknown(m) } var xxx_messageInfo_RawBatchPutResponse proto.InternalMessageInfo func (m *RawBatchPutResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawBatchPutResponse) GetError() string { if m != nil { return m.Error } return "" } type RawBatchGetRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"` Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchGetRequest) Reset() { *m = RawBatchGetRequest{} } func (m *RawBatchGetRequest) String() string { return proto.CompactTextString(m) } func (*RawBatchGetRequest) ProtoMessage() {} func (*RawBatchGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{51} } func (m *RawBatchGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchGetRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchGetRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchGetRequest.Merge(dst, src) } func (m *RawBatchGetRequest) XXX_Size() int { return m.Size() } func (m *RawBatchGetRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchGetRequest.DiscardUnknown(m) } var xxx_messageInfo_RawBatchGetRequest proto.InternalMessageInfo func (m *RawBatchGetRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawBatchGetRequest) GetKeys() [][]byte { if m != nil { 
return m.Keys } return nil } func (m *RawBatchGetRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawBatchGetResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Pairs []*KvPair `protobuf:"bytes,2,rep,name=pairs" json:"pairs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchGetResponse) Reset() { *m = RawBatchGetResponse{} } func (m *RawBatchGetResponse) String() string { return proto.CompactTextString(m) } func (*RawBatchGetResponse) ProtoMessage() {} func (*RawBatchGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{52} } func (m *RawBatchGetResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchGetResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchGetResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchGetResponse.Merge(dst, src) } func (m *RawBatchGetResponse) XXX_Size() int { return m.Size() } func (m *RawBatchGetResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchGetResponse.DiscardUnknown(m) } var xxx_messageInfo_RawBatchGetResponse proto.InternalMessageInfo func (m *RawBatchGetResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawBatchGetResponse) GetPairs() []*KvPair { if m != nil { return m.Pairs } return nil } type RawDeleteRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawDeleteRequest) Reset() { *m = RawDeleteRequest{} } func (m *RawDeleteRequest) String() string { return proto.CompactTextString(m) } func (*RawDeleteRequest) ProtoMessage() {} func (*RawDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{53} } func (m *RawDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawDeleteRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawDeleteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawDeleteRequest.Merge(dst, src) } func (m *RawDeleteRequest) XXX_Size() int { return m.Size() } func (m *RawDeleteRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawDeleteRequest.DiscardUnknown(m) } var xxx_messageInfo_RawDeleteRequest proto.InternalMessageInfo func (m *RawDeleteRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawDeleteRequest) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *RawDeleteRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawDeleteResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" 
json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawDeleteResponse) Reset() { *m = RawDeleteResponse{} } func (m *RawDeleteResponse) String() string { return proto.CompactTextString(m) } func (*RawDeleteResponse) ProtoMessage() {} func (*RawDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{54} } func (m *RawDeleteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawDeleteResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawDeleteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawDeleteResponse.Merge(dst, src) } func (m *RawDeleteResponse) XXX_Size() int { return m.Size() } func (m *RawDeleteResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawDeleteResponse.DiscardUnknown(m) } var xxx_messageInfo_RawDeleteResponse proto.InternalMessageInfo func (m *RawDeleteResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawDeleteResponse) GetError() string { if m != nil { return m.Error } return "" } type RawBatchDeleteRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Keys [][]byte `protobuf:"bytes,2,rep,name=keys" json:"keys,omitempty"` Cf string `protobuf:"bytes,3,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchDeleteRequest) Reset() { *m = RawBatchDeleteRequest{} } func (m *RawBatchDeleteRequest) String() string { return proto.CompactTextString(m) } func (*RawBatchDeleteRequest) ProtoMessage() {} func (*RawBatchDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{55} } func (m *RawBatchDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchDeleteRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchDeleteRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchDeleteRequest.Merge(dst, src) } func (m *RawBatchDeleteRequest) XXX_Size() int { return m.Size() } func (m *RawBatchDeleteRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchDeleteRequest.DiscardUnknown(m) } var xxx_messageInfo_RawBatchDeleteRequest proto.InternalMessageInfo func (m *RawBatchDeleteRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawBatchDeleteRequest) GetKeys() [][]byte { if m != nil { return m.Keys } return nil } func (m *RawBatchDeleteRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawBatchDeleteResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchDeleteResponse) Reset() { *m = RawBatchDeleteResponse{} } func (m *RawBatchDeleteResponse) String() string { 
return proto.CompactTextString(m) } func (*RawBatchDeleteResponse) ProtoMessage() {} func (*RawBatchDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{56} } func (m *RawBatchDeleteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchDeleteResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchDeleteResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchDeleteResponse.Merge(dst, src) } func (m *RawBatchDeleteResponse) XXX_Size() int { return m.Size() } func (m *RawBatchDeleteResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchDeleteResponse.DiscardUnknown(m) } var xxx_messageInfo_RawBatchDeleteResponse proto.InternalMessageInfo func (m *RawBatchDeleteResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawBatchDeleteResponse) GetError() string { if m != nil { return m.Error } return "" } type DeleteRangeRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` NotifyOnly bool `protobuf:"varint,4,opt,name=notify_only,json=notifyOnly,proto3" json:"notify_only,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{57} } func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *DeleteRangeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_DeleteRangeRequest.Merge(dst, src) } func (m *DeleteRangeRequest) XXX_Size() int { return m.Size() } func (m *DeleteRangeRequest) XXX_DiscardUnknown() { xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m) } var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo func (m *DeleteRangeRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *DeleteRangeRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *DeleteRangeRequest) GetEndKey() []byte { if m != nil { return m.EndKey } return nil } func (m *DeleteRangeRequest) GetNotifyOnly() bool { if m != nil { return m.NotifyOnly } return false } type DeleteRangeResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } func (m 
*DeleteRangeResponse) String() string { return proto.CompactTextString(m) } func (*DeleteRangeResponse) ProtoMessage() {} func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{58} } func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *DeleteRangeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_DeleteRangeResponse.Merge(dst, src) } func (m *DeleteRangeResponse) XXX_Size() int { return m.Size() } func (m *DeleteRangeResponse) XXX_DiscardUnknown() { xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m) } var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo func (m *DeleteRangeResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *DeleteRangeResponse) GetError() string { if m != nil { return m.Error } return "" } type RawDeleteRangeRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` Cf string `protobuf:"bytes,4,opt,name=cf,proto3" json:"cf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawDeleteRangeRequest) Reset() { *m = RawDeleteRangeRequest{} } func (m *RawDeleteRangeRequest) String() string { return proto.CompactTextString(m) } func (*RawDeleteRangeRequest) ProtoMessage() {} func (*RawDeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{59} } func (m *RawDeleteRangeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawDeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawDeleteRangeRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawDeleteRangeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawDeleteRangeRequest.Merge(dst, src) } func (m *RawDeleteRangeRequest) XXX_Size() int { return m.Size() } func (m *RawDeleteRangeRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawDeleteRangeRequest.DiscardUnknown(m) } var xxx_messageInfo_RawDeleteRangeRequest proto.InternalMessageInfo func (m *RawDeleteRangeRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawDeleteRangeRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *RawDeleteRangeRequest) GetEndKey() []byte { if m != nil { return m.EndKey } return nil } func (m *RawDeleteRangeRequest) GetCf() string { if m != nil { return m.Cf } return "" } type RawDeleteRangeResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawDeleteRangeResponse) Reset() { *m = RawDeleteRangeResponse{} } 
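// Illustrative sketch, not part of the protoc-gen-gofast output: a minimal
// round trip through the generated fast-path marshaling code. The helper
// name and field values are hypothetical; it assumes the plugin's usual
// Marshal()/Unmarshal() methods, which accompany the MarshalTo/Size/Unmarshal
// calls used by the XXX_* wrappers in this file.
func exampleRawDeleteRangeRoundTrip() error {
	req := &RawDeleteRangeRequest{
		Context:  &Context{},  // region/peer routing fields omitted for brevity
		StartKey: []byte("a"), // inclusive lower bound of the range to delete
		EndKey:   []byte("z"), // exclusive upper bound
		Cf:       "default",   // target column family (assumed value)
	}
	buf, err := req.Marshal() // gogo fast path: Size() then MarshalTo()
	if err != nil {
		return err
	}
	var decoded RawDeleteRangeRequest
	return decoded.Unmarshal(buf) // same path XXX_Unmarshal delegates to
}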
func (m *RawDeleteRangeResponse) String() string { return proto.CompactTextString(m) } func (*RawDeleteRangeResponse) ProtoMessage() {} func (*RawDeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{60} } func (m *RawDeleteRangeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawDeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawDeleteRangeResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawDeleteRangeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawDeleteRangeResponse.Merge(dst, src) } func (m *RawDeleteRangeResponse) XXX_Size() int { return m.Size() } func (m *RawDeleteRangeResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawDeleteRangeResponse.DiscardUnknown(m) } var xxx_messageInfo_RawDeleteRangeResponse proto.InternalMessageInfo func (m *RawDeleteRangeResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawDeleteRangeResponse) GetError() string { if m != nil { return m.Error } return "" } type RawScanRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` KeyOnly bool `protobuf:"varint,4,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"` Cf string `protobuf:"bytes,5,opt,name=cf,proto3" json:"cf,omitempty"` Reverse bool `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"` // For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key; // and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key. 
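// As a concrete illustration (not an upstream comment): a forward scan with
// start_key="a" and end_key="c" returns keys k with "a" <= k < "c" in
// ascending order; a reverse scan with start_key="c" and end_key="a" returns
// the same key range in descending order.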
EndKey []byte `protobuf:"bytes,7,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawScanRequest) Reset() { *m = RawScanRequest{} } func (m *RawScanRequest) String() string { return proto.CompactTextString(m) } func (*RawScanRequest) ProtoMessage() {} func (*RawScanRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{61} } func (m *RawScanRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawScanRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawScanRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawScanRequest.Merge(dst, src) } func (m *RawScanRequest) XXX_Size() int { return m.Size() } func (m *RawScanRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawScanRequest.DiscardUnknown(m) } var xxx_messageInfo_RawScanRequest proto.InternalMessageInfo func (m *RawScanRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawScanRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *RawScanRequest) GetLimit() uint32 { if m != nil { return m.Limit } return 0 } func (m *RawScanRequest) GetKeyOnly() bool { if m != nil { return m.KeyOnly } return false } func (m *RawScanRequest) GetCf() string { if m != nil { return m.Cf } return "" } func (m *RawScanRequest) GetReverse() bool { if m != nil { return m.Reverse } return false } func (m *RawScanRequest) GetEndKey() []byte { if m != nil { return m.EndKey } return nil } type RawScanResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Kvs []*KvPair `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawScanResponse) Reset() { *m = RawScanResponse{} } func (m *RawScanResponse) String() string { return proto.CompactTextString(m) } func (*RawScanResponse) ProtoMessage() {} func (*RawScanResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{62} } func (m *RawScanResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawScanResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawScanResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawScanResponse.Merge(dst, src) } func (m *RawScanResponse) XXX_Size() int { return m.Size() } func (m *RawScanResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawScanResponse.DiscardUnknown(m) } var xxx_messageInfo_RawScanResponse proto.InternalMessageInfo func (m *RawScanResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawScanResponse) GetKvs() []*KvPair { if m != nil { return m.Kvs } return nil } type KeyRange struct { StartKey []byte `protobuf:"bytes,1,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` EndKey []byte `protobuf:"bytes,2,opt,name=end_key,json=endKey,proto3" 
json:"end_key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *KeyRange) Reset() { *m = KeyRange{} } func (m *KeyRange) String() string { return proto.CompactTextString(m) } func (*KeyRange) ProtoMessage() {} func (*KeyRange) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{63} } func (m *KeyRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *KeyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_KeyRange.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *KeyRange) XXX_Merge(src proto.Message) { xxx_messageInfo_KeyRange.Merge(dst, src) } func (m *KeyRange) XXX_Size() int { return m.Size() } func (m *KeyRange) XXX_DiscardUnknown() { xxx_messageInfo_KeyRange.DiscardUnknown(m) } var xxx_messageInfo_KeyRange proto.InternalMessageInfo func (m *KeyRange) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *KeyRange) GetEndKey() []byte { if m != nil { return m.EndKey } return nil } type RawBatchScanRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Ranges []*KeyRange `protobuf:"bytes,2,rep,name=ranges" json:"ranges,omitempty"` EachLimit uint32 `protobuf:"varint,3,opt,name=each_limit,json=eachLimit,proto3" json:"each_limit,omitempty"` KeyOnly bool `protobuf:"varint,4,opt,name=key_only,json=keyOnly,proto3" json:"key_only,omitempty"` Cf string `protobuf:"bytes,5,opt,name=cf,proto3" json:"cf,omitempty"` Reverse bool `protobuf:"varint,6,opt,name=reverse,proto3" json:"reverse,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchScanRequest) Reset() { *m = RawBatchScanRequest{} } func (m *RawBatchScanRequest) String() string { return proto.CompactTextString(m) } func (*RawBatchScanRequest) ProtoMessage() {} func (*RawBatchScanRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{64} } func (m *RawBatchScanRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchScanRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchScanRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchScanRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchScanRequest.Merge(dst, src) } func (m *RawBatchScanRequest) XXX_Size() int { return m.Size() } func (m *RawBatchScanRequest) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchScanRequest.DiscardUnknown(m) } var xxx_messageInfo_RawBatchScanRequest proto.InternalMessageInfo func (m *RawBatchScanRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RawBatchScanRequest) GetRanges() []*KeyRange { if m != nil { return m.Ranges } return nil } func (m *RawBatchScanRequest) GetEachLimit() uint32 { if m != nil { return m.EachLimit } return 0 } func (m *RawBatchScanRequest) GetKeyOnly() bool { if m != nil { return m.KeyOnly } return false } func (m *RawBatchScanRequest) GetCf() string { if m != nil { return m.Cf } return "" } func (m *RawBatchScanRequest) GetReverse() bool { if m != nil { return m.Reverse } return false } type RawBatchScanResponse struct { RegionError 
*errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Kvs []*KvPair `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RawBatchScanResponse) Reset() { *m = RawBatchScanResponse{} } func (m *RawBatchScanResponse) String() string { return proto.CompactTextString(m) } func (*RawBatchScanResponse) ProtoMessage() {} func (*RawBatchScanResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{65} } func (m *RawBatchScanResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RawBatchScanResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RawBatchScanResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RawBatchScanResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RawBatchScanResponse.Merge(dst, src) } func (m *RawBatchScanResponse) XXX_Size() int { return m.Size() } func (m *RawBatchScanResponse) XXX_DiscardUnknown() { xxx_messageInfo_RawBatchScanResponse.DiscardUnknown(m) } var xxx_messageInfo_RawBatchScanResponse proto.InternalMessageInfo func (m *RawBatchScanResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *RawBatchScanResponse) GetKvs() []*KvPair { if m != nil { return m.Kvs } return nil } type MvccWrite struct { Type Op `protobuf:"varint,1,opt,name=type,proto3,enum=kvrpcpb.Op" json:"type,omitempty"` StartTs uint64 `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` CommitTs uint64 `protobuf:"varint,3,opt,name=commit_ts,json=commitTs,proto3" json:"commit_ts,omitempty"` ShortValue []byte `protobuf:"bytes,4,opt,name=short_value,json=shortValue,proto3" json:"short_value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccWrite) Reset() { *m = MvccWrite{} } func (m *MvccWrite) String() string { return proto.CompactTextString(m) } func (*MvccWrite) ProtoMessage() {} func (*MvccWrite) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{66} } func (m *MvccWrite) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccWrite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccWrite.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *MvccWrite) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccWrite.Merge(dst, src) } func (m *MvccWrite) XXX_Size() int { return m.Size() } func (m *MvccWrite) XXX_DiscardUnknown() { xxx_messageInfo_MvccWrite.DiscardUnknown(m) } var xxx_messageInfo_MvccWrite proto.InternalMessageInfo func (m *MvccWrite) GetType() Op { if m != nil { return m.Type } return Op_Put } func (m *MvccWrite) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } func (m *MvccWrite) GetCommitTs() uint64 { if m != nil { return m.CommitTs } return 0 } func (m *MvccWrite) GetShortValue() []byte { if m != nil { return m.ShortValue } return nil } type MvccValue struct { StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` 
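// Note (editorial, not generated): the XXX_* fields here and on every other
// message in this file are protobuf bookkeeping. XXX_unrecognized preserves
// unknown fields read off the wire so they survive a re-marshal, and
// XXX_sizecache caches the encoded size for the runtime.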
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccValue) Reset() { *m = MvccValue{} } func (m *MvccValue) String() string { return proto.CompactTextString(m) } func (*MvccValue) ProtoMessage() {} func (*MvccValue) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{67} } func (m *MvccValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccValue.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *MvccValue) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccValue.Merge(dst, src) } func (m *MvccValue) XXX_Size() int { return m.Size() } func (m *MvccValue) XXX_DiscardUnknown() { xxx_messageInfo_MvccValue.DiscardUnknown(m) } var xxx_messageInfo_MvccValue proto.InternalMessageInfo func (m *MvccValue) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } func (m *MvccValue) GetValue() []byte { if m != nil { return m.Value } return nil } type MvccLock struct { Type Op `protobuf:"varint,1,opt,name=type,proto3,enum=kvrpcpb.Op" json:"type,omitempty"` StartTs uint64 `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` Primary []byte `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` ShortValue []byte `protobuf:"bytes,4,opt,name=short_value,json=shortValue,proto3" json:"short_value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccLock) Reset() { *m = MvccLock{} } func (m *MvccLock) String() string { return proto.CompactTextString(m) } func (*MvccLock) ProtoMessage() {} func (*MvccLock) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{68} } func (m *MvccLock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccLock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccLock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *MvccLock) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccLock.Merge(dst, src) } func (m *MvccLock) XXX_Size() int { return m.Size() } func (m *MvccLock) XXX_DiscardUnknown() { xxx_messageInfo_MvccLock.DiscardUnknown(m) } var xxx_messageInfo_MvccLock proto.InternalMessageInfo func (m *MvccLock) GetType() Op { if m != nil { return m.Type } return Op_Put } func (m *MvccLock) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } func (m *MvccLock) GetPrimary() []byte { if m != nil { return m.Primary } return nil } func (m *MvccLock) GetShortValue() []byte { if m != nil { return m.ShortValue } return nil } type MvccInfo struct { Lock *MvccLock `protobuf:"bytes,1,opt,name=lock" json:"lock,omitempty"` Writes []*MvccWrite `protobuf:"bytes,2,rep,name=writes" json:"writes,omitempty"` Values []*MvccValue `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccInfo) Reset() { *m = MvccInfo{} } func (m *MvccInfo) String() string { return proto.CompactTextString(m) } func (*MvccInfo) ProtoMessage() {} func (*MvccInfo) Descriptor() ([]byte, []int) { return 
fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{69} } func (m *MvccInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *MvccInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccInfo.Merge(dst, src) } func (m *MvccInfo) XXX_Size() int { return m.Size() } func (m *MvccInfo) XXX_DiscardUnknown() { xxx_messageInfo_MvccInfo.DiscardUnknown(m) } var xxx_messageInfo_MvccInfo proto.InternalMessageInfo func (m *MvccInfo) GetLock() *MvccLock { if m != nil { return m.Lock } return nil } func (m *MvccInfo) GetWrites() []*MvccWrite { if m != nil { return m.Writes } return nil } func (m *MvccInfo) GetValues() []*MvccValue { if m != nil { return m.Values } return nil } type MvccGetByKeyRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccGetByKeyRequest) Reset() { *m = MvccGetByKeyRequest{} } func (m *MvccGetByKeyRequest) String() string { return proto.CompactTextString(m) } func (*MvccGetByKeyRequest) ProtoMessage() {} func (*MvccGetByKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{70} } func (m *MvccGetByKeyRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccGetByKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccGetByKeyRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *MvccGetByKeyRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccGetByKeyRequest.Merge(dst, src) } func (m *MvccGetByKeyRequest) XXX_Size() int { return m.Size() } func (m *MvccGetByKeyRequest) XXX_DiscardUnknown() { xxx_messageInfo_MvccGetByKeyRequest.DiscardUnknown(m) } var xxx_messageInfo_MvccGetByKeyRequest proto.InternalMessageInfo func (m *MvccGetByKeyRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *MvccGetByKeyRequest) GetKey() []byte { if m != nil { return m.Key } return nil } type MvccGetByKeyResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` Info *MvccInfo `protobuf:"bytes,3,opt,name=info" json:"info,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccGetByKeyResponse) Reset() { *m = MvccGetByKeyResponse{} } func (m *MvccGetByKeyResponse) String() string { return proto.CompactTextString(m) } func (*MvccGetByKeyResponse) ProtoMessage() {} func (*MvccGetByKeyResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{71} } func (m *MvccGetByKeyResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccGetByKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccGetByKeyResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { 
return nil, err } return b[:n], nil } } func (dst *MvccGetByKeyResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccGetByKeyResponse.Merge(dst, src) } func (m *MvccGetByKeyResponse) XXX_Size() int { return m.Size() } func (m *MvccGetByKeyResponse) XXX_DiscardUnknown() { xxx_messageInfo_MvccGetByKeyResponse.DiscardUnknown(m) } var xxx_messageInfo_MvccGetByKeyResponse proto.InternalMessageInfo func (m *MvccGetByKeyResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *MvccGetByKeyResponse) GetError() string { if m != nil { return m.Error } return "" } func (m *MvccGetByKeyResponse) GetInfo() *MvccInfo { if m != nil { return m.Info } return nil } type MvccGetByStartTsRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartTs uint64 `protobuf:"varint,2,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccGetByStartTsRequest) Reset() { *m = MvccGetByStartTsRequest{} } func (m *MvccGetByStartTsRequest) String() string { return proto.CompactTextString(m) } func (*MvccGetByStartTsRequest) ProtoMessage() {} func (*MvccGetByStartTsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{72} } func (m *MvccGetByStartTsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccGetByStartTsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccGetByStartTsRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *MvccGetByStartTsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccGetByStartTsRequest.Merge(dst, src) } func (m *MvccGetByStartTsRequest) XXX_Size() int { return m.Size() } func (m *MvccGetByStartTsRequest) XXX_DiscardUnknown() { xxx_messageInfo_MvccGetByStartTsRequest.DiscardUnknown(m) } var xxx_messageInfo_MvccGetByStartTsRequest proto.InternalMessageInfo func (m *MvccGetByStartTsRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *MvccGetByStartTsRequest) GetStartTs() uint64 { if m != nil { return m.StartTs } return 0 } type MvccGetByStartTsResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` Info *MvccInfo `protobuf:"bytes,4,opt,name=info" json:"info,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MvccGetByStartTsResponse) Reset() { *m = MvccGetByStartTsResponse{} } func (m *MvccGetByStartTsResponse) String() string { return proto.CompactTextString(m) } func (*MvccGetByStartTsResponse) ProtoMessage() {} func (*MvccGetByStartTsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{73} } func (m *MvccGetByStartTsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MvccGetByStartTsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MvccGetByStartTsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], 
nil } } func (dst *MvccGetByStartTsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_MvccGetByStartTsResponse.Merge(dst, src) } func (m *MvccGetByStartTsResponse) XXX_Size() int { return m.Size() } func (m *MvccGetByStartTsResponse) XXX_DiscardUnknown() { xxx_messageInfo_MvccGetByStartTsResponse.DiscardUnknown(m) } var xxx_messageInfo_MvccGetByStartTsResponse proto.InternalMessageInfo func (m *MvccGetByStartTsResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *MvccGetByStartTsResponse) GetError() string { if m != nil { return m.Error } return "" } func (m *MvccGetByStartTsResponse) GetKey() []byte { if m != nil { return m.Key } return nil } func (m *MvccGetByStartTsResponse) GetInfo() *MvccInfo { if m != nil { return m.Info } return nil } type SplitRegionRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` SplitKey []byte `protobuf:"bytes,2,opt,name=split_key,json=splitKey,proto3" json:"split_key,omitempty"` // Deprecated: Do not use. SplitKeys [][]byte `protobuf:"bytes,3,rep,name=split_keys,json=splitKeys" json:"split_keys,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SplitRegionRequest) Reset() { *m = SplitRegionRequest{} } func (m *SplitRegionRequest) String() string { return proto.CompactTextString(m) } func (*SplitRegionRequest) ProtoMessage() {} func (*SplitRegionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{74} } func (m *SplitRegionRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SplitRegionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SplitRegionRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *SplitRegionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SplitRegionRequest.Merge(dst, src) } func (m *SplitRegionRequest) XXX_Size() int { return m.Size() } func (m *SplitRegionRequest) XXX_DiscardUnknown() { xxx_messageInfo_SplitRegionRequest.DiscardUnknown(m) } var xxx_messageInfo_SplitRegionRequest proto.InternalMessageInfo func (m *SplitRegionRequest) GetContext() *Context { if m != nil { return m.Context } return nil } // Deprecated: Do not use. func (m *SplitRegionRequest) GetSplitKey() []byte { if m != nil { return m.SplitKey } return nil } func (m *SplitRegionRequest) GetSplitKeys() [][]byte { if m != nil { return m.SplitKeys } return nil } type SplitRegionResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Left *metapb.Region `protobuf:"bytes,2,opt,name=left" json:"left,omitempty"` // Deprecated: Do not use. Right *metapb.Region `protobuf:"bytes,3,opt,name=right" json:"right,omitempty"` // Deprecated: Do not use. 
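// Regions lists every region produced by the split; judging by the
// deprecation markers above, it supersedes the single left/right pair that
// older single-key splits returned (editorial inference, not an upstream
// comment).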
Regions []*metapb.Region `protobuf:"bytes,4,rep,name=regions" json:"regions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SplitRegionResponse) Reset() { *m = SplitRegionResponse{} } func (m *SplitRegionResponse) String() string { return proto.CompactTextString(m) } func (*SplitRegionResponse) ProtoMessage() {} func (*SplitRegionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{75} } func (m *SplitRegionResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SplitRegionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SplitRegionResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *SplitRegionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SplitRegionResponse.Merge(dst, src) } func (m *SplitRegionResponse) XXX_Size() int { return m.Size() } func (m *SplitRegionResponse) XXX_DiscardUnknown() { xxx_messageInfo_SplitRegionResponse.DiscardUnknown(m) } var xxx_messageInfo_SplitRegionResponse proto.InternalMessageInfo func (m *SplitRegionResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } // Deprecated: Do not use. func (m *SplitRegionResponse) GetLeft() *metapb.Region { if m != nil { return m.Left } return nil } // Deprecated: Do not use. func (m *SplitRegionResponse) GetRight() *metapb.Region { if m != nil { return m.Right } return nil } func (m *SplitRegionResponse) GetRegions() []*metapb.Region { if m != nil { return m.Regions } return nil } type UnsafeDestroyRangeRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` StartKey []byte `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` EndKey []byte `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *UnsafeDestroyRangeRequest) Reset() { *m = UnsafeDestroyRangeRequest{} } func (m *UnsafeDestroyRangeRequest) String() string { return proto.CompactTextString(m) } func (*UnsafeDestroyRangeRequest) ProtoMessage() {} func (*UnsafeDestroyRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{76} } func (m *UnsafeDestroyRangeRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UnsafeDestroyRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UnsafeDestroyRangeRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *UnsafeDestroyRangeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UnsafeDestroyRangeRequest.Merge(dst, src) } func (m *UnsafeDestroyRangeRequest) XXX_Size() int { return m.Size() } func (m *UnsafeDestroyRangeRequest) XXX_DiscardUnknown() { xxx_messageInfo_UnsafeDestroyRangeRequest.DiscardUnknown(m) } var xxx_messageInfo_UnsafeDestroyRangeRequest proto.InternalMessageInfo func (m *UnsafeDestroyRangeRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *UnsafeDestroyRangeRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *UnsafeDestroyRangeRequest) GetEndKey() []byte { if 
m != nil { return m.EndKey } return nil } type UnsafeDestroyRangeResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *UnsafeDestroyRangeResponse) Reset() { *m = UnsafeDestroyRangeResponse{} } func (m *UnsafeDestroyRangeResponse) String() string { return proto.CompactTextString(m) } func (*UnsafeDestroyRangeResponse) ProtoMessage() {} func (*UnsafeDestroyRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{77} } func (m *UnsafeDestroyRangeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UnsafeDestroyRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UnsafeDestroyRangeResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *UnsafeDestroyRangeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UnsafeDestroyRangeResponse.Merge(dst, src) } func (m *UnsafeDestroyRangeResponse) XXX_Size() int { return m.Size() } func (m *UnsafeDestroyRangeResponse) XXX_DiscardUnknown() { xxx_messageInfo_UnsafeDestroyRangeResponse.DiscardUnknown(m) } var xxx_messageInfo_UnsafeDestroyRangeResponse proto.InternalMessageInfo func (m *UnsafeDestroyRangeResponse) GetRegionError() *errorpb.Error { if m != nil { return m.RegionError } return nil } func (m *UnsafeDestroyRangeResponse) GetError() string { if m != nil { return m.Error } return "" } type RegisterLockObserverRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` MaxTs uint64 `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RegisterLockObserverRequest) Reset() { *m = RegisterLockObserverRequest{} } func (m *RegisterLockObserverRequest) String() string { return proto.CompactTextString(m) } func (*RegisterLockObserverRequest) ProtoMessage() {} func (*RegisterLockObserverRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{78} } func (m *RegisterLockObserverRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RegisterLockObserverRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RegisterLockObserverRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RegisterLockObserverRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RegisterLockObserverRequest.Merge(dst, src) } func (m *RegisterLockObserverRequest) XXX_Size() int { return m.Size() } func (m *RegisterLockObserverRequest) XXX_DiscardUnknown() { xxx_messageInfo_RegisterLockObserverRequest.DiscardUnknown(m) } var xxx_messageInfo_RegisterLockObserverRequest proto.InternalMessageInfo func (m *RegisterLockObserverRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RegisterLockObserverRequest) GetMaxTs() uint64 { if m != nil { return m.MaxTs } return 0 } type RegisterLockObserverResponse struct { Error string `protobuf:"bytes,1,opt,name=error,proto3" 
json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RegisterLockObserverResponse) Reset() { *m = RegisterLockObserverResponse{} } func (m *RegisterLockObserverResponse) String() string { return proto.CompactTextString(m) } func (*RegisterLockObserverResponse) ProtoMessage() {} func (*RegisterLockObserverResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{79} } func (m *RegisterLockObserverResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RegisterLockObserverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RegisterLockObserverResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RegisterLockObserverResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RegisterLockObserverResponse.Merge(dst, src) } func (m *RegisterLockObserverResponse) XXX_Size() int { return m.Size() } func (m *RegisterLockObserverResponse) XXX_DiscardUnknown() { xxx_messageInfo_RegisterLockObserverResponse.DiscardUnknown(m) } var xxx_messageInfo_RegisterLockObserverResponse proto.InternalMessageInfo func (m *RegisterLockObserverResponse) GetError() string { if m != nil { return m.Error } return "" } type CheckLockObserverRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` MaxTs uint64 `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CheckLockObserverRequest) Reset() { *m = CheckLockObserverRequest{} } func (m *CheckLockObserverRequest) String() string { return proto.CompactTextString(m) } func (*CheckLockObserverRequest) ProtoMessage() {} func (*CheckLockObserverRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{80} } func (m *CheckLockObserverRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckLockObserverRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckLockObserverRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CheckLockObserverRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckLockObserverRequest.Merge(dst, src) } func (m *CheckLockObserverRequest) XXX_Size() int { return m.Size() } func (m *CheckLockObserverRequest) XXX_DiscardUnknown() { xxx_messageInfo_CheckLockObserverRequest.DiscardUnknown(m) } var xxx_messageInfo_CheckLockObserverRequest proto.InternalMessageInfo func (m *CheckLockObserverRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *CheckLockObserverRequest) GetMaxTs() uint64 { if m != nil { return m.MaxTs } return 0 } type CheckLockObserverResponse struct { Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` IsClean bool `protobuf:"varint,2,opt,name=is_clean,json=isClean,proto3" json:"is_clean,omitempty"` Locks []*LockInfo `protobuf:"bytes,3,rep,name=locks" json:"locks,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CheckLockObserverResponse) Reset() { *m = CheckLockObserverResponse{} } func (m 
*CheckLockObserverResponse) String() string { return proto.CompactTextString(m) } func (*CheckLockObserverResponse) ProtoMessage() {} func (*CheckLockObserverResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{81} } func (m *CheckLockObserverResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckLockObserverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckLockObserverResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *CheckLockObserverResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckLockObserverResponse.Merge(dst, src) } func (m *CheckLockObserverResponse) XXX_Size() int { return m.Size() } func (m *CheckLockObserverResponse) XXX_DiscardUnknown() { xxx_messageInfo_CheckLockObserverResponse.DiscardUnknown(m) } var xxx_messageInfo_CheckLockObserverResponse proto.InternalMessageInfo func (m *CheckLockObserverResponse) GetError() string { if m != nil { return m.Error } return "" } func (m *CheckLockObserverResponse) GetIsClean() bool { if m != nil { return m.IsClean } return false } func (m *CheckLockObserverResponse) GetLocks() []*LockInfo { if m != nil { return m.Locks } return nil } type RemoveLockObserverRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` MaxTs uint64 `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RemoveLockObserverRequest) Reset() { *m = RemoveLockObserverRequest{} } func (m *RemoveLockObserverRequest) String() string { return proto.CompactTextString(m) } func (*RemoveLockObserverRequest) ProtoMessage() {} func (*RemoveLockObserverRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{82} } func (m *RemoveLockObserverRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RemoveLockObserverRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RemoveLockObserverRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RemoveLockObserverRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RemoveLockObserverRequest.Merge(dst, src) } func (m *RemoveLockObserverRequest) XXX_Size() int { return m.Size() } func (m *RemoveLockObserverRequest) XXX_DiscardUnknown() { xxx_messageInfo_RemoveLockObserverRequest.DiscardUnknown(m) } var xxx_messageInfo_RemoveLockObserverRequest proto.InternalMessageInfo func (m *RemoveLockObserverRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *RemoveLockObserverRequest) GetMaxTs() uint64 { if m != nil { return m.MaxTs } return 0 } type RemoveLockObserverResponse struct { Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RemoveLockObserverResponse) Reset() { *m = RemoveLockObserverResponse{} } func (m *RemoveLockObserverResponse) String() string { return proto.CompactTextString(m) } func (*RemoveLockObserverResponse) ProtoMessage() {} func (*RemoveLockObserverResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{83} } func (m *RemoveLockObserverResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RemoveLockObserverResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RemoveLockObserverResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *RemoveLockObserverResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RemoveLockObserverResponse.Merge(dst, src) } func (m *RemoveLockObserverResponse) XXX_Size() int { return m.Size() } func (m *RemoveLockObserverResponse) XXX_DiscardUnknown() { xxx_messageInfo_RemoveLockObserverResponse.DiscardUnknown(m) } var xxx_messageInfo_RemoveLockObserverResponse proto.InternalMessageInfo func (m *RemoveLockObserverResponse) GetError() string { if m != nil { return m.Error } return "" } type PhysicalScanLockRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` MaxTs uint64 `protobuf:"varint,2,opt,name=max_ts,json=maxTs,proto3" json:"max_ts,omitempty"` StartKey []byte `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PhysicalScanLockRequest) Reset() { *m = PhysicalScanLockRequest{} } func (m *PhysicalScanLockRequest) String() string { return proto.CompactTextString(m) } func (*PhysicalScanLockRequest) ProtoMessage() {} func (*PhysicalScanLockRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{84} } func (m *PhysicalScanLockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PhysicalScanLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PhysicalScanLockRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PhysicalScanLockRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PhysicalScanLockRequest.Merge(dst, src) } func (m *PhysicalScanLockRequest) XXX_Size() int { return m.Size() } func (m *PhysicalScanLockRequest) XXX_DiscardUnknown() { xxx_messageInfo_PhysicalScanLockRequest.DiscardUnknown(m) } var xxx_messageInfo_PhysicalScanLockRequest proto.InternalMessageInfo func (m *PhysicalScanLockRequest) GetContext() *Context { if m != nil { return m.Context } return nil } func (m *PhysicalScanLockRequest) GetMaxTs() uint64 { if m != nil { return m.MaxTs } return 0 } func (m *PhysicalScanLockRequest) GetStartKey() []byte { if m != nil { return m.StartKey } return nil } func (m *PhysicalScanLockRequest) GetLimit() uint32 { if m != nil { return m.Limit } return 0 } type PhysicalScanLockResponse struct { Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` Locks []*LockInfo `protobuf:"bytes,2,rep,name=locks" json:"locks,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PhysicalScanLockResponse) Reset() { *m = PhysicalScanLockResponse{} } func (m *PhysicalScanLockResponse) String() string { return proto.CompactTextString(m) } func (*PhysicalScanLockResponse) ProtoMessage() {} func (*PhysicalScanLockResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{85} } func (m *PhysicalScanLockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PhysicalScanLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PhysicalScanLockResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *PhysicalScanLockResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PhysicalScanLockResponse.Merge(dst, src) } func (m *PhysicalScanLockResponse) XXX_Size() int { return m.Size() } func (m *PhysicalScanLockResponse) XXX_DiscardUnknown() { xxx_messageInfo_PhysicalScanLockResponse.DiscardUnknown(m) } var xxx_messageInfo_PhysicalScanLockResponse proto.InternalMessageInfo func (m *PhysicalScanLockResponse) GetError() string { if m != nil { return m.Error } return "" } func (m *PhysicalScanLockResponse) GetLocks() []*LockInfo { if m != nil { return m.Locks } return nil } type ReadIndexRequest struct { Context *Context `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ReadIndexRequest) Reset() { *m = ReadIndexRequest{} } func (m *ReadIndexRequest) String() string { return proto.CompactTextString(m) } func (*ReadIndexRequest) ProtoMessage() {} func (*ReadIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{86} } func (m *ReadIndexRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ReadIndexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ReadIndexRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ReadIndexRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadIndexRequest.Merge(dst, src) } func (m *ReadIndexRequest) XXX_Size() int { return m.Size() } func (m *ReadIndexRequest) XXX_DiscardUnknown() { xxx_messageInfo_ReadIndexRequest.DiscardUnknown(m) } var xxx_messageInfo_ReadIndexRequest proto.InternalMessageInfo func (m *ReadIndexRequest) GetContext() *Context { if m != nil { return m.Context } return nil } type ReadIndexResponse struct { RegionError *errorpb.Error `protobuf:"bytes,1,opt,name=region_error,json=regionError" json:"region_error,omitempty"` ReadIndex uint64 `protobuf:"varint,2,opt,name=read_index,json=readIndex,proto3" json:"read_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ReadIndexResponse) Reset() { *m = ReadIndexResponse{} } func (m *ReadIndexResponse) String() string { return proto.CompactTextString(m) } func (*ReadIndexResponse) ProtoMessage() {} func (*ReadIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor_kvrpcpb_4b59c467e5a7b880, []int{87} } func (m *ReadIndexResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ReadIndexResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ReadIndexResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (dst *ReadIndexResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ReadIndexResponse.Merge(dst, src) } func (m *ReadIndexResponse) XXX_Size() int { 
	return m.Size()
}
func (m *ReadIndexResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadIndexResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ReadIndexResponse proto.InternalMessageInfo

func (m *ReadIndexResponse) GetRegionError() *errorpb.Error {
	if m != nil {
		return m.RegionError
	}
	return nil
}

func (m *ReadIndexResponse) GetReadIndex() uint64 {
	if m != nil {
		return m.ReadIndex
	}
	return 0
}

func init() {
	proto.RegisterType((*LockInfo)(nil), "kvrpcpb.LockInfo")
	proto.RegisterType((*AlreadyExist)(nil), "kvrpcpb.AlreadyExist")
	proto.RegisterType((*KeyError)(nil), "kvrpcpb.KeyError")
	proto.RegisterType((*WriteConflict)(nil), "kvrpcpb.WriteConflict")
	proto.RegisterType((*Deadlock)(nil), "kvrpcpb.Deadlock")
	proto.RegisterType((*CommitTsExpired)(nil), "kvrpcpb.CommitTsExpired")
	proto.RegisterType((*TxnNotFound)(nil), "kvrpcpb.TxnNotFound")
	proto.RegisterType((*Context)(nil), "kvrpcpb.Context")
	proto.RegisterType((*HandleTime)(nil), "kvrpcpb.HandleTime")
	proto.RegisterType((*ScanInfo)(nil), "kvrpcpb.ScanInfo")
	proto.RegisterType((*ScanDetail)(nil), "kvrpcpb.ScanDetail")
	proto.RegisterType((*ExecDetails)(nil), "kvrpcpb.ExecDetails")
	proto.RegisterType((*GetRequest)(nil), "kvrpcpb.GetRequest")
	proto.RegisterType((*GetResponse)(nil), "kvrpcpb.GetResponse")
	proto.RegisterType((*ScanRequest)(nil), "kvrpcpb.ScanRequest")
	proto.RegisterType((*KvPair)(nil), "kvrpcpb.KvPair")
	proto.RegisterType((*ScanResponse)(nil), "kvrpcpb.ScanResponse")
	proto.RegisterType((*Mutation)(nil), "kvrpcpb.Mutation")
	proto.RegisterType((*PrewriteRequest)(nil), "kvrpcpb.PrewriteRequest")
	proto.RegisterType((*PrewriteResponse)(nil), "kvrpcpb.PrewriteResponse")
	proto.RegisterType((*TxnHeartBeatRequest)(nil), "kvrpcpb.TxnHeartBeatRequest")
	proto.RegisterType((*TxnHeartBeatResponse)(nil), "kvrpcpb.TxnHeartBeatResponse")
	proto.RegisterType((*PessimisticLockRequest)(nil), "kvrpcpb.PessimisticLockRequest")
	proto.RegisterType((*PessimisticLockResponse)(nil), "kvrpcpb.PessimisticLockResponse")
	proto.RegisterType((*PessimisticRollbackRequest)(nil), "kvrpcpb.PessimisticRollbackRequest")
	proto.RegisterType((*PessimisticRollbackResponse)(nil), "kvrpcpb.PessimisticRollbackResponse")
	proto.RegisterType((*CommitRequest)(nil), "kvrpcpb.CommitRequest")
	proto.RegisterType((*CommitResponse)(nil), "kvrpcpb.CommitResponse")
	proto.RegisterType((*ImportRequest)(nil), "kvrpcpb.ImportRequest")
	proto.RegisterType((*ImportResponse)(nil), "kvrpcpb.ImportResponse")
	proto.RegisterType((*BatchRollbackRequest)(nil), "kvrpcpb.BatchRollbackRequest")
	proto.RegisterType((*BatchRollbackResponse)(nil), "kvrpcpb.BatchRollbackResponse")
	proto.RegisterType((*CheckTxnStatusRequest)(nil), "kvrpcpb.CheckTxnStatusRequest")
	proto.RegisterType((*CheckTxnStatusResponse)(nil), "kvrpcpb.CheckTxnStatusResponse")
	proto.RegisterType((*CleanupRequest)(nil), "kvrpcpb.CleanupRequest")
	proto.RegisterType((*CleanupResponse)(nil), "kvrpcpb.CleanupResponse")
	proto.RegisterType((*BatchGetRequest)(nil), "kvrpcpb.BatchGetRequest")
	proto.RegisterType((*BatchGetResponse)(nil), "kvrpcpb.BatchGetResponse")
	proto.RegisterType((*ScanLockRequest)(nil), "kvrpcpb.ScanLockRequest")
	proto.RegisterType((*ScanLockResponse)(nil), "kvrpcpb.ScanLockResponse")
	proto.RegisterType((*TxnInfo)(nil), "kvrpcpb.TxnInfo")
	proto.RegisterType((*ResolveLockRequest)(nil), "kvrpcpb.ResolveLockRequest")
	proto.RegisterType((*ResolveLockResponse)(nil), "kvrpcpb.ResolveLockResponse")
	proto.RegisterType((*GCRequest)(nil), "kvrpcpb.GCRequest")
	proto.RegisterType((*GCResponse)(nil), "kvrpcpb.GCResponse")
	proto.RegisterType((*RawGetRequest)(nil), "kvrpcpb.RawGetRequest")
	proto.RegisterType((*RawGetResponse)(nil), "kvrpcpb.RawGetResponse")
	proto.RegisterType((*RawPutRequest)(nil), "kvrpcpb.RawPutRequest")
	proto.RegisterType((*RawPutResponse)(nil), "kvrpcpb.RawPutResponse")
	proto.RegisterType((*RawBatchPutRequest)(nil), "kvrpcpb.RawBatchPutRequest")
	proto.RegisterType((*RawBatchPutResponse)(nil), "kvrpcpb.RawBatchPutResponse")
	proto.RegisterType((*RawBatchGetRequest)(nil), "kvrpcpb.RawBatchGetRequest")
	proto.RegisterType((*RawBatchGetResponse)(nil), "kvrpcpb.RawBatchGetResponse")
	proto.RegisterType((*RawDeleteRequest)(nil), "kvrpcpb.RawDeleteRequest")
	proto.RegisterType((*RawDeleteResponse)(nil), "kvrpcpb.RawDeleteResponse")
	proto.RegisterType((*RawBatchDeleteRequest)(nil), "kvrpcpb.RawBatchDeleteRequest")
	proto.RegisterType((*RawBatchDeleteResponse)(nil), "kvrpcpb.RawBatchDeleteResponse")
	proto.RegisterType((*DeleteRangeRequest)(nil), "kvrpcpb.DeleteRangeRequest")
	proto.RegisterType((*DeleteRangeResponse)(nil), "kvrpcpb.DeleteRangeResponse")
	proto.RegisterType((*RawDeleteRangeRequest)(nil), "kvrpcpb.RawDeleteRangeRequest")
	proto.RegisterType((*RawDeleteRangeResponse)(nil), "kvrpcpb.RawDeleteRangeResponse")
	proto.RegisterType((*RawScanRequest)(nil), "kvrpcpb.RawScanRequest")
	proto.RegisterType((*RawScanResponse)(nil), "kvrpcpb.RawScanResponse")
	proto.RegisterType((*KeyRange)(nil), "kvrpcpb.KeyRange")
	proto.RegisterType((*RawBatchScanRequest)(nil), "kvrpcpb.RawBatchScanRequest")
	proto.RegisterType((*RawBatchScanResponse)(nil), "kvrpcpb.RawBatchScanResponse")
	proto.RegisterType((*MvccWrite)(nil), "kvrpcpb.MvccWrite")
	proto.RegisterType((*MvccValue)(nil), "kvrpcpb.MvccValue")
	proto.RegisterType((*MvccLock)(nil), "kvrpcpb.MvccLock")
	proto.RegisterType((*MvccInfo)(nil), "kvrpcpb.MvccInfo")
	proto.RegisterType((*MvccGetByKeyRequest)(nil), "kvrpcpb.MvccGetByKeyRequest")
	proto.RegisterType((*MvccGetByKeyResponse)(nil), "kvrpcpb.MvccGetByKeyResponse")
	proto.RegisterType((*MvccGetByStartTsRequest)(nil), "kvrpcpb.MvccGetByStartTsRequest")
	proto.RegisterType((*MvccGetByStartTsResponse)(nil), "kvrpcpb.MvccGetByStartTsResponse")
	proto.RegisterType((*SplitRegionRequest)(nil), "kvrpcpb.SplitRegionRequest")
	proto.RegisterType((*SplitRegionResponse)(nil), "kvrpcpb.SplitRegionResponse")
	proto.RegisterType((*UnsafeDestroyRangeRequest)(nil), "kvrpcpb.UnsafeDestroyRangeRequest")
	proto.RegisterType((*UnsafeDestroyRangeResponse)(nil), "kvrpcpb.UnsafeDestroyRangeResponse")
	proto.RegisterType((*RegisterLockObserverRequest)(nil), "kvrpcpb.RegisterLockObserverRequest")
	proto.RegisterType((*RegisterLockObserverResponse)(nil), "kvrpcpb.RegisterLockObserverResponse")
	proto.RegisterType((*CheckLockObserverRequest)(nil), "kvrpcpb.CheckLockObserverRequest")
	proto.RegisterType((*CheckLockObserverResponse)(nil), "kvrpcpb.CheckLockObserverResponse")
	proto.RegisterType((*RemoveLockObserverRequest)(nil), "kvrpcpb.RemoveLockObserverRequest")
	proto.RegisterType((*RemoveLockObserverResponse)(nil), "kvrpcpb.RemoveLockObserverResponse")
	proto.RegisterType((*PhysicalScanLockRequest)(nil), "kvrpcpb.PhysicalScanLockRequest")
	proto.RegisterType((*PhysicalScanLockResponse)(nil), "kvrpcpb.PhysicalScanLockResponse")
	proto.RegisterType((*ReadIndexRequest)(nil), "kvrpcpb.ReadIndexRequest")
	proto.RegisterType((*ReadIndexResponse)(nil), "kvrpcpb.ReadIndexResponse")
	proto.RegisterEnum("kvrpcpb.CommandPri", CommandPri_name, CommandPri_value)
	proto.RegisterEnum("kvrpcpb.IsolationLevel", IsolationLevel_name, IsolationLevel_value)
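	// Note (editorial): init() binds each Go type to its fully-qualified proto
	// name (e.g. "kvrpcpb.LockInfo") in the process-wide registry, which is
	// what proto.MessageType lookups, google.protobuf.Any packing, and gRPC
	// server reflection rely on. The RegisterEnum calls expose the generated
	// name<->value maps for text formatting; the remaining enum registrations
	// (Op, Assertion, Action) follow immediately below.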
proto.RegisterEnum("kvrpcpb.Op", Op_name, Op_value) proto.RegisterEnum("kvrpcpb.Assertion", Assertion_name, Assertion_value) proto.RegisterEnum("kvrpcpb.Action", Action_name, Action_value) } func (m *LockInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LockInfo) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.PrimaryLock) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock))) i += copy(dAtA[i:], m.PrimaryLock) } if m.LockVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockVersion)) } if len(m.Key) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.LockTtl != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) } if m.TxnSize != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TxnSize)) } if m.LockType != 0 { dAtA[i] = 0x30 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockType)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *AlreadyExist) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *AlreadyExist) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Key) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *KeyError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *KeyError) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Locked != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Locked.Size())) n1, err := m.Locked.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } if len(m.Retryable) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Retryable))) i += copy(dAtA[i:], m.Retryable) } if len(m.Abort) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Abort))) i += copy(dAtA[i:], m.Abort) } if m.Conflict != nil { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Conflict.Size())) n2, err := m.Conflict.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 } if m.AlreadyExist != nil { dAtA[i] = 0x2a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AlreadyExist.Size())) n3, err := m.AlreadyExist.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n3 } if m.Deadlock != nil { dAtA[i] = 0x32 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Deadlock.Size())) n4, err := m.Deadlock.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n4 } if m.CommitTsExpired != nil { dAtA[i] = 0x3a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTsExpired.Size())) n5, err := m.CommitTsExpired.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n5 } if m.TxnNotFound != nil { dAtA[i] = 0x42 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TxnNotFound.Size())) n6, err := m.TxnNotFound.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n6 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *WriteConflict) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
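	// Note (editorial): the literal bytes written before each field in these
	// MarshalTo functions are protobuf wire-format tags:
	// tag = (field_number << 3) | wire_type. In LockInfo above, 0x0a is
	// field 1 with wire type 2 (length-delimited bytes), 0x10 is field 2 with
	// wire type 0 (varint), 0x1a is field 3/bytes, and so on. Lengths and
	// integer values go through encodeVarintKvrpcpb, which — assuming the
	// usual base-128 varint layout emitted by protoc-gen-gofast — is
	// equivalent to this sketch:
	//
	//	func encodeVarint(dAtA []byte, offset int, v uint64) int {
	//		for v >= 1<<7 {
	//			dAtA[offset] = uint8(v&0x7f | 0x80) // low 7 bits + continuation bit
	//			v >>= 7
	//			offset++
	//		}
	//		dAtA[offset] = uint8(v)
	//		return offset + 1
	//	}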
make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *WriteConflict) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.StartTs != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if m.ConflictTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ConflictTs)) } if len(m.Key) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Primary) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Primary))) i += copy(dAtA[i:], m.Primary) } if m.ConflictCommitTs != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ConflictCommitTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Deadlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Deadlock) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.LockTs != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTs)) } if len(m.LockKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.LockKey))) i += copy(dAtA[i:], m.LockKey) } if m.DeadlockKeyHash != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.DeadlockKeyHash)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CommitTsExpired) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CommitTsExpired) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.StartTs != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if m.AttemptedCommitTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AttemptedCommitTs)) } if len(m.Key) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.MinCommitTs != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *TxnNotFound) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TxnNotFound) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.StartTs != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if len(m.PrimaryKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryKey))) i += copy(dAtA[i:], m.PrimaryKey) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Context) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Context) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionId != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionId)) } if m.RegionEpoch != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionEpoch.Size())) n7, err := m.RegionEpoch.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n7 } if m.Peer != nil { dAtA[i] = 0x1a i++ i = 
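	// Note (editorial): proto3 presence rules explain all the guards above —
	// a scalar equal to its zero value (0, "", empty bytes, false) is simply
	// not written, and a nil submessage pointer is skipped entirely; the
	// decoder reconstructs the zero values, so the omission is lossless.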
encodeVarintKvrpcpb(dAtA, i, uint64(m.Peer.Size())) n8, err := m.Peer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n8 } if m.Term != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Term)) } if m.Priority != 0 { dAtA[i] = 0x30 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Priority)) } if m.IsolationLevel != 0 { dAtA[i] = 0x38 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.IsolationLevel)) } if m.NotFillCache { dAtA[i] = 0x40 i++ if m.NotFillCache { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.SyncLog { dAtA[i] = 0x48 i++ if m.SyncLog { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.HandleTime { dAtA[i] = 0x50 i++ if m.HandleTime { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.ScanDetail { dAtA[i] = 0x58 i++ if m.ScanDetail { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.ReplicaRead { dAtA[i] = 0x60 i++ if m.ReplicaRead { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.ResolvedLocks) > 0 { dAtA10 := make([]byte, len(m.ResolvedLocks)*10) var j9 int for _, num := range m.ResolvedLocks { for num >= 1<<7 { dAtA10[j9] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 j9++ } dAtA10[j9] = uint8(num) j9++ } dAtA[i] = 0x6a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(j9)) i += copy(dAtA[i:], dAtA10[:j9]) } if m.MaxExecutionDurationMs != 0 { dAtA[i] = 0x70 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxExecutionDurationMs)) } if m.AppliedIndex != 0 { dAtA[i] = 0x78 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AppliedIndex)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *HandleTime) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HandleTime) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.WaitMs != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.WaitMs)) } if m.ProcessMs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ProcessMs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ScanInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ScanInfo) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Total != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Total)) } if m.Processed != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Processed)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ScanDetail) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ScanDetail) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Write != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Write.Size())) n11, err := m.Write.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n11 } if m.Lock != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Lock.Size())) n12, err := m.Lock.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n12 } if m.Data != nil { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Data.Size())) n13, err := m.Data.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n13 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func 
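	// Note (editorial): Context.ResolvedLocks above is a packed repeated
	// varint field — the elements are varint-encoded into a scratch buffer
	// (sized len*10, the worst case for a 64-bit varint), then emitted once
	// under tag 0x6a (field 13, length-delimited) with the total byte count
	// as the length prefix. Bool fields, by contrast, cost a tag plus a
	// single 0x00/0x01 byte, as in the NotFillCache/SyncLog writes above.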
(m *ExecDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ExecDetails) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.HandleTime != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.HandleTime.Size())) n14, err := m.HandleTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n14 } if m.ScanDetail != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ScanDetail.Size())) n15, err := m.ScanDetail.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n15 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *GetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n16, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n16 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.Version != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *GetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n17, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n17 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n18, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n18 } if len(m.Value) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if m.NotFound { dAtA[i] = 0x20 i++ if m.NotFound { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ScanRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ScanRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n19, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n19 } if len(m.StartKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if m.Limit != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit)) } if m.Version != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version)) } if m.KeyOnly { dAtA[i] = 0x28 i++ if m.KeyOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.Reverse { dAtA[i] = 0x30 i++ if m.Reverse { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.EndKey) > 0 { dAtA[i] = 0x3a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], 
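	// Note (editorial): embedded messages (Context, RegionError, KeyError,
	// ...) use wire type 2: write the tag, then the submessage's Size() as a
	// varint length prefix, then let the submessage's MarshalTo fill the same
	// buffer in place. This is why MarshalTo never grows the slice — Marshal
	// already allocated exactly Size() bytes up front.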
m.EndKey) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *KvPair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *KvPair) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Error != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n20, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n20 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Value) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ScanResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ScanResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n21, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n21 } if len(m.Pairs) > 0 { for _, msg := range m.Pairs { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *Mutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Mutation) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Op != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Op)) } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Value) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if m.Assertion != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Assertion)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PrewriteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PrewriteRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n22, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n22 } if len(m.Mutations) > 0 { for _, msg := range m.Mutations { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.PrimaryLock) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock))) i += copy(dAtA[i:], m.PrimaryLock) } if m.StartVersion != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if m.LockTtl != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) } if m.SkipConstraintCheck { dAtA[i] = 0x30 i++ if m.SkipConstraintCheck { dAtA[i] = 1 } else 
{ dAtA[i] = 0 } i++ } if len(m.IsPessimisticLock) > 0 { dAtA[i] = 0x3a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.IsPessimisticLock))) for _, b := range m.IsPessimisticLock { if b { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } } if m.TxnSize != 0 { dAtA[i] = 0x40 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.TxnSize)) } if m.ForUpdateTs != 0 { dAtA[i] = 0x48 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ForUpdateTs)) } if m.MinCommitTs != 0 { dAtA[i] = 0x50 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MinCommitTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PrewriteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PrewriteResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n23, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n23 } if len(m.Errors) > 0 { for _, msg := range m.Errors { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *TxnHeartBeatRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TxnHeartBeatRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n24, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n24 } if len(m.PrimaryLock) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock))) i += copy(dAtA[i:], m.PrimaryLock) } if m.StartVersion != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if m.AdviseLockTtl != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.AdviseLockTtl)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *TxnHeartBeatResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TxnHeartBeatResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n25, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n25 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n26, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n26 } if m.LockTtl != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PessimisticLockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PessimisticLockRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, 
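	// Note (editorial): PrewriteRequest.IsPessimisticLock (just above) is a
	// repeated bool encoded packed — the length prefix doubles as the element
	// count, since each bool is exactly one 0/1 byte on the wire.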
uint64(m.Context.Size())) n27, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n27 } if len(m.Mutations) > 0 { for _, msg := range m.Mutations { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.PrimaryLock) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryLock))) i += copy(dAtA[i:], m.PrimaryLock) } if m.StartVersion != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if m.LockTtl != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) } if m.ForUpdateTs != 0 { dAtA[i] = 0x30 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ForUpdateTs)) } if m.IsFirstLock { dAtA[i] = 0x38 i++ if m.IsFirstLock { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.WaitTimeout != 0 { dAtA[i] = 0x40 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.WaitTimeout)) } if m.Force { dAtA[i] = 0x48 i++ if m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.ReturnValues { dAtA[i] = 0x50 i++ if m.ReturnValues { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PessimisticLockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PessimisticLockResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n28, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n28 } if len(m.Errors) > 0 { for _, msg := range m.Errors { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.CommitTs != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTs)) } if len(m.Value) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if len(m.Values) > 0 { for _, b := range m.Values { dAtA[i] = 0x2a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PessimisticRollbackRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PessimisticRollbackRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n29, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n29 } if m.StartVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if m.ForUpdateTs != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ForUpdateTs)) } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PessimisticRollbackResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m 
*PessimisticRollbackResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n30, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n30 } if len(m.Errors) > 0 { for _, msg := range m.Errors { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CommitRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CommitRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n31, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n31 } if m.StartVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if m.CommitVersion != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CommitResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CommitResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n32, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n32 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n33, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n33 } if m.CommitVersion != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ImportRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ImportRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Mutations) > 0 { for _, msg := range m.Mutations { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.CommitVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ImportResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ImportResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n34, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 } if len(m.Error) > 0 { dAtA[i] = 0x12 
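	// Note (editorial): repeated bytes fields such as Keys, and repeated
	// messages such as Mutations, are never packed — each wire-type-2 element
	// re-emits the full tag + length prefix, which is also what allows a
	// decoder to merge repeated fields across concatenated messages.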
i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *BatchRollbackRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BatchRollbackRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n35, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n35 } if m.StartVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *BatchRollbackResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BatchRollbackResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n36, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n36 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n37, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CheckTxnStatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckTxnStatusRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n38, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n38 } if len(m.PrimaryKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.PrimaryKey))) i += copy(dAtA[i:], m.PrimaryKey) } if m.LockTs != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTs)) } if m.CallerStartTs != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CallerStartTs)) } if m.CurrentTs != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CurrentTs)) } if m.RollbackIfNotExist { dAtA[i] = 0x30 i++ if m.RollbackIfNotExist { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CheckTxnStatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckTxnStatusResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n39, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n39 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n40, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n40 } 
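	// Note (editorial): the trailing XXX_unrecognized copy in every MarshalTo
	// re-emits bytes for fields this build does not know about, so a message
	// decoded from a newer peer round-trips without silently dropping data.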
if m.LockTtl != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.LockTtl)) } if m.CommitVersion != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) } if m.Action != 0 { dAtA[i] = 0x28 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Action)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CleanupRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CleanupRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n41, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n41 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.StartVersion != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if m.CurrentTs != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CurrentTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CleanupResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CleanupResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n42, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n42 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n43, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n43 } if m.CommitVersion != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *BatchGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BatchGetRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n44, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n44 } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if m.Version != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Version)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *BatchGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *BatchGetResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n45, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n45 } if len(m.Pairs) > 0 { for _, msg := range m.Pairs { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if 
err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ScanLockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ScanLockRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n46, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n46 } if m.MaxVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxVersion)) } if len(m.StartKey) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if m.Limit != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ScanLockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ScanLockResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n47, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n47 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n48, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n48 } if len(m.Locks) > 0 { for _, msg := range m.Locks { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *TxnInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TxnInfo) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Txn != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Txn)) } if m.Status != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Status)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ResolveLockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ResolveLockRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n49, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n49 } if m.StartVersion != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartVersion)) } if m.CommitVersion != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitVersion)) } if len(m.TxnInfos) > 0 { for _, msg := range m.TxnInfos { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x2a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if 
m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ResolveLockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ResolveLockResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n50, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n50 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n51, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n51 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *GCRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GCRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n52, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n52 } if m.SafePoint != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.SafePoint)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *GCResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GCResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n53, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n53 } if m.Error != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Error.Size())) n54, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n54 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawGetRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n55, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n55 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Cf) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawGetResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n56, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n56 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = 
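	// Note (editorial): callers normally use the Marshal entry points rather
	// than MarshalTo directly — Marshal computes Size() once, allocates an
	// exact-size buffer, and fills it. A typical (hypothetical) caller:
	//
	//	req := &RawGetRequest{Context: ctx, Key: []byte("k1"), Cf: "default"}
	//	data, err := req.Marshal() // single exact-size allocation
	//	if err != nil {
	//		return err
	//	}
	//	// hand data to the transport layer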
encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if len(m.Value) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if m.NotFound { dAtA[i] = 0x20 i++ if m.NotFound { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawPutRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawPutRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n57, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n57 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Value) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if len(m.Cf) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawPutResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawPutResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n58, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n58 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchPutRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchPutRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n59, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n59 } if len(m.Pairs) > 0 { for _, msg := range m.Pairs { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.Cf) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchPutResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchPutResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n60, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n60 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += 
copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchGetRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n61, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n61 } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if len(m.Cf) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchGetResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n62, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n62 } if len(m.Pairs) > 0 { for _, msg := range m.Pairs { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawDeleteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawDeleteRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n63, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n63 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if len(m.Cf) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawDeleteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawDeleteResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n64, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n64 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchDeleteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchDeleteRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, 
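	// Note (editorial): string fields (Cf) and bytes fields (Key, StartKey)
	// share wire type 2 and are indistinguishable on the wire; only the
	// generated struct tags and the Go field types differ.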
uint64(m.Context.Size())) n65, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n65 } if len(m.Keys) > 0 { for _, b := range m.Keys { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if len(m.Cf) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchDeleteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchDeleteResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n66, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n66 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n67, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n67 } if len(m.StartKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if len(m.EndKey) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], m.EndKey) } if m.NotifyOnly { dAtA[i] = 0x20 i++ if m.NotifyOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n68, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n68 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawDeleteRangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawDeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n69, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n69 } if len(m.StartKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if len(m.EndKey) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], 
m.EndKey) } if len(m.Cf) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawDeleteRangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawDeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n70, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n70 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawScanRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawScanRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n71, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n71 } if len(m.StartKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if m.Limit != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit)) } if m.KeyOnly { dAtA[i] = 0x20 i++ if m.KeyOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.Cf) > 0 { dAtA[i] = 0x2a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.Reverse { dAtA[i] = 0x30 i++ if m.Reverse { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.EndKey) > 0 { dAtA[i] = 0x3a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], m.EndKey) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawScanResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawScanResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n72, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n72 } if len(m.Kvs) > 0 { for _, msg := range m.Kvs { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *KeyRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *KeyRange) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.StartKey) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if len(m.EndKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], m.EndKey) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m 
*RawBatchScanRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchScanRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n73, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n73 } if len(m.Ranges) > 0 { for _, msg := range m.Ranges { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.EachLimit != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.EachLimit)) } if m.KeyOnly { dAtA[i] = 0x20 i++ if m.KeyOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.Cf) > 0 { dAtA[i] = 0x2a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Cf))) i += copy(dAtA[i:], m.Cf) } if m.Reverse { dAtA[i] = 0x30 i++ if m.Reverse { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RawBatchScanResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RawBatchScanResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n74, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n74 } if len(m.Kvs) > 0 { for _, msg := range m.Kvs { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccWrite) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccWrite) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Type != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Type)) } if m.StartTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if m.CommitTs != 0 { dAtA[i] = 0x18 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.CommitTs)) } if len(m.ShortValue) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.ShortValue))) i += copy(dAtA[i:], m.ShortValue) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccValue) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.StartTs != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if len(m.Value) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccLock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccLock) MarshalTo(dAtA []byte) (int, error) { var i int _ = 
i var l int _ = l if m.Type != 0 { dAtA[i] = 0x8 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Type)) } if m.StartTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if len(m.Primary) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Primary))) i += copy(dAtA[i:], m.Primary) } if len(m.ShortValue) > 0 { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.ShortValue))) i += copy(dAtA[i:], m.ShortValue) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccInfo) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Lock != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Lock.Size())) n75, err := m.Lock.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n75 } if len(m.Writes) > 0 { for _, msg := range m.Writes { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if len(m.Values) > 0 { for _, msg := range m.Values { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccGetByKeyRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccGetByKeyRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n76, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n76 } if len(m.Key) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccGetByKeyResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccGetByKeyResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n77, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n77 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.Info != nil { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Info.Size())) n78, err := m.Info.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n78 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccGetByStartTsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccGetByStartTsRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n79, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } 
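/* Nested-message fields are written with a fixed three-step pattern: emit the key byte (field_number<<3 | wire type 2, e.g. 0xa for field 1), emit the submessage byte length as a varint via encodeVarintKvrpcpb, then marshal the submessage into the remaining slice and advance i by the number of bytes it wrote (n79 here). */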
i += n79 } if m.StartTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.StartTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *MvccGetByStartTsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MvccGetByStartTsResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n80, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n80 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if len(m.Key) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Key))) i += copy(dAtA[i:], m.Key) } if m.Info != nil { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Info.Size())) n81, err := m.Info.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n81 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *SplitRegionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SplitRegionRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n82, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n82 } if len(m.SplitKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.SplitKey))) i += copy(dAtA[i:], m.SplitKey) } if len(m.SplitKeys) > 0 { for _, b := range m.SplitKeys { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(b))) i += copy(dAtA[i:], b) } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *SplitRegionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SplitRegionResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n83, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n83 } if m.Left != nil { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Left.Size())) n84, err := m.Left.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n84 } if m.Right != nil { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Right.Size())) n85, err := m.Right.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n85 } if len(m.Regions) > 0 { for _, msg := range m.Regions { dAtA[i] = 0x22 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *UnsafeDestroyRangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UnsafeDestroyRangeRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = 
encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n86, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n86 } if len(m.StartKey) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if len(m.EndKey) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.EndKey))) i += copy(dAtA[i:], m.EndKey) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *UnsafeDestroyRangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UnsafeDestroyRangeResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n87, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n87 } if len(m.Error) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RegisterLockObserverRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RegisterLockObserverRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n88, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n88 } if m.MaxTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RegisterLockObserverResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RegisterLockObserverResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Error) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CheckLockObserverRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckLockObserverRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n89, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n89 } if m.MaxTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *CheckLockObserverResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckLockObserverResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Error) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.IsClean { dAtA[i] = 0x10 i++ if 
m.IsClean { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } if len(m.Locks) > 0 { for _, msg := range m.Locks { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RemoveLockObserverRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RemoveLockObserverRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n90, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n90 } if m.MaxTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *RemoveLockObserverResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RemoveLockObserverResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Error) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PhysicalScanLockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PhysicalScanLockRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n91, err := m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n91 } if m.MaxTs != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.MaxTs)) } if len(m.StartKey) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.StartKey))) i += copy(dAtA[i:], m.StartKey) } if m.Limit != 0 { dAtA[i] = 0x20 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Limit)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *PhysicalScanLockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PhysicalScanLockResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Error) > 0 { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(len(m.Error))) i += copy(dAtA[i:], m.Error) } if len(m.Locks) > 0 { for _, msg := range m.Locks { dAtA[i] = 0x12 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ReadIndexRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ReadIndexRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.Context != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.Context.Size())) n92, err := 
m.Context.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n92 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func (m *ReadIndexResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ReadIndexResponse) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.RegionError != nil { dAtA[i] = 0xa i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.RegionError.Size())) n93, err := m.RegionError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n93 } if m.ReadIndex != 0 { dAtA[i] = 0x10 i++ i = encodeVarintKvrpcpb(dAtA, i, uint64(m.ReadIndex)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } func encodeVarintKvrpcpb
(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *LockInfo) Size() (n int) { var l int _ = l l = len(m.PrimaryLock) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.LockVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.LockVersion)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.LockTtl != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTtl)) } if m.TxnSize != 0 { n += 1 + sovKvrpcpb(uint64(m.TxnSize)) } if m.LockType != 0 { n += 1 + sovKvrpcpb(uint64(m.LockType)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *AlreadyExist) Size() (n int) { var l int _ = l l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *KeyError) Size() (n int) { var l int _ = l if m.Locked != nil { l = m.Locked.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Retryable) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Abort) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Conflict != nil { l = m.Conflict.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.AlreadyExist != nil { l = m.AlreadyExist.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Deadlock != nil { l = m.Deadlock.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.CommitTsExpired != nil { l = m.CommitTsExpired.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.TxnNotFound != nil { l = m.TxnNotFound.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *WriteConflict) Size() (n int) { var l int _ = l if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } if m.ConflictTs != 0 { n += 1 + sovKvrpcpb(uint64(m.ConflictTs)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Primary) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.ConflictCommitTs != 0 { n += 1 + sovKvrpcpb(uint64(m.ConflictCommitTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *Deadlock) Size() (n int) { var l int _ = l if m.LockTs != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTs)) } l = len(m.LockKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.DeadlockKeyHash != 0 { n += 1 + sovKvrpcpb(uint64(m.DeadlockKeyHash)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CommitTsExpired) Size() (n int) { var l int _ = l if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } if m.AttemptedCommitTs != 0 { n += 1 + sovKvrpcpb(uint64(m.AttemptedCommitTs)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.MinCommitTs != 0 { n += 1 + sovKvrpcpb(uint64(m.MinCommitTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *TxnNotFound) Size() (n int) { var l int _ = l if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } l = len(m.PrimaryKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *Context) Size() (n int) { var l int _ = l if m.RegionId != 0 { n += 1 + sovKvrpcpb(uint64(m.RegionId)) } if m.RegionEpoch != nil { l = m.RegionEpoch.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Peer != nil { l = m.Peer.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Term != 0 { n += 1 + sovKvrpcpb(uint64(m.Term)) } if m.Priority != 0 { n += 1 + sovKvrpcpb(uint64(m.Priority)) } if m.IsolationLevel != 0 { n += 1 + 
sovKvrpcpb(uint64(m.IsolationLevel)) } if m.NotFillCache { n += 2 } if m.SyncLog { n += 2 } if m.HandleTime { n += 2 } if m.ScanDetail { n += 2 } if m.ReplicaRead { n += 2 } if len(m.ResolvedLocks) > 0 { l = 0 for _, e := range m.ResolvedLocks { l += sovKvrpcpb(uint64(e)) } n += 1 + sovKvrpcpb(uint64(l)) + l } if m.MaxExecutionDurationMs != 0 { n += 1 + sovKvrpcpb(uint64(m.MaxExecutionDurationMs)) } if m.AppliedIndex != 0 { n += 1 + sovKvrpcpb(uint64(m.AppliedIndex)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *HandleTime) Size() (n int) { var l int _ = l if m.WaitMs != 0 { n += 1 + sovKvrpcpb(uint64(m.WaitMs)) } if m.ProcessMs != 0 { n += 1 + sovKvrpcpb(uint64(m.ProcessMs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ScanInfo) Size() (n int) { var l int _ = l if m.Total != 0 { n += 1 + sovKvrpcpb(uint64(m.Total)) } if m.Processed != 0 { n += 1 + sovKvrpcpb(uint64(m.Processed)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ScanDetail) Size() (n int) { var l int _ = l if m.Write != nil { l = m.Write.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Lock != nil { l = m.Lock.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Data != nil { l = m.Data.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ExecDetails) Size() (n int) { var l int _ = l if m.HandleTime != nil { l = m.HandleTime.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.ScanDetail != nil { l = m.ScanDetail.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *GetRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Version != 0 { n += 1 + sovKvrpcpb(uint64(m.Version)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *GetResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.NotFound { n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ScanRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Limit != 0 { n += 1 + sovKvrpcpb(uint64(m.Limit)) } if m.Version != 0 { n += 1 + sovKvrpcpb(uint64(m.Version)) } if m.KeyOnly { n += 2 } if m.Reverse { n += 2 } l = len(m.EndKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *KvPair) Size() (n int) { var l int _ = l if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ScanResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Pairs) > 0 { for _, e := range m.Pairs { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } 
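/* Each Size method mirrors its MarshalTo counterpart byte for byte: a length-delimited field costs 1 key byte + sovKvrpcpb(len) + len, and a varint field costs 1 key byte + sovKvrpcpb(value). sovKvrpcpb(x) is the byte count of x as a base-128 varint (one byte per 7 bits of significance), the counting twin of encodeVarintKvrpcpb, which emits those bytes; for example, 300 marshals as the two bytes 0xac 0x02. */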
return n } func (m *Mutation) Size() (n int) { var l int _ = l if m.Op != 0 { n += 1 + sovKvrpcpb(uint64(m.Op)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Assertion != 0 { n += 1 + sovKvrpcpb(uint64(m.Assertion)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PrewriteRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Mutations) > 0 { for _, e := range m.Mutations { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } l = len(m.PrimaryLock) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if m.LockTtl != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTtl)) } if m.SkipConstraintCheck { n += 2 } if len(m.IsPessimisticLock) > 0 { n += 1 + sovKvrpcpb(uint64(len(m.IsPessimisticLock))) + len(m.IsPessimisticLock)*1 } if m.TxnSize != 0 { n += 1 + sovKvrpcpb(uint64(m.TxnSize)) } if m.ForUpdateTs != 0 { n += 1 + sovKvrpcpb(uint64(m.ForUpdateTs)) } if m.MinCommitTs != 0 { n += 1 + sovKvrpcpb(uint64(m.MinCommitTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PrewriteResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Errors) > 0 { for _, e := range m.Errors { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *TxnHeartBeatRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.PrimaryLock) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if m.AdviseLockTtl != 0 { n += 1 + sovKvrpcpb(uint64(m.AdviseLockTtl)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *TxnHeartBeatResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.LockTtl != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTtl)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PessimisticLockRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Mutations) > 0 { for _, e := range m.Mutations { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } l = len(m.PrimaryLock) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if m.LockTtl != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTtl)) } if m.ForUpdateTs != 0 { n += 1 + sovKvrpcpb(uint64(m.ForUpdateTs)) } if m.IsFirstLock { n += 2 } if m.WaitTimeout != 0 { n += 1 + sovKvrpcpb(uint64(m.WaitTimeout)) } if m.Force { n += 2 } if m.ReturnValues { n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PessimisticLockResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Errors) > 0 { for _, e := range m.Errors { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.CommitTs != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitTs)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Values) > 0 { for _, b := range 
m.Values { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PessimisticRollbackRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if m.ForUpdateTs != 0 { n += 1 + sovKvrpcpb(uint64(m.ForUpdateTs)) } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PessimisticRollbackResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Errors) > 0 { for _, e := range m.Errors { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CommitRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.CommitVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CommitResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.CommitVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ImportRequest) Size() (n int) { var l int _ = l if len(m.Mutations) > 0 { for _, e := range m.Mutations { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.CommitVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ImportResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *BatchRollbackRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *BatchRollbackResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CheckTxnStatusRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.PrimaryKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.LockTs != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTs)) } if m.CallerStartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.CallerStartTs)) } if m.CurrentTs != 0 { n += 1 + sovKvrpcpb(uint64(m.CurrentTs)) } if m.RollbackIfNotExist { n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n 
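/* Bool fields such as RollbackIfNotExist above cost a flat 2 bytes (key byte plus a one-byte value) and, like every proto3 scalar in these Size methods, are skipped entirely when they hold the zero value. */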
} func (m *CheckTxnStatusResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.LockTtl != 0 { n += 1 + sovKvrpcpb(uint64(m.LockTtl)) } if m.CommitVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) } if m.Action != 0 { n += 1 + sovKvrpcpb(uint64(m.Action)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CleanupRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if m.CurrentTs != 0 { n += 1 + sovKvrpcpb(uint64(m.CurrentTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CleanupResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.CommitVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *BatchGetRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.Version != 0 { n += 1 + sovKvrpcpb(uint64(m.Version)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *BatchGetResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Pairs) > 0 { for _, e := range m.Pairs { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ScanLockRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.MaxVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.MaxVersion)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Limit != 0 { n += 1 + sovKvrpcpb(uint64(m.Limit)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ScanLockResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Locks) > 0 { for _, e := range m.Locks { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *TxnInfo) Size() (n int) { var l int _ = l if m.Txn != 0 { n += 1 + sovKvrpcpb(uint64(m.Txn)) } if m.Status != 0 { n += 1 + sovKvrpcpb(uint64(m.Status)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ResolveLockRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.StartVersion)) } if m.CommitVersion != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitVersion)) } if len(m.TxnInfos) > 0 { for _, e := range m.TxnInfos { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += 
len(m.XXX_unrecognized) } return n } func (m *ResolveLockResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *GCRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.SafePoint != 0 { n += 1 + sovKvrpcpb(uint64(m.SafePoint)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *GCResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Error != nil { l = m.Error.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawGetRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawGetResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.NotFound { n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawPutRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawPutResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchPutRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Pairs) > 0 { for _, e := range m.Pairs { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchPutResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchGetRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchGetResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Pairs) > 0 { for _, e := range m.Pairs { l = e.Size() n += 
1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawDeleteRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawDeleteResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchDeleteRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Keys) > 0 { for _, b := range m.Keys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchDeleteResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *DeleteRangeRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.EndKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.NotifyOnly { n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *DeleteRangeResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawDeleteRangeRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.EndKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawDeleteRangeResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawScanRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Limit != 0 { n += 1 + sovKvrpcpb(uint64(m.Limit)) } if m.KeyOnly { n += 2 } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Reverse { n += 2 } l = len(m.EndKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawScanResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Kvs) > 0 { for _, e := range m.Kvs { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != 
nil { n += len(m.XXX_unrecognized) } return n } func (m *KeyRange) Size() (n int) { var l int _ = l l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.EndKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchScanRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Ranges) > 0 { for _, e := range m.Ranges { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.EachLimit != 0 { n += 1 + sovKvrpcpb(uint64(m.EachLimit)) } if m.KeyOnly { n += 2 } l = len(m.Cf) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Reverse { n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RawBatchScanResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Kvs) > 0 { for _, e := range m.Kvs { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccWrite) Size() (n int) { var l int _ = l if m.Type != 0 { n += 1 + sovKvrpcpb(uint64(m.Type)) } if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } if m.CommitTs != 0 { n += 1 + sovKvrpcpb(uint64(m.CommitTs)) } l = len(m.ShortValue) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccValue) Size() (n int) { var l int _ = l if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } l = len(m.Value) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccLock) Size() (n int) { var l int _ = l if m.Type != 0 { n += 1 + sovKvrpcpb(uint64(m.Type)) } if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } l = len(m.Primary) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.ShortValue) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccInfo) Size() (n int) { var l int _ = l if m.Lock != nil { l = m.Lock.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Writes) > 0 { for _, e := range m.Writes { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if len(m.Values) > 0 { for _, e := range m.Values { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccGetByKeyRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccGetByKeyResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Info != nil { l = m.Info.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccGetByStartTsRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.StartTs != 0 { n += 1 + sovKvrpcpb(uint64(m.StartTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *MvccGetByStartTsResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { 
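/* Optional message fields are pointers, so this nil check doubles as a presence check: an absent submessage contributes no bytes to the encoded size at all. */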
l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Key) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Info != nil { l = m.Info.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *SplitRegionRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.SplitKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.SplitKeys) > 0 { for _, b := range m.SplitKeys { l = len(b) n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *SplitRegionResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Left != nil { l = m.Left.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Right != nil { l = m.Right.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Regions) > 0 { for _, e := range m.Regions { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *UnsafeDestroyRangeRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.EndKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *UnsafeDestroyRangeResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RegisterLockObserverRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.MaxTs != 0 { n += 1 + sovKvrpcpb(uint64(m.MaxTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RegisterLockObserverResponse) Size() (n int) { var l int _ = l l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CheckLockObserverRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.MaxTs != 0 { n += 1 + sovKvrpcpb(uint64(m.MaxTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *CheckLockObserverResponse) Size() (n int) { var l int _ = l l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.IsClean { n += 2 } if len(m.Locks) > 0 { for _, e := range m.Locks { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RemoveLockObserverRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.MaxTs != 0 { n += 1 + sovKvrpcpb(uint64(m.MaxTs)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *RemoveLockObserverResponse) Size() (n int) { var l int _ = l l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PhysicalScanLockRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = 
m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.MaxTs != 0 { n += 1 + sovKvrpcpb(uint64(m.MaxTs)) } l = len(m.StartKey) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if m.Limit != 0 { n += 1 + sovKvrpcpb(uint64(m.Limit)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *PhysicalScanLockResponse) Size() (n int) { var l int _ = l l = len(m.Error) if l > 0 { n += 1 + l + sovKvrpcpb(uint64(l)) } if len(m.Locks) > 0 { for _, e := range m.Locks { l = e.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ReadIndexRequest) Size() (n int) { var l int _ = l if m.Context != nil { l = m.Context.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func (m *ReadIndexResponse) Size() (n int) { var l int _ = l if m.RegionError != nil { l = m.RegionError.Size() n += 1 + l + sovKvrpcpb(uint64(l)) } if m.ReadIndex != 0 { n += 1 + sovKvrpcpb(uint64(m.ReadIndex)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovKvrpcpb(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozKvrpcpb(x uint64) (n int) { return sovKvrpcpb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LockInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LockInfo: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LockInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...) if m.PrimaryLock == nil { m.PrimaryLock = []byte{} } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockVersion", wireType) } m.LockVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
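/* Appending into m.Key[:0] copies the field bytes out of dAtA, so the decoded message never aliases the caller's input buffer; the nil check below makes a present-but-empty field decode as []byte{} rather than nil. */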
if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) } m.LockTtl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTtl |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TxnSize", wireType) } m.TxnSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TxnSize |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockType", wireType) } m.LockType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockType |= (Op(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *AlreadyExist) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: AlreadyExist: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: AlreadyExist: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
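// Unknown fields are not dropped: skipKvrpcpb measured the full extent of the
// unrecognized field, and its raw bytes were appended to XXX_unrecognized so
// they are re-emitted verbatim on the next Marshal (the Size() methods above
// add len(m.XXX_unrecognized) for the same reason).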
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *KeyError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: KeyError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: KeyError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Locked", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Locked == nil { m.Locked = &LockInfo{} } if err := m.Locked.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Retryable", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Retryable = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Abort", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Abort = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Conflict", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Conflict == nil { m.Conflict = &WriteConflict{} } if err := m.Conflict.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AlreadyExist", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.AlreadyExist == nil { 
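// Nested message field: the target pointer is allocated lazily, only when the
// field actually appears on the wire, and the embedded message is decoded by a
// recursive Unmarshal over the bounded sub-slice dAtA[iNdEx:postIndex].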
m.AlreadyExist = &AlreadyExist{} } if err := m.AlreadyExist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Deadlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Deadlock == nil { m.Deadlock = &Deadlock{} } if err := m.Deadlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CommitTsExpired", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.CommitTsExpired == nil { m.CommitTsExpired = &CommitTsExpired{} } if err := m.CommitTsExpired.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TxnNotFound", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.TxnNotFound == nil { m.TxnNotFound = &TxnNotFound{} } if err := m.TxnNotFound.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *WriteConflict) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: WriteConflict: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: WriteConflict: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ConflictTs", wireType) } m.ConflictTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ConflictTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Primary = append(m.Primary[:0], dAtA[iNdEx:postIndex]...) if m.Primary == nil { m.Primary = []byte{} } iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ConflictCommitTs", wireType) } m.ConflictCommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ConflictCommitTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Deadlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Deadlock: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Deadlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTs", wireType) } m.LockTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LockKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.LockKey = append(m.LockKey[:0], dAtA[iNdEx:postIndex]...) if m.LockKey == nil { m.LockKey = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DeadlockKeyHash", wireType) } m.DeadlockKeyHash = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DeadlockKeyHash |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CommitTsExpired) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CommitTsExpired: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CommitTsExpired: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AttemptedCommitTs", wireType) } m.AttemptedCommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.AttemptedCommitTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType) } m.MinCommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MinCommitTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
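// The uint64 timestamp fields above (StartTs, AttemptedCommitTs, MinCommitTs)
// all use the same base-128 varint loop: each byte contributes its low 7 bits,
// least-significant group first, and a high bit of 1 means more bytes follow.
// E.g. 300 decodes from 0xac 0x02 as 0x2c | (0x02 << 7) = 44 + 256.
// The shift >= 64 guard rejects varints longer than a uint64.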
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *TxnNotFound) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TxnNotFound: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TxnNotFound: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PrimaryKey = append(m.PrimaryKey[:0], dAtA[iNdEx:postIndex]...) if m.PrimaryKey == nil { m.PrimaryKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
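// Three sentinels guard every decode path: ErrIntOverflowKvrpcpb for varints
// that run past 64 bits, ErrInvalidLengthKvrpcpb for a length prefix that
// overflowed int into a negative value, and io.ErrUnexpectedEOF whenever a
// declared length or field extends beyond the end of dAtA.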
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Context) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Context: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Context: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RegionId", wireType) } m.RegionId = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.RegionId |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionEpoch", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionEpoch == nil { m.RegionEpoch = &metapb.RegionEpoch{} } if err := m.RegionEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Peer == nil { m.Peer = &metapb.Peer{} } if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) } m.Term = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) } m.Priority = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Priority |= (CommandPri(b) & 0x7F) << shift if b < 0x80 { break } } case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IsolationLevel", wireType) } m.IsolationLevel = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.IsolationLevel |= (IsolationLevel(b) & 0x7F) << shift if b < 0x80 { break } } case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NotFillCache", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := 
dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.NotFillCache = bool(v != 0) case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SyncLog", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.SyncLog = bool(v != 0) case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HandleTime", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.HandleTime = bool(v != 0) case 11: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ScanDetail", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.ScanDetail = bool(v != 0) case 12: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReplicaRead", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.ReplicaRead = bool(v != 0) case 13: if wireType == 0 { var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } m.ResolvedLocks = append(m.ResolvedLocks, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } m.ResolvedLocks = append(m.ResolvedLocks, v) } } else { return fmt.Errorf("proto: wrong wireType = %d for field ResolvedLocks", wireType) } case 14: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxExecutionDurationMs", wireType) } m.MaxExecutionDurationMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxExecutionDurationMs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 15: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AppliedIndex", wireType) } m.AppliedIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.AppliedIndex |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } 
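// ResolvedLocks (case 13 above) shows the repeated-scalar convention: wire
// type 0 carries a single varint element, while wire type 2 carries a packed
// run, a byte length followed by back-to-back varints, which is how proto3
// packs repeated integers by default. Both spellings must be accepted.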
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *HandleTime) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HandleTime: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HandleTime: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field WaitMs", wireType) } m.WaitMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.WaitMs |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ProcessMs", wireType) } m.ProcessMs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ProcessMs |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanInfo: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) } m.Total = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Total |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Processed", wireType) } m.Processed = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Processed |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
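// Total and Processed (and WaitMs/ProcessMs in HandleTime above) accumulate
// into int64 through the same varint loop; plain (non-zigzag) encoding means
// a negative int64 would occupy the full 10 wire bytes.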
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanDetail) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanDetail: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanDetail: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Write", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Write == nil { m.Write = &ScanInfo{} } if err := m.Write.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Lock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Lock == nil { m.Lock = &ScanInfo{} } if err := m.Lock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Data == nil { m.Data = &ScanInfo{} } if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ExecDetails) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ExecDetails: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ExecDetails: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field HandleTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.HandleTime == nil { m.HandleTime = &HandleTime{} } if err := m.HandleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ScanDetail", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.ScanDetail == nil { m.ScanDetail = &ScanDetail{} } if err := m.ScanDetail.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Version |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
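// Every Unmarshal here follows one skeleton: read a tag varint, split it as
// fieldNum = wire >> 3 and wireType = wire & 0x7, reject end-group (4) and
// non-positive field numbers, then switch on the field. E.g. GetRequest's
// Context (field 1, embedded message) arrives under tag byte 0x0a = (1<<3)|2.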
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.NotFound = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) 
if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Limit |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Version |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.KeyOnly = bool(v != 0) case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Reverse = bool(v != 0) case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
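// Bool fields such as KeyOnly and Reverse above are varints on the wire: the
// value is collected into v and any non-zero result becomes true via
// bool(v != 0).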
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *KvPair) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: KvPair: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: KvPair: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Pairs = append(m.Pairs, &KvPair{}) if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
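// Repeated message fields like Pairs grow one element per occurrence of the
// tag: append a zero-valued &KvPair{} and unmarshal the sub-slice into the
// slot just added, so interleaved Pairs entries keep their wire order.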
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Mutation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Mutation: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= (Op(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Assertion", wireType) } m.Assertion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Assertion |= (Assertion(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
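// Enum fields (Op, Assertion) travel as ordinary varints; the loop simply
// accumulates into the typed value, e.g. m.Op |= (Op(b) & 0x7F) << shift,
// so no range validation happens at decode time.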
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PrewriteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PrewriteRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PrewriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Mutations = append(m.Mutations, &Mutation{}) if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...) 
if m.PrimaryLock == nil { m.PrimaryLock = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) } m.LockTtl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTtl |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SkipConstraintCheck", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.SkipConstraintCheck = bool(v != 0) case 7: if wireType == 0 { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.IsPessimisticLock = append(m.IsPessimisticLock, bool(v != 0)) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if packedLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + packedLen if postIndex > l { return io.ErrUnexpectedEOF } for iNdEx < postIndex { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.IsPessimisticLock = append(m.IsPessimisticLock, bool(v != 0)) } } else { return fmt.Errorf("proto: wrong wireType = %d for field IsPessimisticLock", wireType) } case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TxnSize", wireType) } m.TxnSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TxnSize |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ForUpdateTs", wireType) } m.ForUpdateTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ForUpdateTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MinCommitTs", wireType) } m.MinCommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MinCommitTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PrewriteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PrewriteResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PrewriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Errors = append(m.Errors, &KeyError{}) if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
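// Unknown tags are preserved byte-for-byte in XXX_unrecognized so the message
// re-encodes losslessly; the cursor then advances past the skipped field.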
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *TxnHeartBeatRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TxnHeartBeatRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TxnHeartBeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...) if m.PrimaryLock == nil { m.PrimaryLock = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AdviseLockTtl", wireType) } m.AdviseLockTtl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.AdviseLockTtl |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
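// Reminder on the encoding: varints are little-endian base-128, so the byte
// pair 0x96 0x01 decodes to 0x16 | (0x01 << 7) = 150.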
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *TxnHeartBeatResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TxnHeartBeatResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TxnHeartBeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) } m.LockTtl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTtl |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
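// Each key varint packs (fieldNum << 3) | wireType; for example 0x12 is
// field 2 with wire type 2 (length-delimited).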
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PessimisticLockRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PessimisticLockRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PessimisticLockRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Mutations = append(m.Mutations, &Mutation{}) if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PrimaryLock", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PrimaryLock = append(m.PrimaryLock[:0], dAtA[iNdEx:postIndex]...) 
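// PessimisticLockRequest reuses the Mutations/PrimaryLock layout of
// PrewriteRequest. WaitTimeout (field 8) is a plain int64, not sint64, so a
// negative timeout arrives as a full ten-byte two's-complement varint rather
// than a zigzag-encoded one.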
if m.PrimaryLock == nil { m.PrimaryLock = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) } m.LockTtl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTtl |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ForUpdateTs", wireType) } m.ForUpdateTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ForUpdateTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IsFirstLock", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.IsFirstLock = bool(v != 0) case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) } m.WaitTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.WaitTimeout |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Force = bool(v != 0) case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReturnValues", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.ReturnValues = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PessimisticLockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PessimisticLockResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PessimisticLockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Errors = append(m.Errors, &KeyError{}) if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType) } m.CommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
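// Value (field 4) is a singular bytes field rewritten in place, while Values
// (field 5, below) is repeated: each occurrence appends a freshly allocated
// copy instead of reusing the previous backing array.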
if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Values = append(m.Values, make([]byte, postIndex-iNdEx)) copy(m.Values[len(m.Values)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PessimisticRollbackRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PessimisticRollbackRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PessimisticRollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ForUpdateTs", wireType) } m.ForUpdateTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ForUpdateTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx 
= postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PessimisticRollbackResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PessimisticRollbackResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PessimisticRollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Errors = append(m.Errors, &KeyError{}) if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
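// The remaining pairs (Commit, Import, BatchRollback, CheckTxnStatus, Cleanup,
// BatchGet, ScanLock, ResolveLock) follow this same tag-dispatch skeleton;
// only the per-field cases differ. A minimal caller sketch (hypothetical
// variable names, shown for illustration only):
//
//	var req CommitRequest
//	if err := req.Unmarshal(wireBytes); err != nil {
//		// wireBytes was truncated or otherwise malformed
//	}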
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CommitRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CommitRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) } m.CommitVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CommitResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CommitResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CommitResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) } m.CommitVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ImportRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ImportRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ImportRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Mutations = append(m.Mutations, &Mutation{}) if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) } m.CommitVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ImportResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ImportResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ImportResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
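// ImportResponse.Error above is a protobuf string field: the bytes are copied
// into a Go string as-is, with no UTF-8 validation on decode.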
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchRollbackRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BatchRollbackRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BatchRollbackRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchRollbackResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BatchRollbackResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BatchRollbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckTxnStatusRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckTxnStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckTxnStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PrimaryKey = append(m.PrimaryKey[:0], dAtA[iNdEx:postIndex]...) 
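// An empty PrimaryKey gets the same non-nil normalization as other bytes
// fields. Fields 3-5 are plain uint64 varints, and RollbackIfNotExist
// (field 6) decodes any nonzero varint payload as true.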
if m.PrimaryKey == nil { m.PrimaryKey = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTs", wireType) } m.LockTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CallerStartTs", wireType) } m.CallerStartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CallerStartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CurrentTs", wireType) } m.CurrentTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CurrentTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RollbackIfNotExist", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.RollbackIfNotExist = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckTxnStatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckTxnStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckTxnStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LockTtl", wireType) } m.LockTtl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.LockTtl |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) } m.CommitVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } m.Action = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Action |= (Action(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CleanupRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CleanupRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CleanupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CurrentTs", wireType) } m.CurrentTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CurrentTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CleanupResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CleanupResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CleanupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) } m.CommitVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchGetRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BatchGetRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BatchGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } m.Version = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Version |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
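// Every length-delimited field is bounds-checked (postIndex > l) before
// slicing, so a truncated buffer fails cleanly with io.ErrUnexpectedEOF
// instead of panicking on an out-of-range slice.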
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *BatchGetResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: BatchGetResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: BatchGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Pairs = append(m.Pairs, &KvPair{}) if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanLockRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanLockRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanLockRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxVersion", wireType) } m.MaxVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Limit |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
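// The shift >= 64 guard (ErrIntOverflowKvrpcpb) rejects varints longer than
// ten bytes, the maximum needed for a 64-bit value. For the uint32 Limit
// field above, payload bits shifted past bit 31 are silently discarded.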
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ScanLockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ScanLockResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ScanLockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Locks = append(m.Locks, &LockInfo{}) if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *TxnInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TxnInfo: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TxnInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) } m.Txn = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Txn |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Status |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ResolveLockRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResolveLockRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResolveLockRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartVersion", wireType) } m.StartVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitVersion", wireType) } m.CommitVersion = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitVersion |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TxnInfos", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.TxnInfos = append(m.TxnInfos, &TxnInfo{}) if err := m.TxnInfos[len(m.TxnInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ResolveLockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResolveLockResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResolveLockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex 
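// Field numbers this message does not recognize fall through to the
// default branch below: skipKvrpcpb measures the unknown field from its
// wire type, the result is bounds-checked, and the raw bytes are kept in
// XXX_unrecognized so they survive a decode/re-encode round trip instead
// of being silently dropped.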
default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GCRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GCRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GCRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SafePoint", wireType) } m.SafePoint = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.SafePoint |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GCResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GCResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GCResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Error == nil { m.Error = &KeyError{} } if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
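// Embedded messages (wire type 2) are decoded in three steps, as in the
// RegionError and Error cases above: read the length into msglen, check
// postIndex against the buffer, then lazily allocate the target (e.g.
// m.RegionError = &errorpb.Error{}) and recurse into its Unmarshal over
// exactly dAtA[iNdEx:postIndex]. Clamping the sub-slice means a corrupt
// inner length can never read past the enclosing message.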
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawGetRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawGetRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
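// Bytes and string fields are handled asymmetrically above: the Key
// field reuses its backing array via append(m.Key[:0], ...), with the
// nil check turning a present-but-empty value into []byte{} so it stays
// distinguishable from an absent (nil) field, while the Cf string is
// materialized with string(dAtA[...]), an immutable copy that remains
// valid after the input buffer is recycled.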
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawGetResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawGetResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NotFound", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.NotFound = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
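// Protobuf has no dedicated bool wire type: NotFound above travels as a
// plain varint (wire type 0) and is mapped back with bool(v != 0), so
// any nonzero value decodes as true.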
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawPutRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawPutRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawPutRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawPutResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawPutResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawPutResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchPutRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchPutRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchPutRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Pairs = append(m.Pairs, &KvPair{}) if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
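// Repeated message fields such as Pairs grow one element per wire
// record: each occurrence of the tag appends a zero value (&KvPair{})
// and unmarshals the delimited sub-slice into the new tail element, so
// decoding N pairs costs N small allocations plus any append growth.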
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchPutResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchPutResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchPutResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchGetRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchGetRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
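// Repeated bytes fields like Keys arrive as one length-delimited record
// per element; every occurrence of field 2 appends a freshly allocated
// slice and copies the payload into it, so no element aliases the
// caller's buffer once Unmarshal returns.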
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchGetResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchGetResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Pairs = append(m.Pairs, &KvPair{}) if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawDeleteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawDeleteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawDeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchDeleteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchDeleteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchDeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NotifyOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.NotifyOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawDeleteRangeRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawDeleteRangeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawDeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawDeleteRangeResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawDeleteRangeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawDeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawScanRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawScanRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawScanRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) 
if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Limit |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.KeyOnly = bool(v != 0) case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Reverse = bool(v != 0) case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
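// Integer width follows the schema rather than the wire: Limit above
// accumulates into uint32 while the version fields use uint64, yet both
// are ordinary varints on the wire. In Go, bits shifted past the
// target's width simply vanish, so an oversized encoded value is
// silently truncated rather than reported as an error.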
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawScanResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawScanResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Kvs = append(m.Kvs, &KvPair{}) if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *KeyRange) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: KeyRange: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: KeyRange: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) 
if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchScanRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchScanRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchScanRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Ranges = append(m.Ranges, &KeyRange{}) if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field EachLimit", wireType) } m.EachLimit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.EachLimit |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field KeyOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.KeyOnly = bool(v != 
0) case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cf", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Cf = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Reverse", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.Reverse = bool(v != 0) default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RawBatchScanResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RawBatchScanResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RawBatchScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Kvs = append(m.Kvs, &KvPair{}) if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccWrite) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccWrite: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccWrite: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Type |= (Op(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CommitTs", wireType) } m.CommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.CommitTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ShortValue", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.ShortValue = append(m.ShortValue[:0], dAtA[iNdEx:postIndex]...) if m.ShortValue == nil { m.ShortValue = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccValue: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) if m.Value == nil { m.Value = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccLock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccLock: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccLock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Type |= (Op(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Primary = append(m.Primary[:0], dAtA[iNdEx:postIndex]...) if m.Primary == nil { m.Primary = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ShortValue", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.ShortValue = append(m.ShortValue[:0], dAtA[iNdEx:postIndex]...) if m.ShortValue == nil { m.ShortValue = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
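			// skipKvrpcpb was called with iNdEx rewound to preIndex, so skippy
			// covers the field's tag bytes as well as its payload; advancing by
			// skippy lands exactly on the next tag.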
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccInfo: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Lock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Lock == nil { m.Lock = &MvccLock{} } if err := m.Lock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Writes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Writes = append(m.Writes, &MvccWrite{}) if err := m.Writes[len(m.Writes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Values = append(m.Values, &MvccValue{}) if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccGetByKeyRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccGetByKeyRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccGetByKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccGetByKeyResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccGetByKeyResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccGetByKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Info == nil { m.Info = &MvccInfo{} } if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccGetByStartTsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccGetByStartTsRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccGetByStartTsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) } m.StartTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.StartTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MvccGetByStartTsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MvccGetByStartTsResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MvccGetByStartTsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) if m.Key == nil { m.Key = []byte{} } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Info == nil { m.Info = &MvccInfo{} } if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SplitRegionRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SplitRegionRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SplitRegionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SplitKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.SplitKey = append(m.SplitKey[:0], dAtA[iNdEx:postIndex]...) if m.SplitKey == nil { m.SplitKey = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SplitKeys", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.SplitKeys = append(m.SplitKeys, make([]byte, postIndex-iNdEx)) copy(m.SplitKeys[len(m.SplitKeys)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SplitRegionResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SplitRegionResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SplitRegionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Left == nil { m.Left = &metapb.Region{} } if err := m.Left.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Right == nil { m.Right = &metapb.Region{} } if err := m.Right.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Regions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Regions = append(m.Regions, &metapb.Region{}) if err := m.Regions[len(m.Regions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
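			// Skipping is bounds-checked above: a skip that would run past the
			// end of the buffer returns io.ErrUnexpectedEOF instead of reading
			// out of range.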
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UnsafeDestroyRangeRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UnsafeDestroyRangeRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UnsafeDestroyRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field EndKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.EndKey = append(m.EndKey[:0], dAtA[iNdEx:postIndex]...) if m.EndKey == nil { m.EndKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UnsafeDestroyRangeResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UnsafeDestroyRangeResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UnsafeDestroyRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RegisterLockObserverRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RegisterLockObserverRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RegisterLockObserverRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType) } m.MaxTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RegisterLockObserverResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RegisterLockObserverResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RegisterLockObserverResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckLockObserverRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckLockObserverRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckLockObserverRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType) } m.MaxTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckLockObserverResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckLockObserverResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckLockObserverResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IsClean", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } m.IsClean = bool(v != 0) case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Locks = append(m.Locks, &LockInfo{}) if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RemoveLockObserverRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RemoveLockObserverRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RemoveLockObserverRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType) } m.MaxTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RemoveLockObserverResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RemoveLockObserverResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RemoveLockObserverResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PhysicalScanLockRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PhysicalScanLockRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PhysicalScanLockRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxTs", wireType) } m.MaxTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MaxTs |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.StartKey = append(m.StartKey[:0], dAtA[iNdEx:postIndex]...) if m.StartKey == nil { m.StartKey = []byte{} } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Limit |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PhysicalScanLockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PhysicalScanLockResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PhysicalScanLockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } m.Locks = append(m.Locks, &LockInfo{}) if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ReadIndexRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ReadIndexRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ReadIndexRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Context == nil { m.Context = &Context{} } if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ReadIndexResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ReadIndexResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ReadIndexResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RegionError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthKvrpcpb } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.RegionError == nil { m.RegionError = &errorpb.Error{} } if err := m.RegionError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadIndex", wireType) } m.ReadIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowKvrpcpb } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ReadIndex |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipKvrpcpb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthKvrpcpb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipKvrpcpb(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowKvrpcpb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowKvrpcpb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowKvrpcpb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthKvrpcpb } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowKvrpcpb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipKvrpcpb(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthKvrpcpb = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowKvrpcpb = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("kvrpcpb.proto", fileDescriptor_kvrpcpb_4b59c467e5a7b880) } var fileDescriptor_kvrpcpb_4b59c467e5a7b880 = []byte{ // 3144 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x3b, 0x5d, 0x6f, 0x1b, 0xc7, 0xb5, 0x5e, 0x7e, 0xf3, 0x90, 0x14, 0xa9, 0x91, 0x64, 0xd3, 0x76, 0x6c, 0x2b, 0x9b, 0x6b, 0x5b, 0xd1, 0xbd, 0x51, 0x6e, 0x94, 0xe0, 0xe2, 0xb6, 0x28, 0x82, 0xc4, 0x92, 0x13, 0x2b, 0xfe, 0x12, 0x56, 0x8c, 0x0b, 0x03, 0x6d, 0x36, 0xe3, 0xdd, 0xa1, 0xb8, 0xd5, 0x72, 0x77, 0xb3, 0x33, 0x94, 0xc9, 0x04, 0x05, 0x5a, 0x14, 0x2d, 0x1a, 0xa0, 0x45, 0xd1, 0xb4, 0x40, 0xf2, 0x90, 0xd7, 0x02, 0xed, 0x63, 0xff, 0x42, 0x50, 0xa0, 0x7d, 0x29, 0x9a, 0x87, 0x3e, 0xf4, 0xad, 0x45, 0xfa, 0x23, 0xfa, 0x56, 0x14, 0x33, 0xb3, 0xb3, 0x1f, 0x24, 0x2d, 0x0b, 0x0c, 0xa5, 0x14, 0x7d, 0xe2, 0xce, 0x39, 0x67, 0x66, 0xce, 0xd7, 0x9c, 0x39, 0x73, 0x66, 0x08, 0x8d, 0x83, 0xc3, 0x30, 0xb0, 0x82, 0x47, 0x1b, 0x41, 0xe8, 0x33, 0x1f, 0x95, 0xa3, 0xe6, 0x85, 0x7a, 0x9f, 0x30, 0xac, 0xc0, 0x17, 0x1a, 0x24, 0x0c, 0xfd, 0x30, 0x6e, 0x2e, 0xef, 0xfb, 0xfb, 0xbe, 0xf8, 0x7c, 0x91, 0x7f, 0x45, 0xd0, 0x66, 0x38, 0xa0, 0x4c, 0x7c, 0x4a, 0x80, 0xfe, 0x99, 0x06, 0x95, 0x3b, 0xbe, 0x75, 0xb0, 0xe3, 0x75, 0x7d, 0xf4, 0x2c, 0xd4, 0x83, 0xd0, 0xe9, 0xe3, 0x70, 0x64, 0xba, 0xbe, 0x75, 0xd0, 0xd6, 0x56, 0xb5, 0xb5, 0xba, 0x51, 0x8b, 0x60, 0x9c, 0x8c, 0x93, 0x70, 0x94, 0x79, 0x48, 0x42, 0xea, 0xf8, 0x5e, 0x3b, 0xb7, 0xaa, 0xad, 0x15, 0x8c, 0x1a, 0x87, 0x3d, 0x90, 0x20, 0xd4, 0x82, 0xfc, 0x01, 0x19, 0xb5, 0xf3, 0xa2, 0x33, 0xff, 0x44, 0xe7, 0xa1, 0x22, 0x3a, 0x31, 0xe6, 0xb6, 0x0b, 0xa2, 0x43, 0x99, 0xb7, 0x3b, 0xcc, 0xe5, 0x28, 0x36, 0xf4, 0x4c, 0xea, 
0xbc, 0x4f, 0xda, 0x45, 0x89, 0x62, 0x43, 0x6f, 0xcf, 0x79, 0x9f, 0xa0, 0x35, 0xa8, 0xca, 0x5e, 0xa3, 0x80, 0xb4, 0x4b, 0xab, 0xda, 0xda, 0xc2, 0x66, 0x6d, 0x43, 0xa9, 0xe2, 0x7e, 0x60, 0x88, 0x31, 0x3b, 0xa3, 0x80, 0xe8, 0xab, 0x50, 0x7f, 0xdd, 0x0d, 0x09, 0xb6, 0x47, 0x37, 0x87, 0x0e, 0x65, 0x8a, 0x03, 0x2d, 0xe6, 0x40, 0xff, 0x71, 0x1e, 0x2a, 0xb7, 0xc9, 0xe8, 0x26, 0x57, 0x11, 0x7a, 0x1e, 0x4a, 0xbc, 0x2b, 0xb1, 0x05, 0x45, 0x6d, 0x73, 0x31, 0x1e, 0x55, 0x69, 0xc2, 0x88, 0x08, 0xd0, 0x33, 0x50, 0x0d, 0x09, 0x0b, 0x47, 0xf8, 0x91, 0x4b, 0x84, 0xac, 0x55, 0x23, 0x01, 0xa0, 0x65, 0x28, 0xe2, 0x47, 0x7e, 0xc8, 0x84, 0xac, 0x55, 0x43, 0x36, 0xd0, 0x26, 0x54, 0x2c, 0xdf, 0xeb, 0xba, 0x8e, 0xc5, 0x84, 0xb4, 0xb5, 0xcd, 0xb3, 0xf1, 0x04, 0xdf, 0x0c, 0x1d, 0x46, 0xb6, 0x22, 0xac, 0x11, 0xd3, 0xa1, 0xaf, 0x43, 0x03, 0x4b, 0x09, 0x4c, 0xc2, 0x45, 0x10, 0xba, 0xa8, 0x6d, 0xae, 0xc4, 0x1d, 0xd3, 0xf2, 0x19, 0x75, 0x9c, 0x96, 0xf6, 0x05, 0xa8, 0xd8, 0x04, 0xdb, 0xc2, 0x62, 0xa5, 0x31, 0x81, 0xb6, 0x23, 0x84, 0x11, 0x93, 0xa0, 0x6d, 0x58, 0xb4, 0xfc, 0x7e, 0xdf, 0x61, 0x26, 0xa3, 0x26, 0x19, 0x06, 0x4e, 0x48, 0xec, 0x76, 0x59, 0xf4, 0x6b, 0xc7, 0xfd, 0xb6, 0x04, 0x45, 0x87, 0xde, 0x94, 0x78, 0xa3, 0x69, 0x65, 0x01, 0xe8, 0xff, 0xa1, 0xc1, 0xed, 0xe6, 0xf9, 0xcc, 0xec, 0xfa, 0x03, 0xcf, 0x6e, 0x57, 0xc4, 0x08, 0xcb, 0xf1, 0x08, 0x9d, 0xa1, 0x77, 0xcf, 0x67, 0x6f, 0x70, 0x9c, 0x51, 0x63, 0x49, 0x43, 0xff, 0x95, 0x06, 0x8d, 0x8c, 0x1a, 0xb8, 0x0f, 0x50, 0x86, 0x43, 0xce, 0x90, 0xb0, 0x48, 0xc1, 0x28, 0x8b, 0x76, 0x87, 0xa2, 0x2b, 0x50, 0x53, 0x3a, 0xe2, 0x58, 0xe9, 0x6d, 0xa0, 0x40, 0x1d, 0x3a, 0xc5, 0xd9, 0xda, 0x50, 0x8e, 0x1c, 0x56, 0x68, 0xbf, 0x6e, 0xa8, 0x26, 0xfa, 0x1f, 0x40, 0xf1, 0x60, 0xb1, 0x0a, 0x22, 0xaf, 0x6b, 0x29, 0x8c, 0x92, 0x5c, 0xff, 0x0e, 0x54, 0x94, 0xf6, 0xd0, 0x39, 0x28, 0x4b, 0x57, 0x54, 0x0c, 0x0a, 0xff, 0xe8, 0xd0, 0xd8, 0xb3, 0x39, 0x0f, 0x39, 0x39, 0x1b, 0x6f, 0xdf, 0x26, 0x23, 0xb4, 0x0e, 0x8b, 0x4a, 0xe7, 0x1c, 0x6d, 0xf6, 0x30, 0xed, 0x09, 0x3e, 0x0b, 0x46, 0x53, 0x21, 0x6e, 0x93, 0xd1, 0x2d, 0x4c, 0x7b, 0xfa, 0x47, 0x1a, 0x34, 0xc7, 0x54, 0x7e, 0x94, 0x56, 0x36, 0x60, 0x09, 0x33, 0x46, 0xfa, 0x01, 0x23, 0x76, 0x4a, 0x12, 0xa9, 0x9d, 0xc5, 0x18, 0xa5, 0x46, 0x9c, 0xa2, 0x24, 0x1d, 0x1a, 0x7d, 0xc7, 0x4b, 0xf5, 0x95, 0xcb, 0xb2, 0xd6, 0x77, 0xbc, 0x58, 0x01, 0x3b, 0x50, 0x4b, 0x19, 0xf1, 0x29, 0x56, 0x52, 0x71, 0x23, 0x51, 0x04, 0x44, 0xa0, 0xdb, 0x64, 0xa4, 0xff, 0xbe, 0x00, 0xe5, 0x2d, 0xdf, 0x63, 0x64, 0xc8, 0xd0, 0x45, 0xbe, 0xa4, 0xf6, 0x1d, 0xdf, 0x33, 0x1d, 0x3b, 0x1a, 0xa8, 0x22, 0x01, 0x3b, 0x36, 0xfa, 0x3f, 0xa8, 0x47, 0x48, 0x12, 0xf8, 0x56, 0x4f, 0x0c, 0x55, 0xdb, 0x5c, 0xda, 0x88, 0x22, 0x9d, 0x21, 0x70, 0x37, 0x39, 0xca, 0xa8, 0x85, 0x49, 0x03, 0xad, 0x42, 0x21, 0x20, 0x24, 0x14, 0x22, 0xd6, 0x36, 0xeb, 0x8a, 0x7e, 0x97, 0x90, 0xd0, 0x10, 0x18, 0x84, 0xa0, 0xc0, 0x48, 0xd8, 0x8f, 0xcc, 0x2d, 0xbe, 0xd1, 0x8b, 0x50, 0x09, 0x42, 0xc7, 0x0f, 0x1d, 0x36, 0x8a, 0x02, 0xcc, 0x52, 0x66, 0x05, 0x60, 0xcf, 0xde, 0x0d, 0x1d, 0x23, 0x26, 0x42, 0xaf, 0x41, 0xd3, 0xa1, 0xbe, 0x8b, 0x19, 0xe7, 0xd0, 0x25, 0x87, 0xc4, 0x15, 0x2b, 0x67, 0x61, 0xf3, 0x5c, 0xdc, 0x6f, 0x47, 0xe1, 0xef, 0x70, 0xb4, 0xb1, 0xe0, 0x64, 0xda, 0xe8, 0xbf, 0x60, 0x41, 0xac, 0x19, 0xc7, 0x75, 0x4d, 0x0b, 0x5b, 0x3d, 0x22, 0x16, 0x4e, 0xc5, 0xa8, 0x7b, 0x3e, 0x7b, 0xc3, 0x71, 0xdd, 0x2d, 0x0e, 0x13, 0xba, 0x1e, 0x79, 0x96, 0xe9, 0xfa, 0xfb, 0xed, 0xaa, 0xc0, 0x97, 0x79, 0xfb, 0x8e, 0xbf, 0xcf, 0x75, 0xdd, 0xc3, 0x9e, 0xed, 0x12, 0x93, 0x39, 0x7d, 0xd2, 0x06, 0x81, 0x05, 0x09, 0xea, 0x38, 0x7d, 0xc2, 
0x09, 0xa8, 0x85, 0x3d, 0xd3, 0x26, 0x0c, 0x3b, 0x6e, 0xbb, 0x26, 0x09, 0x38, 0x68, 0x5b, 0x40, 0x78, 0x08, 0x0f, 0x49, 0xe0, 0x3a, 0x16, 0x36, 0x79, 0x14, 0x69, 0xd7, 0x05, 0x45, 0x2d, 0x82, 0x19, 0x04, 0xdb, 0xe8, 0x2a, 0x2c, 0x84, 0x84, 0xfa, 0xee, 0x21, 0xb1, 0xc5, 0x4e, 0x40, 0xdb, 0x8d, 0xd5, 0xfc, 0x5a, 0xc1, 0x68, 0x28, 0x28, 0x0f, 0x94, 0x14, 0x7d, 0x0d, 0xce, 0xf7, 0xf1, 0xd0, 0x24, 0x43, 0x62, 0x0d, 0x84, 0x4a, 0xec, 0x41, 0x28, 0x75, 0xd3, 0xa7, 0xed, 0x05, 0xa1, 0xe8, 0xb3, 0x7d, 0x3c, 0xbc, 0xa9, 0xf0, 0xdb, 0x11, 0xfa, 0x2e, 0x45, 0xcf, 0x41, 0x03, 0x07, 0x81, 0xeb, 0x10, 0xdb, 0x74, 0x3c, 0x9b, 0x0c, 0xdb, 0x4d, 0x41, 0x5e, 0x8f, 0x80, 0x3b, 0x1c, 0xf6, 0x56, 0xa1, 0x52, 0x68, 0x15, 0x39, 0x67, 0xd8, 0x36, 0xdf, 0x1b, 0xf8, 0xe1, 0xa0, 0xaf, 0x6f, 0x03, 0xdc, 0x4a, 0x64, 0x3d, 0x07, 0xe5, 0xc7, 0xd8, 0x61, 0x7c, 0x3a, 0xee, 0x49, 0x79, 0xa3, 0xc4, 0x9b, 0x77, 0x29, 0xba, 0x04, 0x10, 0x84, 0xbe, 0x45, 0x28, 0xe5, 0xb8, 0x9c, 0xc0, 0x55, 0x23, 0xc8, 0x5d, 0xaa, 0xbf, 0x0a, 0x95, 0x3d, 0x0b, 0x7b, 0x62, 0xd3, 0x5b, 0x86, 0x22, 0xf3, 0x19, 0x76, 0xa3, 0x11, 0x64, 0x83, 0x07, 0xfe, 0x88, 0x9c, 0xd8, 0x63, 0xfd, 0x89, 0xad, 0xff, 0x40, 0x03, 0xd8, 0x4b, 0x34, 0x7a, 0x1d, 0x8a, 0x8f, 0x79, 0x44, 0x9b, 0xd8, 0x4f, 0xd4, 0x24, 0x86, 0xc4, 0xa3, 0xab, 0x50, 0x10, 0x61, 0x3a, 0xf7, 0x24, 0x3a, 0x81, 0xe6, 0x64, 0x36, 0x66, 0x38, 0xf2, 0xe6, 0x69, 0x64, 0x1c, 0xad, 0x8f, 0xa0, 0xc6, 0x55, 0x2b, 0x99, 0xa0, 0xe8, 0x95, 0xac, 0x67, 0x68, 0xd1, 0xd2, 0x51, 0x9d, 0x13, 0xb5, 0x65, 0xdc, 0xe5, 0x95, 0xac, 0xbb, 0xe4, 0xc6, 0x7a, 0x25, 0x52, 0xa6, 0x7d, 0x48, 0xb7, 0x01, 0xde, 0x24, 0xcc, 0x20, 0xef, 0x0d, 0x08, 0x65, 0x68, 0x1d, 0xca, 0x96, 0x5c, 0xdd, 0xd1, 0xac, 0xad, 0xd4, 0x32, 0x12, 0x70, 0x43, 0x11, 0xa8, 0x58, 0x94, 0xcb, 0x04, 0x6c, 0x95, 0x4d, 0xc8, 0xf0, 0xa8, 0x9a, 0xfa, 0xa7, 0x1a, 0xd4, 0xc4, 0x34, 0x34, 0xf0, 0x3d, 0x4a, 0xd0, 0x4b, 0x49, 0x74, 0xe0, 0x1b, 0x79, 0x34, 0xd9, 0xc2, 0x86, 0xca, 0x7c, 0xc4, 0xf6, 0x1e, 0x07, 0x06, 0xb1, 0xd7, 0x5f, 0x87, 0xa2, 0xa4, 0x1d, 0x57, 0xb9, 0xca, 0x06, 0x0c, 0x89, 0xe7, 0x6e, 0x70, 0x88, 0xdd, 0x01, 0x89, 0xa2, 0xa4, 0x6c, 0xf0, 0x60, 0x95, 0x6c, 0x71, 0x05, 0xb1, 0x50, 0x2a, 0x9e, 0xda, 0xc9, 0xfe, 0xac, 0x41, 0x8d, 0xeb, 0x67, 0x16, 0x35, 0x5c, 0x84, 0xaa, 0x8c, 0xa6, 0x89, 0x32, 0x64, 0x78, 0xe5, 0x5b, 0xc7, 0x32, 0x14, 0x5d, 0xa7, 0xef, 0xc8, 0xbc, 0xa2, 0x61, 0xc8, 0x46, 0x5a, 0x4f, 0x85, 0x8c, 0x9e, 0x78, 0xb8, 0xe0, 0x3b, 0x8c, 0xef, 0xb9, 0x23, 0x11, 0xdf, 0x2a, 0x46, 0xf9, 0x80, 0x8c, 0xee, 0x7b, 0xae, 0x50, 0x6e, 0x48, 0x38, 0x9d, 0x4c, 0xa1, 0x2a, 0x86, 0x6a, 0xf2, 0xb5, 0x43, 0x3c, 0x5b, 0xcc, 0x5f, 0x16, 0xf3, 0x97, 0x88, 0x67, 0xf3, 0x60, 0xfd, 0x10, 0x4a, 0xb7, 0x0f, 0x77, 0xb1, 0x93, 0x52, 0x9e, 0xf6, 0x14, 0xe5, 0x4d, 0x1a, 0x75, 0xaa, 0x3a, 0xf5, 0x1e, 0xd4, 0xa5, 0xc2, 0x66, 0x37, 0xe8, 0x55, 0x28, 0x06, 0xd8, 0x09, 0xf9, 0xa2, 0xce, 0xaf, 0xd5, 0x36, 0x9b, 0x09, 0x4f, 0x82, 0x67, 0x43, 0x62, 0xf5, 0xef, 0x6b, 0x50, 0xb9, 0x3b, 0x60, 0x22, 0xdc, 0xa0, 0x8b, 0x90, 0xf3, 0x03, 0x31, 0xf8, 0x58, 0x0a, 0x99, 0xf3, 0x83, 0xe3, 0xf2, 0x8e, 0xfe, 0x17, 0xaa, 0x98, 0x52, 0x12, 0x32, 0x65, 0x80, 0x85, 0x4d, 0x94, 0xa4, 0x67, 0x0a, 0x63, 0x24, 0x44, 0xfa, 0x27, 0x79, 0x68, 0xee, 0x86, 0x44, 0x2c, 0xfd, 0x59, 0x7c, 0xe4, 0x45, 0xa8, 0xf6, 0x23, 0x11, 0x94, 0xb8, 0x89, 0x09, 0x94, 0x70, 0x46, 0x42, 0x33, 0x91, 0xbf, 0xe7, 0x27, 0xf3, 0xf7, 0xe7, 0xa0, 0x21, 0xfd, 0x2e, 0xeb, 0x4a, 0x75, 0x01, 0x7c, 0x90, 0xf8, 0x53, 0x9c, 0xaf, 0x17, 0xb3, 0xf9, 0xfa, 0x26, 0xac, 0xd0, 0x03, 0x27, 0x30, 0x2d, 0xdf, 0xa3, 0x2c, 0xc4, 0x8e, 0xc7, 
0x4c, 0xab, 0x47, 0xa2, 0xcc, 0xb3, 0x62, 0x2c, 0x71, 0xe4, 0x56, 0x8c, 0xdb, 0xe2, 0x28, 0x9e, 0xae, 0x38, 0xd4, 0x0c, 0x08, 0xa5, 0x4e, 0xdf, 0xa1, 0xcc, 0xb1, 0x24, 0x77, 0xe5, 0xd5, 0xfc, 0x5a, 0xc5, 0x58, 0x74, 0xe8, 0x6e, 0x82, 0x11, 0x3c, 0xa6, 0xcf, 0x04, 0x95, 0xec, 0x99, 0x40, 0x87, 0x46, 0xd7, 0x0f, 0xcd, 0x41, 0x60, 0x63, 0x46, 0x78, 0x26, 0x52, 0x95, 0x79, 0x4b, 0xd7, 0x0f, 0xdf, 0x16, 0xb0, 0x0e, 0x9d, 0xcc, 0x6d, 0x60, 0x32, 0xb7, 0x09, 0xa0, 0x95, 0x58, 0x66, 0x76, 0x67, 0x7c, 0x1e, 0x4a, 0x02, 0x3b, 0x69, 0x9e, 0x78, 0x85, 0x44, 0x04, 0xfa, 0x6f, 0x35, 0x58, 0xea, 0x0c, 0xbd, 0x5b, 0x04, 0x87, 0xec, 0x06, 0xc1, 0x33, 0xc5, 0xce, 0x71, 0xfb, 0xe6, 0x8e, 0x61, 0xdf, 0xfc, 0x14, 0xfb, 0x5e, 0x83, 0x26, 0xb6, 0x0f, 0x1d, 0x4a, 0xcc, 0xb1, 0x63, 0x59, 0x43, 0x82, 0xef, 0x48, 0x63, 0xeb, 0x3f, 0xd5, 0x60, 0x39, 0xcb, 0xf3, 0x29, 0x04, 0xe2, 0xb4, 0xf3, 0xe5, 0x33, 0xce, 0xa7, 0xff, 0x33, 0x07, 0x67, 0xc7, 0x9c, 0xe5, 0x3f, 0x65, 0x5d, 0x4d, 0x38, 0x76, 0x69, 0xaa, 0x63, 0x3b, 0xd4, 0xec, 0x3a, 0x21, 0x65, 0x6a, 0x05, 0x89, 0xcc, 0xcd, 0xa1, 0x6f, 0x70, 0x98, 0x3a, 0x9f, 0x8b, 0x8c, 0x88, 0xa7, 0x00, 0xfe, 0x80, 0x89, 0xf5, 0x93, 0x37, 0x6a, 0x1c, 0xd6, 0x91, 0x20, 0x1e, 0xde, 0xba, 0x7e, 0x68, 0x91, 0x28, 0xb3, 0x94, 0x0d, 0x2e, 0x40, 0x48, 0xd8, 0x20, 0xf4, 0x4c, 0x11, 0xee, 0x68, 0x94, 0x59, 0xd6, 0x25, 0xf0, 0x81, 0x80, 0xe9, 0x9f, 0x69, 0x70, 0x6e, 0xc2, 0x00, 0xa7, 0xb1, 0x7c, 0xf8, 0x7e, 0x99, 0x2c, 0x68, 0xe9, 0x16, 0x15, 0x75, 0x26, 0x4d, 0x02, 0x76, 0x21, 0x1d, 0xb0, 0xcf, 0x42, 0x29, 0x12, 0xa5, 0xb8, 0x9a, 0xe7, 0xfb, 0x9b, 0x6c, 0xf1, 0x03, 0xe8, 0x85, 0x94, 0x10, 0x86, 0xef, 0xba, 0x8f, 0xf0, 0x6c, 0x9e, 0x34, 0x61, 0xf5, 0xdc, 0x14, 0xab, 0x4f, 0x98, 0x36, 0x3f, 0x69, 0x5a, 0x04, 0x85, 0x03, 0x32, 0xe2, 0xc7, 0x30, 0xce, 0xa9, 0xf8, 0xd6, 0x3f, 0x80, 0x8b, 0x53, 0xd9, 0x3c, 0x95, 0x70, 0xf5, 0x1b, 0x0d, 0x1a, 0x32, 0x5a, 0x9e, 0x98, 0x5e, 0x94, 0xcc, 0xf9, 0x44, 0x66, 0x7e, 0xf0, 0x88, 0xcc, 0x9c, 0x5d, 0x47, 0x0d, 0x09, 0x8d, 0xba, 0xbe, 0x55, 0xa8, 0x14, 0x5b, 0x25, 0xa3, 0xf4, 0xc8, 0xf1, 0x5c, 0x7f, 0x5f, 0xff, 0x85, 0x06, 0x0b, 0x8a, 0xd7, 0x53, 0x08, 0x50, 0x93, 0x3c, 0xe6, 0xa7, 0xf0, 0xa8, 0xef, 0x43, 0x63, 0xa7, 0x1f, 0xf8, 0x61, 0xac, 0xc0, 0x4c, 0xd8, 0xd1, 0x8e, 0x11, 0x76, 0x26, 0x27, 0xca, 0x4d, 0x9b, 0xe8, 0x21, 0x2c, 0xa8, 0x89, 0x66, 0x97, 0x7e, 0x39, 0x2d, 0x7d, 0x35, 0x12, 0x55, 0xff, 0x00, 0x96, 0x6f, 0x60, 0x66, 0xf5, 0x4e, 0x7c, 0x8d, 0x4c, 0xf1, 0x05, 0x9d, 0xc2, 0xca, 0xd8, 0xe4, 0x27, 0x6f, 0x5c, 0xfd, 0x1f, 0x1a, 0xac, 0x88, 0xac, 0xa5, 0x33, 0xf4, 0xf6, 0x18, 0x66, 0x03, 0x3a, 0x8b, 0xcc, 0x4f, 0x2b, 0x88, 0xa4, 0x0b, 0x4a, 0xf9, 0x4c, 0x41, 0xe9, 0x1a, 0x34, 0x2d, 0xec, 0xba, 0x24, 0x34, 0xe3, 0x62, 0x8b, 0x5a, 0x01, 0x02, 0xbc, 0x17, 0x95, 0x5c, 0x2e, 0x01, 0x58, 0x83, 0x30, 0x24, 0x5e, 0xaa, 0x86, 0x55, 0x8d, 0x20, 0x1d, 0x8a, 0x5e, 0x82, 0x95, 0x30, 0x52, 0x9b, 0xe9, 0x74, 0x45, 0x99, 0x4e, 0xd6, 0x15, 0x65, 0x9a, 0x86, 0x14, 0x72, 0xa7, 0x7b, 0xcf, 0x67, 0xa2, 0x8c, 0xa8, 0xff, 0x55, 0x83, 0xb3, 0xe3, 0x92, 0x7f, 0xa5, 0xdb, 0xfd, 0x31, 0x83, 0x01, 0xba, 0x0e, 0x25, 0x6c, 0x89, 0xac, 0xbc, 0x28, 0xb2, 0xf2, 0xe4, 0x48, 0xf0, 0xba, 0x00, 0x1b, 0x11, 0x5a, 0xff, 0x88, 0xc7, 0x09, 0x97, 0x60, 0x6f, 0x10, 0xcc, 0xe7, 0xe4, 0x7a, 0xac, 0x64, 0x2b, 0x6b, 0xa9, 0xc2, 0x98, 0xa5, 0xf4, 0x5f, 0x6a, 0xd0, 0x8c, 0x99, 0xfa, 0xf7, 0x89, 0x5e, 0x07, 0xd0, 0x14, 0x8b, 0x6f, 0xc6, 0x53, 0xbe, 0x5a, 0xcf, 0xb9, 0x54, 0x6c, 0x7f, 0xf2, 0x39, 0xdf, 0x85, 0x56, 0x32, 0xd9, 0x89, 0x1f, 0x0d, 0x7f, 0xae, 0x41, 0x93, 0x9f, 0x42, 0x67, 0x4d, 0x1f, 0xaf, 
0x40, 0xad, 0x8f, 0x87, 0x63, 0xe1, 0x0c, 0xfa, 0x78, 0xa8, 0x2c, 0x9e, 0x39, 0xdb, 0xe7, 0x9f, 0x74, 0xb6, 0x2f, 0xa4, 0xce, 0xf6, 0xfa, 0xc7, 0x1a, 0xb4, 0x12, 0x9e, 0x4e, 0xc1, 0x0d, 0xae, 0x43, 0x51, 0x16, 0xf6, 0xf2, 0x63, 0x1b, 0x51, 0x7c, 0x05, 0x22, 0xf1, 0xfa, 0xcb, 0x50, 0xee, 0x0c, 0x65, 0xa5, 0xac, 0x05, 0x79, 0x36, 0xf4, 0xa2, 0x9a, 0x2d, 0xff, 0xe4, 0x29, 0x16, 0x15, 0xa1, 0x22, 0xd2, 0x42, 0xd4, 0xd2, 0xff, 0xa4, 0x01, 0x32, 0x64, 0xa9, 0x70, 0x56, 0x2d, 0x1f, 0x6b, 0xdb, 0x38, 0x9e, 0x33, 0xa3, 0x17, 0xa0, 0xca, 0x0f, 0x94, 0x8e, 0xd7, 0xf5, 0x65, 0x8a, 0x95, 0x9e, 0x39, 0x92, 0xce, 0xe0, 0x67, 0x4e, 0xfe, 0x91, 0x24, 0x63, 0xc5, 0xd4, 0x66, 0xf4, 0x1e, 0x2c, 0x65, 0x04, 0x3a, 0x85, 0xad, 0xe8, 0x01, 0x54, 0xdf, 0xdc, 0x9a, 0x45, 0x75, 0x97, 0x00, 0x28, 0xee, 0x12, 0x33, 0xf0, 0x1d, 0x8f, 0x45, 0x7a, 0xab, 0x72, 0xc8, 0x2e, 0x07, 0xe8, 0x3d, 0x00, 0x3e, 0xee, 0x29, 0x48, 0xf0, 0x6d, 0x68, 0x18, 0xf8, 0xf1, 0xdc, 0x0a, 0x85, 0x0b, 0x90, 0xb3, 0xba, 0xd1, 0x5d, 0x5b, 0xce, 0xea, 0xea, 0x3f, 0xd1, 0x60, 0x41, 0x8d, 0x3f, 0xe7, 0xcc, 0x67, 0x96, 0x72, 0x20, 0x15, 0xd2, 0xee, 0x0e, 0xe6, 0x24, 0xed, 0x74, 0x0e, 0xa4, 0x0e, 0x0a, 0xb1, 0x0e, 0x1e, 0x0a, 0x15, 0x88, 0x49, 0xe7, 0x9d, 0xfc, 0x3d, 0x06, 0x64, 0xe0, 0xc7, 0x22, 0x30, 0xcf, 0x28, 0xd4, 0xf1, 0x02, 0xf2, 0x84, 0x5d, 0xdf, 0x81, 0xa5, 0xcc, 0xc4, 0xf3, 0x16, 0xcc, 0x4e, 0x04, 0x9b, 0xe3, 0xf6, 0x36, 0x2e, 0x85, 0x9f, 0x48, 0x71, 0x3a, 0xfb, 0xda, 0xbb, 0xd0, 0x32, 0xf0, 0xe3, 0x6d, 0xe2, 0x92, 0xd9, 0xca, 0x8d, 0x4f, 0x5f, 0x70, 0xdf, 0x82, 0xc5, 0xd4, 0x0c, 0xf3, 0x36, 0xcb, 0x3e, 0xac, 0x28, 0x85, 0xcd, 0x2e, 0xc4, 0x71, 0x2c, 0x83, 0xe1, 0xec, 0xf8, 0x44, 0xf3, 0x96, 0xe5, 0x63, 0x0d, 0x50, 0x34, 0x36, 0xf6, 0xf6, 0xc9, 0xdc, 0x6f, 0x08, 0x52, 0xc5, 0xfb, 0x7c, 0xba, 0x78, 0xcf, 0x93, 0x13, 0xcf, 0x67, 0x4e, 0x37, 0xba, 0x0d, 0x90, 0x31, 0x0a, 0x24, 0xe8, 0xbe, 0xe7, 0x8e, 0xf8, 0xe2, 0xca, 0x30, 0x36, 0x6f, 0xc9, 0x3f, 0xd4, 0x84, 0x19, 0xbf, 0x12, 0xe1, 0xc7, 0x83, 0xa3, 0x34, 0xf4, 0x89, 0x8a, 0xfb, 0x47, 0xb9, 0x07, 0x9d, 0xe2, 0x35, 0x50, 0xfa, 0xb2, 0xa7, 0x90, 0xbd, 0xec, 0x91, 0xf2, 0x17, 0x95, 0xfc, 0xb3, 0x5c, 0xfe, 0xec, 0x43, 0x33, 0x16, 0x67, 0x76, 0x5d, 0x3d, 0x0b, 0xf9, 0x83, 0xc3, 0x27, 0xc6, 0x2b, 0x8e, 0xd3, 0x5f, 0x13, 0x0f, 0x72, 0x84, 0x55, 0xb2, 0x5a, 0xd0, 0x9e, 0x6c, 0xed, 0x5c, 0x86, 0xd5, 0xcf, 0xb5, 0x24, 0xc2, 0xce, 0xaa, 0xff, 0xe7, 0xa1, 0x14, 0x72, 0x16, 0xa6, 0x56, 0xc4, 0xa4, 0xcb, 0x44, 0x04, 0x3c, 0xab, 0x22, 0xd8, 0xea, 0x99, 0x69, 0x93, 0x54, 0x39, 0xe4, 0xce, 0xdc, 0xcc, 0xa2, 0xbb, 0xb0, 0x9c, 0x95, 0xe8, 0x44, 0x4d, 0xf0, 0x43, 0x0d, 0xaa, 0x77, 0x0f, 0x2d, 0x4b, 0xbc, 0xc6, 0x41, 0x57, 0xa0, 0x20, 0x5e, 0x5a, 0x4d, 0xb9, 0x26, 0x13, 0x88, 0xcc, 0x03, 0x90, 0x5c, 0xf6, 0x01, 0xc8, 0x91, 0xd5, 0xd9, 0x2b, 0x50, 0xa3, 0x3d, 0x9f, 0x67, 0xf2, 0xa9, 0x1a, 0x2d, 0x08, 0x90, 0x28, 0x2b, 0xeb, 0xdf, 0x90, 0x6c, 0x88, 0xc6, 0x51, 0xcf, 0x4c, 0xe2, 0x8c, 0x28, 0x97, 0xbe, 0x53, 0x14, 0x37, 0x7d, 0x87, 0x96, 0xbc, 0x3a, 0xfa, 0x32, 0x42, 0xa4, 0x1e, 0x0e, 0xe5, 0xb3, 0x0f, 0x87, 0x9e, 0x2a, 0xc1, 0x87, 0x11, 0x0f, 0xe2, 0x98, 0xa4, 0x2e, 0xf9, 0xc7, 0x2f, 0x4d, 0x15, 0x93, 0xd1, 0x25, 0xff, 0x3a, 0x94, 0xc4, 0xfd, 0x93, 0xb2, 0x11, 0xca, 0x10, 0x0a, 0x9b, 0x18, 0x11, 0x05, 0xa7, 0x8d, 0x4a, 0xd9, 0xf9, 0x29, 0xb4, 0x82, 0x87, 0xb8, 0xbc, 0xbd, 0x07, 0x4b, 0x1c, 0xf8, 0x26, 0x61, 0x37, 0x46, 0xdc, 0x89, 0xe7, 0x91, 0x09, 0xe8, 0x3f, 0xd2, 0x60, 0x39, 0x3b, 0xea, 0xbc, 0x13, 0xee, 0xab, 0x50, 0xe0, 0xe7, 0xb3, 0x89, 0x37, 0x0f, 0x4a, 0xad, 0x86, 0x40, 0xeb, 0xef, 0xc2, 0xb9, 0x98, 0x8f, 
0xa8, 0x16, 0x36, 0x8b, 0x84, 0x4f, 0x76, 0x03, 0xfd, 0x53, 0x0d, 0xda, 0x93, 0x53, 0xcc, 0x5b, 0xdc, 0xc9, 0x27, 0x59, 0x4a, 0x01, 0x85, 0xa3, 0x15, 0xf0, 0x3d, 0x0d, 0xd0, 0x5e, 0xe0, 0x3a, 0x4c, 0xbe, 0x85, 0x9a, 0xad, 0x80, 0x51, 0xa5, 0x7c, 0x84, 0x24, 0xa6, 0xde, 0xc8, 0xb5, 0x35, 0xa3, 0x22, 0x80, 0x3c, 0xe4, 0xf2, 0x03, 0xa4, 0x22, 0x50, 0x35, 0xd9, 0xaa, 0xc2, 0x52, 0xfd, 0x77, 0x1a, 0x2c, 0x65, 0x58, 0x98, 0x5d, 0x39, 0xd7, 0xa0, 0xe0, 0x92, 0x2e, 0x8b, 0x4e, 0x92, 0x0b, 0xd9, 0x77, 0x5e, 0x82, 0x2b, 0x81, 0x47, 0x6b, 0x50, 0x0c, 0x9d, 0xfd, 0x1e, 0x8b, 0xdc, 0x63, 0x1a, 0xa1, 0x24, 0x40, 0x6b, 0x3c, 0xb8, 0xee, 0x8b, 0x1a, 0xbb, 0x3c, 0xe9, 0x8f, 0xd1, 0x1a, 0x0a, 0xad, 0x7f, 0x17, 0xce, 0xbf, 0xed, 0xf1, 0x63, 0xf1, 0x36, 0xa1, 0x2c, 0xf4, 0x47, 0xa7, 0x9b, 0xac, 0xe8, 0x04, 0x2e, 0x4c, 0x9b, 0x7e, 0xde, 0x09, 0xca, 0xbb, 0x70, 0x91, 0x0b, 0x4e, 0x19, 0x09, 0x79, 0xf0, 0xb9, 0xff, 0x88, 0x92, 0xf0, 0x90, 0x84, 0xb3, 0xc8, 0xb9, 0x02, 0xa5, 0x3e, 0x1e, 0x26, 0x4b, 0xa6, 0xd8, 0xc7, 0xc3, 0x0e, 0xd5, 0x5f, 0x81, 0x67, 0xa6, 0xcf, 0x10, 0x89, 0xb2, 0x9c, 0x7e, 0x45, 0x52, 0x4d, 0x6a, 0x03, 0x6d, 0x51, 0x6d, 0x3e, 0x21, 0xa6, 0x06, 0x70, 0x7e, 0xca, 0xf0, 0x47, 0x71, 0xc4, 0x63, 0x82, 0x43, 0x4d, 0xcb, 0x25, 0x58, 0x16, 0x9b, 0x2a, 0x46, 0xd9, 0xa1, 0xa2, 0x34, 0x7b, 0xfc, 0x6a, 0xd9, 0x3b, 0x70, 0xde, 0x20, 0x7d, 0x5f, 0x56, 0x89, 0x4e, 0x40, 0xac, 0x4d, 0xb8, 0x30, 0x6d, 0xfc, 0x23, 0x35, 0xfd, 0x33, 0x0d, 0xce, 0xed, 0xf6, 0x46, 0xd4, 0xb1, 0xb0, 0xfb, 0x65, 0xea, 0x9e, 0xd3, 0x59, 0x9a, 0xa5, 0xda, 0xf9, 0x10, 0xda, 0x93, 0x0c, 0x1d, 0x69, 0x9b, 0xd8, 0x00, 0xb9, 0xa7, 0x18, 0xe0, 0x55, 0x68, 0x19, 0x04, 0xcb, 0xf7, 0x83, 0x33, 0x08, 0xa9, 0x13, 0x58, 0x4c, 0xf5, 0x9f, 0x7d, 0x31, 0x5e, 0x02, 0x10, 0xcf, 0x16, 0xe5, 0xe3, 0xc6, 0xa8, 0x06, 0x17, 0xaa, 0x91, 0xd7, 0xff, 0x1b, 0x20, 0x79, 0x60, 0x8a, 0x00, 0x4a, 0xf7, 0xfc, 0xb0, 0x8f, 0xdd, 0xd6, 0x19, 0x54, 0x86, 0xfc, 0x1d, 0xff, 0x71, 0x4b, 0x43, 0x15, 0x28, 0xdc, 0x72, 0xf6, 0x7b, 0xad, 0xdc, 0xfa, 0x2a, 0x2c, 0x64, 0x5f, 0x95, 0xa2, 0x12, 0xe4, 0xf6, 0x76, 0x5a, 0x67, 0xf8, 0xaf, 0xb1, 0xd5, 0xd2, 0xd6, 0xef, 0x43, 0xee, 0x7e, 0xc0, 0xbb, 0xee, 0x0e, 0x98, 0x1c, 0x63, 0x9b, 0xb8, 0x72, 0x0c, 0xae, 0xa0, 0x56, 0x0e, 0xd5, 0xa1, 0xa2, 0xee, 0xd1, 0x5a, 0x79, 0x3e, 0xe1, 0x8e, 0x47, 0x49, 0xc8, 0x5a, 0x05, 0xb4, 0x04, 0xcd, 0xb1, 0x2b, 0xfd, 0x56, 0x71, 0x7d, 0x03, 0xaa, 0xf1, 0x93, 0x26, 0x3e, 0xca, 0x3d, 0xdf, 0x23, 0xad, 0x33, 0xa8, 0x0a, 0x45, 0x71, 0x59, 0xd4, 0xd2, 0xf8, 0x80, 0xea, 0xea, 0xa8, 0x95, 0x5b, 0x7f, 0x07, 0x4a, 0xf2, 0xb2, 0x45, 0xc2, 0xe5, 0x77, 0xeb, 0x0c, 0x5a, 0x81, 0xc5, 0x4e, 0xe7, 0x8e, 0x7c, 0xd2, 0x1c, 0xcf, 0xaf, 0xa1, 0x36, 0x2c, 0xf3, 0x89, 0xd4, 0x00, 0x31, 0x26, 0xc7, 0x3b, 0xdc, 0x8d, 0xdf, 0xe9, 0xec, 0xed, 0x0e, 0x68, 0x8f, 0xd8, 0xad, 0xfc, 0x8d, 0x6b, 0x7f, 0xf9, 0x75, 0x45, 0xfb, 0xc3, 0x17, 0x97, 0xb5, 0xcf, 0xbf, 0xb8, 0xac, 0xfd, 0xed, 0x8b, 0xcb, 0xda, 0x27, 0x7f, 0xbf, 0x7c, 0x06, 0x5a, 0x7e, 0xb8, 0xbf, 0xc1, 0x9c, 0x83, 0xc3, 0x8d, 0x83, 0x43, 0xf1, 0x77, 0x86, 0x47, 0x25, 0xf1, 0xf3, 0xf2, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x99, 0x63, 0x5a, 0xdb, 0x33, 0x31, 0x00, 0x00, }
encodeVarintKvrpcpb
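
Every Unmarshal method above decodes field tags and integer fields with the same base-128 varint loop: each byte contributes seven payload bits, accumulated little-endian, and the first byte with the continuation bit (0x80) clear terminates the value; shifting past 64 bits is the overflow guard behind ErrIntOverflowKvrpcpb. A minimal sketch of that loop, written here in Python rather than the generated Go:

def decode_varint(data, index=0):
    """Decode one base-128 varint starting at `index`; return (value, next_index)."""
    result = 0
    shift = 0
    while True:
        if shift >= 64:
            raise OverflowError("varint too long")     # cf. ErrIntOverflowKvrpcpb
        if index >= len(data):
            raise EOFError("unexpected end of input")  # cf. io.ErrUnexpectedEOF
        b = data[index]
        index += 1
        result |= (b & 0x7F) << shift                  # low 7 bits, little-endian
        if b < 0x80:                                   # continuation bit clear: done
            return result, index
        shift += 7

# Tag byte 0x10 is field 2, wire type 0; 0xAC 0x02 encodes the value 300.
assert decode_varint(b"\x10\xac\x02", 1) == (300, 3)

The generated Go inlines this loop at every call site rather than calling a shared helper, which is why the Unmarshal bodies above are so repetitive.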
ink_test.rs
// Copyright 2018-2021 Parity Technologies (UK) Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS,
use ink_lang_codegen::generate_code; use proc_macro2::TokenStream as TokenStream2; use syn::Result; pub fn generate(attr: TokenStream2, input: TokenStream2) -> TokenStream2 { match generate_or_err(attr, input) { Ok(tokens) => tokens, Err(err) => err.to_compile_error(), } } pub fn generate_or_err(attr: TokenStream2, input: TokenStream2) -> Result<TokenStream2> { let test_definition = ink_lang_ir::InkTest::new(attr, input)?; Ok(generate_code(&test_definition)) }
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
Client.go
package firebaseclient import ( "context" "encoding/json" "print-apple/internal/config" "print-apple/pkg/errors" "cloud.google.com/go/firestore" "google.golang.org/api/option" ) var ( sharedClient = &firestore.Client{} credentials = map[string]string{ "type": "service_account", "project_id": "neogenesis-2a947", "private_key_id": "3b7bfff9bdac3643be886bbde4a8eab65282724f", "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQCZy4V2kYnezQ4h\nvk+bvR36z0fBWMWqlULMGJF2dhFH7JHKufdJtIjB7n+TQbxJa/nUIIw/7WMVbQ0x\nuKcthxSs+GTYhzoWEJQCMdbD/OIxa0TYOTXfnD/pR2kdyrt/LW39BHc14QrMzrLZ\nRiRDp2V+/rPtgKYLKK1abvYINWJWRZIsGBCExIBMxQ/U08UVr2teqx5O2EQEXzNz\nYWj3ulx8O6xEls2uXzhllAQb9e+mc/grIVt3xDsyFTmPnHSLazcnsphi6DFUxlJI\nRNZYR7ZFf1TnEtq2u8rN/2YWBDFxWtQvO5TRFh/mervc9EkE7UjCI3ihxNIVSlj1\nAveEDhWJAgMBAAECgf89bUyhTwACPWSnlZu2WJLg2mmOkCZjO7qySh3Pctj0IUkZ\n760Vg0YcL2ZFKrxJlxbx4w4hem9Y33WlplsflBRuAchFs70/kt+LAN0G6rptgqq8\nsh0ScxYT1rHldFPvN/X2WRJEMt5dbzGAFsPlQbcIQjMbaYbobhbPSAJ8xNXCHujR\nxzL962Ho1STHPwNpndzy/XQw3s7rNub5CbuyCr8+NdQQzV0ypXDOtmwWALwUxyIu\nC1n32RiXFGT4Yli6+zYLrhF7FkOFaMzvKjmngMzYOYNFWmCgUcPVmHk5lMotNL/P\ny+6CpyP+pNdYRP3dv/2Gcx3WCKvyqZ9u2IwcIBUCgYEA0E/jRdfGQMPfW0gvEb+L\nepi9XsV338bacEsYYOCC6thBQd4a10aBRMjH0eoVhUh5HdlB7/oq7Q4scAcsFqUn\nef/16GZOohEyNJNHqLve63zh7n8qR2GsRErqMa69HrNSEe8/XzpZl2rWgXC9Mkg7\n8vBRPd576zliPEIJRilQPCUCgYEAvQCqatxECiJVnHylOOKlcOvZ9ioNJYzz/2cG\ny2sGxCyI8ijRQ2S2LA+VKo/zpF3yzehF/eyDPMT0t+HfnNSLPWaZ+hF+CGvUZ/Tr\nQNdMvWUIKbP6+0lNIztzkr2EYFYkfBpQuYO/SOVVxgfKlrXUHq0K1UZZyDwWQMNT\n38h4hJUCgYEAw/dzkh/URNc/hysYBLVSbKnF9KMGC4GRu3QZ4gEzh+SbN3DPhVex\nglj0ChkR18n/DsJ00mJhAZOE4HsO0drakV3nI5MjRDmzJlyrXCQpKRXZobkFuBM9\nsR1cxhJhncEKYw7UaiyFXfnHBAxgIC5uHzRO6Uok/3uDW7av7M4uyfUCgYATKSId\nuz7amCh9uNU3MyL6k66BGjpC+Es0NUmnDa6d7LXlduXgIzGkvd+tdPKKU0vuPAH8\ngCG942m7ypZU2+dRzjkF9QgF6oiaEWZYKHuLJ9bwA2MKXqAHVludIMFu0szYGALf\nC9A0n6tWbCvJo51hjsFuZbdsaUsIPcUfBr/REQKBgDfx2lrn7YtzqFwYeE9Xz7Tq\n7pukWmVniF9CsUxCba31ML6o08coCnc9no+rc2MtnRF8B2m1KOUnfIxhUm/YATxC\n8ipjX4qU6qPCMWRoh4HBHjEQgO/NG1IInyLu6FBssVHd85+HaaAoDXe54PDkRYzV\nq7qaAgbEf8QY88Liv+LY\n-----END PRIVATE KEY-----\n", "client_email": "[email protected]", "client_id": "110912029148578276583", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "token_uri": "https://oauth2.googleapis.com/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-7zlk2%40neogenesis-2a947.iam.gserviceaccount.com", } ) // Client ... type Client struct { Client *firestore.Client } // NewClient ... func NewClient(cfg *config.Config) (*Client, error)
{ var c Client cb, err := json.Marshal(credentials) if err != nil { return &c, errors.Wrap(err, "[FIREBASE] Failed to marshal credentials!") } opt := option.WithCredentialsJSON(cb) c.Client, err = firestore.NewClient(context.Background(), cfg.Firebase.ProjectID, opt) if err != nil { return &c, errors.Wrap(err, "[FIREBASE] Failed to initiate firebase client!") } return &c, nil }
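
NewClient marshals the inline credential map back into JSON and hands it to the SDK through option.WithCredentialsJSON. For comparison, the same initialize-from-inline-credentials pattern in the Python Firestore client (a sketch: the project ID is a placeholder, and credentials_info must be a complete service-account dict of the same shape as the Go map above):

from google.cloud import firestore
from google.oauth2 import service_account

credentials_info = {
    "type": "service_account",
    "project_id": "my-project",
    # ... remaining service-account fields, as in the Go `credentials` map ...
}

creds = service_account.Credentials.from_service_account_info(credentials_info)
client = firestore.Client(project="my-project", credentials=creds)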
simpleSQL.py
# Source: http://pyparsing.wikispaces.com/file/view/simpleSQL.py # simpleSQL.py # # simple demo of using the parsing library to do simple-minded SQL parsing # could be extended to include where clauses etc. # # Copyright (c) 2003, Paul McGuire # from pyparsing import Literal, CaselessLiteral, Word, Upcase, delimitedList, Optional, \ Combine, Group, alphas, nums, alphanums, ParseException, Forward, oneOf, quotedString, \ ZeroOrMore, restOfLine, Keyword def test( str ): print str,"->" try: tokens = simpleSQL.parseString( str ) print "tokens = ", tokens print "tokens.columns =", tokens.columns print "tokens.tables =", tokens.tables print "tokens.where =", tokens.where except ParseException, err: print " "*err.loc + "^\n" + err.msg print err print # define SQL tokens selectStmt = Forward() selectToken = Keyword("select", caseless=True) fromToken = Keyword("from", caseless=True) ident = Word( alphas, alphanums + "_$" ).setName("identifier") columnName = Upcase( delimitedList( ident, ".", combine=True ) ) columnNameList = Group( delimitedList( columnName ) ) tableName = Upcase( delimitedList( ident, ".", combine=True ) ) tableNameList = Group( delimitedList( tableName ) ) whereExpression = Forward() and_ = Keyword("and", caseless=True) or_ = Keyword("or", caseless=True) in_ = Keyword("in", caseless=True) E = CaselessLiteral("E") binop = oneOf("= != < > >= <= eq ne lt le gt ge", caseless=True) arithSign = Word("+-",exact=1) realNum = Combine( Optional(arithSign) + ( Word( nums ) + "." + Optional( Word(nums) ) | ( "." + Word(nums) ) ) + Optional( E + Optional(arithSign) + Word(nums) ) ) intNum = Combine( Optional(arithSign) + Word( nums ) + Optional( E + Optional("+") + Word(nums) ) ) columnRval = realNum | intNum | quotedString | columnName # need to add support for alg expressions whereCondition = Group( ( columnName + binop + columnRval ) | ( columnName + in_ + "(" + delimitedList( columnRval ) + ")" ) | ( columnName + in_ + "(" + selectStmt + ")" ) | ( "(" + whereExpression + ")" ) ) whereExpression << whereCondition + ZeroOrMore( ( and_ | or_ ) + whereExpression ) # define the grammar selectStmt << ( selectToken + ( '*' | columnNameList ).setResultsName( "columns" ) + fromToken +
# define Oracle comment format, and ignore them oracleSqlComment = "--" + restOfLine simpleSQL.ignore( oracleSqlComment ) test( "SELECT * from XYZZY, ABC" ) test( "select * from SYS.XYZZY" ) test( "Select A from Sys.dual" ) test( "Select A,B,C from Sys.dual" ) test( "Select A, B, C from Sys.dual" ) test( "Select A, B, C from Sys.dual, Table2 " ) test( "Xelect A, B, C from Sys.dual" ) test( "Select A, B, C frox Sys.dual" ) test( "Select" ) test( "Select &&& frox Sys.dual" ) test( "Select A from Sys.dual where a in ('RED','GREEN','BLUE')" ) test( "Select A from Sys.dual where a in ('RED','GREEN','BLUE') and b in (10,20,30)" ) test( "Select A,b from table1,table2 where table1.id eq table2.id -- test out comparison operators" ) """ Test output: >pythonw -u simpleSQL.py SELECT * from XYZZY, ABC -> tokens = ['select', '*', 'from', ['XYZZY', 'ABC']] tokens.columns = * tokens.tables = ['XYZZY', 'ABC'] select * from SYS.XYZZY -> tokens = ['select', '*', 'from', ['SYS.XYZZY']] tokens.columns = * tokens.tables = ['SYS.XYZZY'] Select A from Sys.dual -> tokens = ['select', ['A'], 'from', ['SYS.DUAL']] tokens.columns = ['A'] tokens.tables = ['SYS.DUAL'] Select A,B,C from Sys.dual -> tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']] tokens.columns = ['A', 'B', 'C'] tokens.tables = ['SYS.DUAL'] Select A, B, C from Sys.dual -> tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']] tokens.columns = ['A', 'B', 'C'] tokens.tables = ['SYS.DUAL'] Select A, B, C from Sys.dual, Table2 -> tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL', 'TABLE2']] tokens.columns = ['A', 'B', 'C'] tokens.tables = ['SYS.DUAL', 'TABLE2'] Xelect A, B, C from Sys.dual -> ^ Expected 'select' Expected 'select' (0), (1,1) Select A, B, C frox Sys.dual -> ^ Expected 'from' Expected 'from' (15), (1,16) Select -> ^ Expected '*' Expected '*' (6), (1,7) Select &&& frox Sys.dual -> ^ Expected '*' Expected '*' (7), (1,8) >Exit code: 0 """
tableNameList.setResultsName( "tables" ) + Optional( Group( CaselessLiteral("where") + whereExpression ), "" ).setResultsName("where") ) simpleSQL = selectStmt
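
simpleSQL.py is Python 2 code written against a pre-3.x pyparsing API; in particular, the Upcase helper it relies on was removed from later pyparsing releases. A rough sketch of the core SELECT grammar under pyparsing 3, assuming a parse action in place of Upcase:

# Requires pyparsing 3.x.
from pyparsing import Word, alphas, alphanums, Keyword, Group, Forward, delimited_list

select_stmt = Forward()
SELECT = Keyword("select", caseless=True)
FROM = Keyword("from", caseless=True)

ident = Word(alphas, alphanums + "_$").set_name("identifier")
# Upcase is gone; an equivalent parse action uppercases the combined name.
upcased_name = delimited_list(ident, ".", combine=True).add_parse_action(
    lambda t: t[0].upper()
)
column_name = upcased_name.copy()
table_name = upcased_name.copy()

select_stmt <<= (
    SELECT
    + ("*" | Group(delimited_list(column_name)))("columns")
    + FROM
    + Group(delimited_list(table_name))("tables")
)

print(select_stmt.parse_string("Select A, B from Sys.dual"))
# -> ['select', ['A', 'B'], 'from', ['SYS.DUAL']]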
speed-index.js
/** * @license Copyright 2016 The Lighthouse Authors. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ 'use strict'; const Audit = require('../audit.js'); const i18n = require('../../lib/i18n/i18n.js'); const ComputedSi = require('../../computed/metrics/speed-index.js'); const UIStrings = { /** Description of the Speed Index metric, which summarizes how quickly the page looked visually complete. This is displayed within a tooltip when the user hovers on the metric name to see more. No character length limits. 'Learn More' becomes link text to additional documentation. */ description: 'Speed Index shows how quickly the contents of a page are visibly populated. ' + '[Learn more](https://web.dev/speed-index/).', }; const str_ = i18n.createMessageInstanceIdFn(__filename, UIStrings); class
extends Audit { /** * @return {LH.Audit.Meta} */ static get meta() { return { id: 'speed-index', title: str_(i18n.UIStrings.speedIndexMetric), description: str_(UIStrings.description), scoreDisplayMode: Audit.SCORING_MODES.NUMERIC, supportedModes: ['navigation'], requiredArtifacts: ['traces', 'devtoolsLogs', 'GatherContext'], }; } /** * @return {{mobile: {scoring: LH.Audit.ScoreOptions}, desktop: {scoring: LH.Audit.ScoreOptions}}} */ static get defaultOptions() { return { mobile: { // 25th and 5th percentiles HTTPArchive -> median and PODR, then p10 derived from them. // https://bigquery.cloud.google.com/table/httparchive:lighthouse.2018_04_01_mobile?pli=1 // see https://www.desmos.com/calculator/dvuzvpl7mi scoring: { p10: 3387, median: 5800, }, }, desktop: { // SELECT QUANTILES(SpeedIndex, 21) FROM [httparchive:summary_pages.2018_12_15_desktop] LIMIT 1000 scoring: { p10: 1311, median: 2300, }, }, }; } /** * Audits the page to give a score for the Speed Index. * @see https://github.com/GoogleChrome/lighthouse/issues/197 * @param {LH.Artifacts} artifacts The artifacts from the gather phase. * @param {LH.Audit.Context} context * @return {Promise<LH.Audit.Product>} */ static async audit(artifacts, context) { const trace = artifacts.traces[Audit.DEFAULT_PASS]; const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS]; const gatherContext = artifacts.GatherContext; const metricComputationData = {trace, devtoolsLog, gatherContext, settings: context.settings}; const metricResult = await ComputedSi.request(metricComputationData, context); const options = context.options[context.settings.formFactor]; return { score: Audit.computeLogNormalScore( options.scoring, metricResult.timing ), numericValue: metricResult.timing, numericUnit: 'millisecond', displayValue: str_(i18n.UIStrings.seconds, {timeInMs: metricResult.timing}), }; } } module.exports = SpeedIndex; module.exports.UIStrings = UIStrings;
SpeedIndex
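
Audit.computeLogNormalScore maps the raw millisecond timing onto a 0-1 score using a log-normal curve fitted so that the median control point scores 0.5 and the p10 control point scores 0.9. Lighthouse's actual implementation lives elsewhere in the library; a sketch of the underlying math (1.28155 is the standard-normal z-score of the 90th percentile):

import math

def log_normal_score(value_ms, p10, median):
    """Score in [0, 1]; value == median -> 0.5, value == p10 -> 0.9."""
    if value_ms <= 0:
        return 1.0
    mu = math.log(median)
    sigma = (mu - math.log(p10)) / 1.28155
    z = (math.log(value_ms) - mu) / sigma
    return 0.5 * math.erfc(z / math.sqrt(2))  # complementary normal CDF

# Mobile Speed Index control points from defaultOptions above.
assert abs(log_normal_score(5800, 3387, 5800) - 0.5) < 1e-6
assert abs(log_normal_score(3387, 3387, 5800) - 0.9) < 1e-3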
day03a.rs
#[macro_use] extern crate nom; extern crate aoc; use aoc::*; use std::collections::HashMap; use std::fs; use std::iter::*; use std::str::FromStr; #[derive(Debug)] struct
{ id: usize, min: (usize, usize), max: (usize, usize), } named!(int<&str, usize>, map!(nom::digit, |x| -> usize { FromStr::from_str(x).expect("error") }) ); named!(rect<&str, Rect>, do_parse!( tag!("#") >> id: int >> tag!(" @ ") >> x: int >> tag!(",") >> y: int >> tag!(": ") >> w: int >> tag!("x") >> h: int >> tag!("\n") >> (Rect { id: id - 1, min: (x, y), max: (x + w, y + h) }) ) ); named!(rects<&str, Vec<Rect>>, many1!(rect) ); fn main() { let file = fs::read("data/input03.txt").expect("failed to open file"); let str_file = String::from_utf8(file).expect("could not parse as utf8"); let patches = rects(&str_file[..]).expect("could not parse rects").1; let mut grid = vec![vec![0; 1000]; 1000]; for patch in patches { for x in patch.min.0..patch.max.0 { for y in patch.min.1..patch.max.1 { grid[x][y] += 1; } } } let mut count = 0; for x in 0..1000 { for y in 0..1000 { count += (grid[x][y] >= 2) as usize; } } println!("{}", count); }
Rect
load_balancer_sku.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class LoadBalancerSku(Model): """SKU of a load balancer. :param name: Name of a load balancer SKU. Possible values include: 'Basic', 'Standard' :type name: str or ~azure.mgmt.network.v2018_08_01.models.LoadBalancerSkuName """ _attribute_map = {
super(LoadBalancerSku, self).__init__(**kwargs) self.name = kwargs.get('name', None)
'name': {'key': 'name', 'type': 'str'}, } def __init__(self, **kwargs):
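
LoadBalancerSku is a plain msrest Model: __init__ only pulls name out of **kwargs, and _attribute_map drives serialization. A minimal usage sketch, assuming the standard azure-mgmt-network package layout implied by the header:

from azure.mgmt.network.v2018_08_01.models import LoadBalancerSku

sku = LoadBalancerSku(name="Standard")
print(sku.name)         # 'Standard'
print(sku.serialize())  # {'name': 'Standard'}, via msrest's generic Model.serialize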
table.rs
use generated_types::wal as wb; use query::exec::{make_schema_pivot, GroupedSeriesSetPlan, SeriesSetPlan}; use tracing::debug; use std::{collections::BTreeSet, collections::HashMap, sync::Arc}; use crate::{ column, column::Column, dictionary::{Dictionary, Error as DictionaryError}, partition::PartitionIdSet, partition::{Partition, PartitionPredicate}, }; use data_types::TIME_COLUMN_NAME; use snafu::{OptionExt, ResultExt, Snafu}; use arrow_deps::{ arrow, arrow::{ array::{ArrayRef, BooleanBuilder, Float64Builder, Int64Builder, StringBuilder}, datatypes::{DataType as ArrowDataType, Field as ArrowField, Schema as ArrowSchema}, record_batch::RecordBatch, }, datafusion, datafusion::logical_plan::Expr, datafusion::logical_plan::LogicalPlan, datafusion::logical_plan::LogicalPlanBuilder, }; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Table {} not found", table))] TableNotFound { table: String }, #[snafu(display( "Column {} said it was type {} but extracting a value of that type failed", column, expected ))] WalValueTypeMismatch { column: String, expected: String }, #[snafu(display( "Tag value ID {} not found in dictionary of partition {}", value, partition ))] TagValueIdNotFoundInDictionary { value: u32, partition: String, source: DictionaryError, }, #[snafu(display( "Column type mismatch for column {}: can't insert {} into column with type {}", column, inserted_value_type, existing_column_type ))] ColumnTypeMismatch { column: String, existing_column_type: String, inserted_value_type: String, }, #[snafu(display("Column error on column {}: {}", column, source))] ColumnError { column: String, source: column::Error, }, #[snafu(display( "Internal error: Expected column {} to be type {} but was {}", column_id, expected_column_type, actual_column_type ))] InternalColumnTypeMismatch { column_id: u32, expected_column_type: String, actual_column_type: String, }, #[snafu(display( "Column name '{}' not found in dictionary of partition {}", column_name, partition ))] ColumnNameNotFoundInDictionary { column_name: String, partition: String, source: DictionaryError, }, #[snafu(display( "Internal: Column id '{}' not found in dictionary of partition {}", column_id, partition ))] ColumnIdNotFoundInDictionary { column_id: u32, partition: String, source: DictionaryError, }, #[snafu(display( "Schema mismatch: for column {}: can't insert {} into column with type {}", column, inserted_value_type, existing_column_type ))] SchemaMismatch { column: u32, existing_column_type: String, inserted_value_type: String, }, #[snafu(display("Error building plan: {}", source))] BuildingPlan { source: datafusion::error::DataFusionError, }, #[snafu(display("arrow conversion error: {}", source))] ArrowError { source: arrow::error::ArrowError }, #[snafu(display("Schema mismatch: for column {}: {}", column, source))] InternalSchemaMismatch { column: u32, source: crate::column::Error, }, #[snafu(display( "No index entry found for column {} with id {}", column_name, column_id ))] InternalNoColumnInIndex { column_name: String, column_id: u32 }, #[snafu(display("Error creating column from wal for column {}: {}", column, source))] CreatingFromWal { column: u32, source: crate::column::Error, }, #[snafu(display("Error evaluating column predicate for column {}: {}", column, source))] ColumnPredicateEvaluation { column: u32, source: crate::column::Error, }, #[snafu(display("Row insert to table {} missing column name", table))] ColumnNameNotInRow { table: u32 }, #[snafu(display( "Group column '{}' not found in tag columns: {}", 
column_name, all_tag_column_names ))] GroupColumnNotFound { column_name: String, all_tag_column_names: String, }, #[snafu(display("Duplicate group column '{}'", column_name))] DuplicateGroupColumn { column_name: String }, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub struct Table { /// Name of the table as a u32 in the partition dictionary pub id: u32, /// Maps column name (as a u32 in the partition dictionary) to an index in self.columns pub column_id_to_index: HashMap<u32, usize>, /// Actual column storage pub columns: Vec<Column>, } type ArcStringVec = Vec<Arc<String>>; impl Table { pub fn new(id: u32) -> Self
fn append_row( &mut self, dictionary: &mut Dictionary, values: &flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<wb::Value<'_>>>, ) -> Result<()> { let row_count = self.row_count(); // insert new columns and validate existing ones for value in values { let column_name = value .column() .context(ColumnNameNotInRow { table: self.id })?; let column_id = dictionary.lookup_value_or_insert(column_name); let column = match self.column_id_to_index.get(&column_id) { Some(idx) => &mut self.columns[*idx], None => { // Add the column and make all values for existing rows None let idx = self.columns.len(); self.column_id_to_index.insert(column_id, idx); self.columns.push( Column::with_value(dictionary, row_count, value) .context(CreatingFromWal { column: column_id })?, ); continue; } }; column.push(dictionary, &value).context(ColumnError { column: column_name, })?; } // make sure all the columns are of the same length for col in &mut self.columns { col.push_none_if_len_equal(row_count); } Ok(()) } pub fn row_count(&self) -> usize { self.columns.first().map_or(0, |v| v.len()) } /// Returns a reference to the specified column fn column(&self, column_id: u32) -> Result<&Column> { Ok(self .column_id_to_index .get(&column_id) .map(|&column_index| &self.columns[column_index]) .expect("invalid column id")) } /// Returns a reference to the specified column as a slice of /// i64s. Errors if the type is not i64 pub fn column_i64(&self, column_id: u32) -> Result<&[Option<i64>]> { let column = self.column(column_id)?; match column { Column::I64(vals, _) => Ok(vals), _ => InternalColumnTypeMismatch { column_id, expected_column_type: "i64", actual_column_type: column.type_description(), } .fail(), } } pub fn append_rows( &mut self, dictionary: &mut Dictionary, rows: &flatbuffers::Vector<'_, flatbuffers::ForwardsUOffset<wb::Row<'_>>>, ) -> Result<()> { for row in rows { if let Some(values) = row.values() { self.append_row(dictionary, &values)?; } } Ok(()) } /// Creates and adds a datafuson filtering expression, if any out of the /// combination of predicate and timestamp. 
Returns the builder fn add_datafusion_predicate( plan_builder: LogicalPlanBuilder, partition_predicate: &PartitionPredicate, ) -> Result<LogicalPlanBuilder> { match partition_predicate.filter_expr() { Some(df_predicate) => plan_builder.filter(df_predicate).context(BuildingPlan), None => Ok(plan_builder), } } /// Creates a DataFusion LogicalPlan that returns column *names* as a /// single column of Strings /// /// The created plan looks like: /// /// Extension(PivotSchema) /// (Optional Projection to get rid of time) /// Filter(predicate) /// InMemoryScan pub fn tag_column_names_plan( &self, partition_predicate: &PartitionPredicate, partition: &Partition, ) -> Result<LogicalPlan> { let need_time_column = partition_predicate.range.is_some(); let time_column_id = partition_predicate.time_column_id; // figure out the tag columns let requested_columns_with_index = self .column_id_to_index .iter() .filter_map(|(&column_id, &column_index)| { // keep tag columns and the timestamp column, if needed to evaluate a timestamp predicate let need_column = if let Column::Tag(_, _) = self.columns[column_index] { true } else { need_time_column && column_id == time_column_id }; if need_column { // the id came out of our map, so it should always be valid let column_name = partition.dictionary.lookup_id(column_id).unwrap(); Some((column_name, column_index)) } else { None } }) .collect::<Vec<_>>(); // TODO avoid materializing here let data = self.to_arrow_impl(partition, &requested_columns_with_index)?; let schema = data.schema(); let projection = None; let projected_schema = schema.clone(); let plan_builder = LogicalPlanBuilder::from(&LogicalPlan::InMemoryScan { data: vec![vec![data]], schema, projection, projected_schema, }); // Shouldn't have field selections here (as we are getting the tags...) assert!(!partition_predicate.has_field_restriction()); let plan_builder = Self::add_datafusion_predicate(plan_builder, partition_predicate)?; // add optional selection to remove time column let plan_builder = if !need_time_column { plan_builder } else { // Create expressions for all columns except time let select_exprs = requested_columns_with_index .iter() .filter_map(|&(column_name, _)| { if column_name != TIME_COLUMN_NAME { Some(Expr::Column(column_name.into())) } else { None } }) .collect(); plan_builder.project(select_exprs).context(BuildingPlan)? }; let plan = plan_builder.build().context(BuildingPlan)?; // And finally pivot the plan let plan = make_schema_pivot(plan); debug!( "Created column_name plan for table '{}':\n{}", partition.dictionary.lookup_id(self.id).unwrap(), plan.display_indent_schema() ); Ok(plan) } /// Creates a DataFusion LogicalPlan that returns column *values* as a /// single column of Strings /// /// The created plan looks like: /// /// Projection /// Filter(predicate) /// InMemoryScan pub fn tag_values_plan( &self, column_name: &str, partition_predicate: &PartitionPredicate, partition: &Partition, ) -> Result<LogicalPlan> { // TODO avoid materializing all the columns here (ideally // DataFusion can prune them out) let data = self.all_to_arrow(partition)?; let schema = data.schema(); let projection = None; let projected_schema = schema.clone(); let select_exprs = vec![Expr::Column(column_name.into())]; // And build the plan! let plan_builder = LogicalPlanBuilder::from(&LogicalPlan::InMemoryScan { data: vec![vec![data]], schema, projection, projected_schema, }); // shouldn't have columns selection (as this is getting tag values...) 
assert!(!partition_predicate.has_field_restriction()); let plan_builder = Self::add_datafusion_predicate(plan_builder, partition_predicate)?; plan_builder .project(select_exprs) .context(BuildingPlan)? .build() .context(BuildingPlan) } /// Creates a SeriesSet plan that produces an output table with rows that match the predicate /// /// The output looks like: /// (tag_col1, tag_col2, ... field1, field2, ... timestamp) /// /// The order of the tag_columns is orderd by name. /// /// The data is sorted on tag_col1, tag_col2, ...) so that all /// rows for a particular series (groups where all tags are the /// same) occur together in the plan pub fn series_set_plan( &self, partition_predicate: &PartitionPredicate, partition: &Partition, ) -> Result<SeriesSetPlan> { self.series_set_plan_impl(partition_predicate, None, partition) } /// Creates the plans for computing series set, pulling prefix_columns, if any, as a prefix of the ordering /// The created plan looks like: /// /// Projection (select the columns columns needed) /// Order by (tag_columns, timestamp_column) /// Filter(predicate) /// InMemoryScan pub fn series_set_plan_impl( &self, partition_predicate: &PartitionPredicate, prefix_columns: Option<&[String]>, partition: &Partition, ) -> Result<SeriesSetPlan> { // I wonder if all this string creation will be too slow? let table_name = partition .dictionary .lookup_id(self.id) .expect("looking up table name in dictionary") .to_string(); let table_name = Arc::new(table_name); let (mut tag_columns, field_columns) = self.tag_and_field_column_names(partition_predicate, partition)?; // reorder tag_columns to have the prefix columns, if requested if let Some(prefix_columns) = prefix_columns { tag_columns = reorder_prefix(prefix_columns, tag_columns)?; } // TODO avoid materializing all the columns here (ideally // DataFusion can prune them out) let data = self.all_to_arrow(partition)?; let schema = data.schema(); let projection = None; let projected_schema = schema.clone(); // And build the plan from the bottom up let plan_builder = LogicalPlanBuilder::from(&LogicalPlan::InMemoryScan { data: vec![vec![data]], schema, projection, projected_schema, }); // Filtering let plan_builder = Self::add_datafusion_predicate(plan_builder, partition_predicate)?; let mut sort_exprs = Vec::new(); sort_exprs.extend(tag_columns.iter().map(|c| c.into_sort_expr())); sort_exprs.push(TIME_COLUMN_NAME.into_sort_expr()); // Order by let plan_builder = plan_builder.sort(sort_exprs).context(BuildingPlan)?; // Selection let mut select_exprs = Vec::new(); select_exprs.extend(tag_columns.iter().map(|c| c.into_expr())); select_exprs.extend(field_columns.iter().map(|c| c.into_expr())); select_exprs.push(TIME_COLUMN_NAME.into_expr()); let plan_builder = plan_builder.project(select_exprs).context(BuildingPlan)?; // and finally create the plan let plan = plan_builder.build().context(BuildingPlan)?; Ok(SeriesSetPlan { table_name, plan, tag_columns, field_columns, }) } /// Creates a GroupedSeriesSet plan that produces an output table with rows that match the predicate /// /// The output looks like: /// (group_tag_column1, group_tag_column2, ... tag_col1, tag_col2, ... field1, field2, ... timestamp) /// /// The order of the tag_columns is ordered by name. /// /// The data is sorted on tag_col1, tag_col2, ...) 
so that all /// rows for a particular series (groups where all tags are the /// same) occur together in the plan /// /// The created plan looks like: /// /// Projection (select the columns columns needed) /// Order by (tag_columns, timestamp_column) /// Filter(predicate) /// InMemoryScan pub fn grouped_series_set_plan( &self, partition_predicate: &PartitionPredicate, group_columns: &[String], partition: &Partition, ) -> Result<GroupedSeriesSetPlan> { let series_set_plan = self.series_set_plan_impl(partition_predicate, Some(&group_columns), partition)?; let num_prefix_tag_group_columns = group_columns.len(); Ok(GroupedSeriesSetPlan { series_set_plan, num_prefix_tag_group_columns, }) } /// Creates a plan that produces an output table with rows that /// match the predicate for all fields in the table. /// /// The output looks like (field0, field1, ..., time) /// /// The data is not sorted in any particular order /// /// The created plan looks like: /// /// Projection (select the field columns needed) /// Filter(predicate) [optional] /// InMemoryScan pub fn field_names_plan( &self, partition_predicate: &PartitionPredicate, partition: &Partition, ) -> Result<LogicalPlan> { // TODO avoid materializing all the columns here (ideally // DataFusion can prune them out) let data = self.all_to_arrow(partition)?; let schema = data.schema(); let projection = None; let projected_schema = schema.clone(); // And build the plan from the bottom up let plan_builder = LogicalPlanBuilder::from(&LogicalPlan::InMemoryScan { data: vec![vec![data]], schema, projection, projected_schema, }); // Filtering let plan_builder = Self::add_datafusion_predicate(plan_builder, partition_predicate)?; // Selection let select_exprs = self .field_and_time_column_names(partition_predicate, partition) .into_iter() .map(|c| c.into_expr()) .collect::<Vec<_>>(); let plan_builder = plan_builder.project(select_exprs).context(BuildingPlan)?; // and finally create the plan plan_builder.build().context(BuildingPlan) } // Returns (tag_columns, field_columns) vectors with the names of // all tag and field columns, respectively. The vectors are sorted // by name. 
fn tag_and_field_column_names( &self, partition_predicate: &PartitionPredicate, partition: &Partition, ) -> Result<(ArcStringVec, ArcStringVec)> { let mut tag_columns = Vec::with_capacity(self.column_id_to_index.len()); let mut field_columns = Vec::with_capacity(self.column_id_to_index.len()); for (&column_id, &column_index) in &self.column_id_to_index { let column_name = partition .dictionary .lookup_id(column_id) .expect("Find column name in dictionary"); if column_name != TIME_COLUMN_NAME { let column_name = Arc::new(column_name.to_string()); match self.columns[column_index] { Column::Tag(_, _) => tag_columns.push(column_name), _ => { if partition_predicate.should_include_field(column_id) { field_columns.push(column_name) } } } } } // tag columns are always sorted by name (aka sorted by tag // key) in the output schema, so ensure the columns are sorted // (the select exprs) tag_columns.sort(); // Sort the field columns too so that the output always comes // out in a predictable order field_columns.sort(); Ok((tag_columns, field_columns)) } // Returns (field_columns and time) in sorted order fn field_and_time_column_names( &self, partition_predicate: &PartitionPredicate, partition: &Partition, ) -> ArcStringVec { let mut field_columns = self .column_id_to_index .iter() .filter_map(|(&column_id, &column_index)| { match self.columns[column_index] { Column::Tag(_, _) => None, // skip tags _ => { if partition_predicate.should_include_field(column_id) || partition_predicate.is_time_column(column_id) { let column_name = partition .dictionary .lookup_id(column_id) .expect("Find column name in dictionary"); Some(Arc::new(column_name.to_string())) } else { None } } } }) .collect::<Vec<_>>(); // Sort the field columns too so that the output always comes // out in a predictable order field_columns.sort(); field_columns } /// Converts this table to an arrow record batch. 
pub fn to_arrow( &self, partition: &Partition, requested_columns: &[&str], ) -> Result<RecordBatch> { // if requested columns is empty, retrieve all columns in the table if requested_columns.is_empty() { self.all_to_arrow(partition) } else { let columns_with_index = self.column_names_with_index(partition, requested_columns)?; self.to_arrow_impl(partition, &columns_with_index) } } fn column_names_with_index<'a>( &self, partition: &Partition, columns: &[&'a str], ) -> Result<Vec<(&'a str, usize)>> { columns .iter() .map(|&column_name| { let column_id = partition.dictionary.lookup_value(column_name).context( ColumnNameNotFoundInDictionary { column_name, partition: &partition.key, }, )?; let column_index = *self .column_id_to_index .get(&column_id) .context(InternalNoColumnInIndex { column_name, column_id, })?; Ok((column_name, column_index)) }) .collect() } /// Convert all columns to an arrow record batch pub fn all_to_arrow(&self, partition: &Partition) -> Result<RecordBatch> { let mut requested_columns_with_index = self .column_id_to_index .iter() .map(|(&column_id, &column_index)| { let column_name = partition.dictionary.lookup_id(column_id).context( ColumnIdNotFoundInDictionary { column_id, partition: &partition.key, }, )?; Ok((column_name, column_index)) }) .collect::<Result<Vec<_>>>()?; requested_columns_with_index.sort_by(|(a, _), (b, _)| a.cmp(b)); self.to_arrow_impl(partition, &requested_columns_with_index) } /// Converts this table to an arrow record batch, /// /// requested columns with index are tuples of column_name, column_index pub fn to_arrow_impl( &self, partition: &Partition, requested_columns_with_index: &[(&str, usize)], ) -> Result<RecordBatch> { let mut fields = Vec::with_capacity(requested_columns_with_index.len()); let mut columns: Vec<ArrayRef> = Vec::with_capacity(requested_columns_with_index.len()); for &(column_name, column_index) in requested_columns_with_index.iter() { let arrow_col: ArrayRef = match &self.columns[column_index] { Column::String(vals, _) => { fields.push(ArrowField::new(column_name, ArrowDataType::Utf8, true)); let mut builder = StringBuilder::with_capacity(vals.len(), vals.len() * 10); for v in vals { match v { None => builder.append_null(), Some(s) => builder.append_value(s), } .context(ArrowError {})?; } Arc::new(builder.finish()) } Column::Tag(vals, _) => { fields.push(ArrowField::new(column_name, ArrowDataType::Utf8, true)); let mut builder = StringBuilder::with_capacity(vals.len(), vals.len() * 10); for v in vals { match v { None => builder.append_null(), Some(value_id) => { let tag_value = partition.dictionary.lookup_id(*value_id).context( TagValueIdNotFoundInDictionary { value: *value_id, partition: &partition.key, }, )?; builder.append_value(tag_value) } } .context(ArrowError {})?; } Arc::new(builder.finish()) } Column::F64(vals, _) => { fields.push(ArrowField::new(column_name, ArrowDataType::Float64, true)); let mut builder = Float64Builder::new(vals.len()); for v in vals { builder.append_option(*v).context(ArrowError {})?; } Arc::new(builder.finish()) } Column::I64(vals, _) => { fields.push(ArrowField::new(column_name, ArrowDataType::Int64, true)); let mut builder = Int64Builder::new(vals.len()); for v in vals { builder.append_option(*v).context(ArrowError {})?; } Arc::new(builder.finish()) } Column::Bool(vals, _) => { fields.push(ArrowField::new(column_name, ArrowDataType::Boolean, true)); let mut builder = BooleanBuilder::new(vals.len()); for v in vals { builder.append_option(*v).context(ArrowError {})?; } Arc::new(builder.finish()) } 
}; columns.push(arrow_col); } let schema = ArrowSchema::new(fields); RecordBatch::try_new(Arc::new(schema), columns).context(ArrowError {}) } /// returns true if any row in this table could possibly match the /// predicate. true does not mean any rows will *actually* match, /// just that the entire table cannot be ruled out. /// /// false means that no rows in this table could possibly match pub fn could_match_predicate(&self, partition_predicate: &PartitionPredicate) -> Result<bool> { Ok( self.matches_column_selection(partition_predicate.field_restriction.as_ref()) && self.matches_table_name_predicate( partition_predicate.table_name_predicate.as_ref(), ) && self.matches_timestamp_predicate(partition_predicate)? && self.has_columns(partition_predicate.required_columns.as_ref()), ) } /// Returns true if the table contains at least one of the fields /// requested or there are no specific fields requested. fn matches_column_selection(&self, column_selection: Option<&BTreeSet<u32>>) -> bool { match column_selection { Some(column_selection) => { // figure out if any of the columns exists self.column_id_to_index .keys() .any(|column_id| column_selection.contains(column_id)) } None => true, // no specific selection } } fn matches_table_name_predicate(&self, table_name_predicate: Option<&BTreeSet<u32>>) -> bool { match table_name_predicate { Some(table_name_predicate) => table_name_predicate.contains(&self.id), None => true, // no table predicate } } /// returns true if there are any timestamps in this table that /// fall within the timestamp range fn matches_timestamp_predicate( &self, partition_predicate: &PartitionPredicate, ) -> Result<bool> { match &partition_predicate.range { None => Ok(true), Some(range) => { let time_column_id = partition_predicate.time_column_id; let time_column = self.column(time_column_id)?; time_column.has_i64_range(range.start, range.end).context( ColumnPredicateEvaluation { column: time_column_id, }, ) } } } /// returns true if no columns are specified, or the table has all /// columns specified fn has_columns(&self, columns: Option<&PartitionIdSet>) -> bool { if let Some(columns) = columns { match columns { PartitionIdSet::AtLeastOneMissing => return false, PartitionIdSet::Present(symbols) => { for symbol in symbols { if !self.column_id_to_index.contains_key(symbol) { return false; } } } } } true } /// returns true if there are any rows in column that are non-null /// and within the timestamp range specified by pred pub fn column_matches_predicate<T>( &self, column: &[Option<T>], partition_predicate: &PartitionPredicate, ) -> Result<bool> { match partition_predicate.range { None => Ok(true), Some(range) => { let time_column_id = partition_predicate.time_column_id; let time_column = self.column(time_column_id)?; time_column .has_non_null_i64_range(column, range.start, range.end) .context(ColumnPredicateEvaluation { column: time_column_id, }) } } } } /// Reorders tag_columns so that its prefix matches exactly /// prefix_columns. Returns an error if there are duplicates, or other /// untoward inputs fn reorder_prefix( prefix_columns: &[String], tag_columns: Vec<Arc<String>>, ) -> Result<Vec<Arc<String>>> { // tag_used_set[i] is true if we have used the value in tag_columns[i] let mut tag_used_set = vec![false; tag_columns.len()]; // Note that this is an O(N^2) algorithm.
We are assuming the // number of tag columns is reasonably small // map from prefix_column[idx] -> index in tag_columns let prefix_map = prefix_columns .iter() .map(|pc| { let found_location = tag_columns .iter() .enumerate() .find(|(_, c)| pc == c.as_ref()); if let Some((index, _)) = found_location { if tag_used_set[index] { DuplicateGroupColumn { column_name: pc }.fail() } else { tag_used_set[index] = true; Ok(index) } } else { GroupColumnNotFound { column_name: pc, all_tag_column_names: tag_columns .iter() .map(|s| s.as_ref() as &str) .collect::<Vec<_>>() .as_slice() .join(", "), } .fail() } }) .collect::<Result<Vec<_>>>()?; let mut new_tag_columns = prefix_map .iter() .map(|&i| tag_columns[i].clone()) .collect::<Vec<_>>(); new_tag_columns.extend(tag_columns.into_iter().enumerate().filter_map(|(i, c)| { // already used in prefix if tag_used_set[i] { None } else { Some(c) } })); Ok(new_tag_columns) } /// Trait to help create DataFusion expressions from strings trait IntoExpr { /// Creates a DataFusion expr fn into_expr(&self) -> Expr; /// creates a DataFusion SortExpr fn into_sort_expr(&self) -> Expr { Expr::Sort { expr: Box::new(self.into_expr()), asc: true, // Sort ASCENDING nulls_first: true, } } } impl IntoExpr for Arc<String> { fn into_expr(&self) -> Expr { Expr::Column(self.as_ref().clone()) } } impl IntoExpr for str { fn into_expr(&self) -> Expr { Expr::Column(self.to_string()) } } #[cfg(test)] mod tests { use arrow::util::pretty::pretty_format_batches; use data_types::data::split_lines_into_write_entry_partitions; use datafusion::{logical_plan::Operator, scalar::ScalarValue}; use influxdb_line_protocol::{parse_lines, ParsedLine}; use query::{exec::Executor, predicate::PredicateBuilder}; use test_helpers::str_vec_to_arc_vec; use super::*; #[test] fn test_has_columns() { // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("table_name")); let lp_lines = vec![ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=72.4 250", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let state_symbol = dictionary.id("state").unwrap(); let new_symbol = dictionary.lookup_value_or_insert("not_a_columns"); assert!(table.has_columns(None)); let pred = PartitionIdSet::AtLeastOneMissing; assert!(!table.has_columns(Some(&pred))); let set = BTreeSet::<u32>::new(); let pred = PartitionIdSet::Present(set); assert!(table.has_columns(Some(&pred))); let mut set = BTreeSet::new(); set.insert(state_symbol); let pred = PartitionIdSet::Present(set); assert!(table.has_columns(Some(&pred))); let mut set = BTreeSet::new(); set.insert(new_symbol); let pred = PartitionIdSet::Present(set); assert!(!table.has_columns(Some(&pred))); let mut set = BTreeSet::new(); set.insert(state_symbol); set.insert(new_symbol); let pred = PartitionIdSet::Present(set); assert!(!table.has_columns(Some(&pred))); } #[test] fn test_matches_table_name_predicate() { // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("h2o")); let lp_lines = vec![ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=72.4 250", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let h2o_symbol = dictionary.id("h2o").unwrap(); assert!(table.matches_table_name_predicate(None)); let set = BTreeSet::new();
assert!(!table.matches_table_name_predicate(Some(&set))); let mut set = BTreeSet::new(); set.insert(h2o_symbol); assert!(table.matches_table_name_predicate(Some(&set))); // Some symbol that is not the same as h2o_symbol assert_ne!(37377, h2o_symbol); let mut set = BTreeSet::new(); set.insert(37377); assert!(!table.matches_table_name_predicate(Some(&set))); } #[tokio::test] async fn test_series_set_plan() { // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("table_name")); let lp_lines = vec![ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=72.4 250", "h2o,state=CA,city=LA temp=90.0 200", "h2o,state=CA,city=LA temp=90.0 350", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let predicate = PredicateBuilder::default().build(); let partition_predicate = partition.compile_predicate(&predicate).unwrap(); let series_set_plan = table .series_set_plan(&partition_predicate, &partition) .expect("creating the series set plan"); assert_eq!(series_set_plan.table_name.as_ref(), "table_name"); assert_eq!( series_set_plan.tag_columns, *str_vec_to_arc_vec(&["city", "state"]) ); assert_eq!( series_set_plan.field_columns, *str_vec_to_arc_vec(&["temp"]) ); // run the created plan, ensuring the output is as expected let results = run_plan(series_set_plan.plan).await; let expected = vec![ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "| Boston | MA | 72.4 | 250 |", "| LA | CA | 90 | 200 |", "| LA | CA | 90 | 350 |", "+--------+-------+------+------+", ]; assert_eq!(expected, results, "expected output"); } #[tokio::test] async fn test_series_set_plan_order() { // test that the columns and rows come out in the right order (tags then timestamp) // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("table_name")); let lp_lines = vec![ "h2o,zz_tag=A,state=MA,city=Kingston temp=70.1 800", "h2o,state=MA,city=Kingston,zz_tag=B temp=70.2 100", "h2o,state=CA,city=Boston temp=70.3 250", "h2o,state=MA,city=Boston,zz_tag=A temp=70.4 1000", "h2o,state=MA,city=Boston temp=70.5,other=5.0 250", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let predicate = PredicateBuilder::default().build(); let partition_predicate = partition.compile_predicate(&predicate).unwrap(); let series_set_plan = table .series_set_plan(&partition_predicate, &partition) .expect("creating the series set plan"); assert_eq!(series_set_plan.table_name.as_ref(), "table_name"); assert_eq!( series_set_plan.tag_columns, *str_vec_to_arc_vec(&["city", "state", "zz_tag"]) ); assert_eq!( series_set_plan.field_columns, *str_vec_to_arc_vec(&["other", "temp"]) ); // run the created plan, ensuring the output is as expected let results = run_plan(series_set_plan.plan).await; let expected = vec![ "+----------+-------+--------+-------+------+------+", "| city | state | zz_tag | other | temp | time |", "+----------+-------+--------+-------+------+------+", "| Boston | CA | | | 70.3 | 250 |", "| Boston | MA | | 5 | 70.5 | 250 |", "| Boston | MA | A | | 70.4 | 1000 |", "| Kingston | MA | A | | 70.1 | 800 |", "| Kingston | MA | B | | 70.2 | 100 |", "+----------+-------+--------+-------+------+------+", ]; assert_eq!(expected, results, "expected output"); } #[tokio::test] async 
fn test_series_set_plan_filter() { // test that filters are applied reasonably // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("table_name")); let lp_lines = vec![ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=72.4 250", "h2o,state=CA,city=LA temp=90.0 200", "h2o,state=CA,city=LA temp=90.0 350", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let predicate = PredicateBuilder::default() .add_expr(Expr::BinaryExpr { left: Box::new(Expr::Column("city".into())), op: Operator::Eq, right: Box::new(Expr::Literal(ScalarValue::Utf8(Some("LA".into())))), }) .timestamp_range(190, 210) .build(); let partition_predicate = partition.compile_predicate(&predicate).unwrap(); let series_set_plan = table .series_set_plan(&partition_predicate, &partition) .expect("creating the series set plan"); assert_eq!(series_set_plan.table_name.as_ref(), "table_name"); assert_eq!( series_set_plan.tag_columns, *str_vec_to_arc_vec(&["city", "state"]) ); assert_eq!( series_set_plan.field_columns, *str_vec_to_arc_vec(&["temp"]) ); // run the created plan, ensuring the output is as expected let results = run_plan(series_set_plan.plan).await; let expected = vec![ "+------+-------+------+------+", "| city | state | temp | time |", "+------+-------+------+------+", "| LA | CA | 90 | 200 |", "+------+-------+------+------+", ]; assert_eq!(expected, results, "expected output"); } #[tokio::test] async fn test_grouped_series_set_plan() { // test that filters are applied reasonably // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("table_name")); let lp_lines = vec![ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=72.4 250", "h2o,state=CA,city=LA temp=90.0 200", "h2o,state=CA,city=LA temp=90.0 350", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let predicate = PredicateBuilder::default() .add_expr(Expr::BinaryExpr { left: Box::new(Expr::Column("city".into())), op: Operator::Eq, right: Box::new(Expr::Literal(ScalarValue::Utf8(Some("LA".into())))), }) .timestamp_range(190, 210) .build(); let partition_predicate = partition.compile_predicate(&predicate).unwrap(); let group_columns = vec![String::from("state")]; let grouped_series_set_plan = table .grouped_series_set_plan(&partition_predicate, &group_columns, &partition) .expect("creating the grouped_series set plan"); assert_eq!(grouped_series_set_plan.num_prefix_tag_group_columns, 1); // run the created plan, ensuring the output is as expected let results = run_plan(grouped_series_set_plan.series_set_plan.plan).await; let expected = vec![ "+-------+------+------+------+", "| state | city | temp | time |", "+-------+------+------+------+", "| CA | LA | 90 | 200 |", "+-------+------+------+------+", ]; assert_eq!(expected, results, "expected output"); } #[tokio::test] async fn test_field_name_plan() { // setup a test table let mut partition = Partition::new("dummy_partition_key"); let dictionary = &mut partition.dictionary; let mut table = Table::new(dictionary.lookup_value_or_insert("table_name")); let lp_lines = vec![ // Order this so field3 comes before field2 // (and thus the columns need to get reordered) "h2o,tag1=foo,tag2=bar field1=70.6,field3=2 100", "h2o,tag1=foo,tag2=bar field1=70.4,field2=\"ss\" 100", "h2o,tag1=foo,tag2=bar 
field1=70.5,field2=\"ss\" 100", "h2o,tag1=foo,tag2=bar field1=70.6,field4=true 1000", ]; write_lines_to_table(&mut table, dictionary, lp_lines); let predicate = PredicateBuilder::default().timestamp_range(0, 200).build(); let partition_predicate = partition.compile_predicate(&predicate).unwrap(); let field_names_set_plan = table .field_names_plan(&partition_predicate, &partition) .expect("creating the field_name plan"); // run the created plan, ensuring the output is as expected let results = run_plan(field_names_set_plan).await; let expected = vec![ "+--------+--------+--------+--------+------+", "| field1 | field2 | field3 | field4 | time |", "+--------+--------+--------+--------+------+", "| 70.6 | | 2 | | 100 |", "| 70.4 | ss | | | 100 |", "| 70.5 | ss | | | 100 |", "+--------+--------+--------+--------+------+", ]; assert_eq!(expected, results, "expected output"); } #[test] fn test_reorder_prefix() { assert_eq!(reorder_prefix_ok(&[], &[]), &[] as &[&str]); assert_eq!(reorder_prefix_ok(&[], &["one"]), &["one"]); assert_eq!(reorder_prefix_ok(&["one"], &["one"]), &["one"]); assert_eq!(reorder_prefix_ok(&[], &["one", "two"]), &["one", "two"]); assert_eq!( reorder_prefix_ok(&["one"], &["one", "two"]), &["one", "two"] ); assert_eq!( reorder_prefix_ok(&["two"], &["one", "two"]), &["two", "one"] ); assert_eq!( reorder_prefix_ok(&["two", "one"], &["one", "two"]), &["two", "one"] ); assert_eq!( reorder_prefix_ok(&[], &["one", "two", "three"]), &["one", "two", "three"] ); assert_eq!( reorder_prefix_ok(&["one"], &["one", "two", "three"]), &["one", "two", "three"] ); assert_eq!( reorder_prefix_ok(&["two"], &["one", "two", "three"]), &["two", "one", "three"] ); assert_eq!( reorder_prefix_ok(&["three", "one"], &["one", "two", "three"]), &["three", "one", "two"] ); // errors assert_eq!( reorder_prefix_err(&["one"], &[]), "Group column \'one\' not found in tag columns: " ); assert_eq!( reorder_prefix_err(&["one"], &["two", "three"]), "Group column \'one\' not found in tag columns: two, three" ); assert_eq!( reorder_prefix_err(&["two", "one", "two"], &["one", "two"]), "Duplicate group column \'two\'" ); } fn reorder_prefix_ok(prefix: &[&str], table_columns: &[&str]) -> Vec<String> { let prefix = prefix.iter().map(|s| s.to_string()).collect::<Vec<_>>(); let table_columns = Arc::try_unwrap(str_vec_to_arc_vec(table_columns)).expect("unwrap the arc"); let res = reorder_prefix(&prefix, table_columns); let message = format!("Expected OK, got {:?}", res); let res = res.expect(&message); res.into_iter() .map(|a| Arc::try_unwrap(a).expect("unwrapping arc")) .collect() } // returns the error string or panics if `reorder_prefix` doesn't return an error fn reorder_prefix_err(prefix: &[&str], table_columns: &[&str]) -> String { let prefix = prefix.iter().map(|s| s.to_string()).collect::<Vec<_>>(); let table_columns = Arc::try_unwrap(str_vec_to_arc_vec(table_columns)).expect("unwrap the arc"); let res = reorder_prefix(&prefix, table_columns); match res { Ok(r) => { panic!( "Expected error result from reorder_prefix_err, but was OK: '{:?}'", r ); } Err(e) => format!("{}", e), } } /// Runs `plan` and returns the output as a pretty-formatted array of strings async fn run_plan(plan: LogicalPlan) -> Vec<String> { // run the created plan, ensuring the output is as expected let batches = Executor::new() .run_logical_plan(plan) .await .expect("ok running plan"); pretty_format_batches(&batches) .expect("formatting results") .trim() .split('\n') .map(|s| s.to_string()) .collect::<Vec<_>>() } /// Insert the line protocol lines in
`lp_lines` into this table fn write_lines_to_table(table: &mut Table, dictionary: &mut Dictionary, lp_lines: Vec<&str>) { let lp_data = lp_lines.join("\n"); let lines: Vec<_> = parse_lines(&lp_data).map(|l| l.unwrap()).collect(); let data = split_lines_into_write_entry_partitions(partition_key_func, &lines); let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&data); let entries = batch.entries().expect("at least one entry"); for entry in entries { let table_batches = entry.table_batches().expect("there were table batches"); for batch in table_batches { let rows = batch.rows().expect("Had rows in the batch"); table .append_rows(dictionary, &rows) .expect("Appended the row"); } } } fn partition_key_func(_: &ParsedLine<'_>) -> String { String::from("the_partition_key") } }
{ Self { id, column_id_to_index: HashMap::new(), columns: Vec::new(), } }
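Worth a gloss before the next record: the reorder_prefix routine in this file moves the requested group-by columns to the front of the tag list, failing on duplicate or unknown names, and the tests above pin down its exact behavior. Below is a minimal Python sketch of the same algorithm (an illustration only, not the project's API; names mirror the Rust):

def reorder_prefix(prefix_columns, tag_columns):
    # tag_used[i] is True once tag_columns[i] has been consumed by the prefix.
    # O(n^2), matching the Rust comment's assumption that n is small.
    tag_used = [False] * len(tag_columns)
    prefix = []
    for pc in prefix_columns:
        if pc not in tag_columns:
            raise ValueError(f"Group column '{pc}' not found in tag columns: {', '.join(tag_columns)}")
        i = tag_columns.index(pc)
        if tag_used[i]:
            raise ValueError(f"Duplicate group column '{pc}'")
        tag_used[i] = True
        prefix.append(tag_columns[i])
    # Remaining tags keep their original relative order after the prefix.
    return prefix + [c for i, c in enumerate(tag_columns) if not tag_used[i]]

assert reorder_prefix(["three", "one"], ["one", "two", "three"]) == ["three", "one", "two"]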
test_expand_v2_op_xpu.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import sys import numpy as np sys.path.append("..") from op_test import OpTest from op_test_xpu import XPUOpTest import paddle.fluid as fluid from paddle.fluid import Program, program_guard import paddle from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper paddle.enable_static() np.random.seed(10) # CANN Op Support X: float32, int32, int64 # Situation 1: shape is a list(without tensor) class XPUTestExpandV2Op(XPUOpTestWrapper): def __init__(self): self.op_name = 'expand_v2' self.use_dynamic_create_class = False class TestExpandV2XPUOp(XPUOpTest): def setUp(self): self.init_dtype() self.set_xpu() self.op_type = "expand_v2" self.place = paddle.XPUPlace(0) self.init_data() self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype) } self.attrs = {'shape': self.shape} output = np.tile(self.inputs['X'], self.expand_times) self.outputs = {'Out': output} def init_dtype(self): self.dtype = self.in_type def set_xpu(self): self.__class__.use_xpu = True self.__class__.no_need_check_grad = True def init_data(self): self.ori_shape = [100] self.shape = [100] self.expand_times = [1] def test_check_output(self): self.check_output_with_place(self.place) class TestExpandV2OpRank2_DimExpanding(TestExpandV2XPUOp): def init_data(self): self.ori_shape = [120] self.shape = [2, 120] self.expand_times = [2, 1] class TestExpandV2OpRank2(TestExpandV2XPUOp): def init_data(self): self.ori_shape = [1, 140] self.shape = [12, 140] self.expand_times = [12, 1] class TestExpandV2OpRank3_Corner(TestExpandV2XPUOp): def init_data(self): self.ori_shape = (2, 10, 5) self.shape = (2, 10, 5) self.expand_times = (1, 1, 1) class TestExpandV2OpRank4(TestExpandV2XPUOp): def init_data(self): self.ori_shape = (2, 4, 5, 7) self.shape = (-1, -1, -1, -1) self.expand_times = (1, 1, 1, 1) class TestExpandV2OpRank5(TestExpandV2XPUOp): def init_data(self): self.ori_shape = (2, 4, 1, 15) self.shape = (2, -1, 4, -1) self.expand_times = (1, 1, 4, 1) class TestExpandV2OpRank6(TestExpandV2XPUOp): def init_data(self): self.ori_shape = (4, 1, 30) self.shape = (2, -1, 4, 30) self.expand_times = (2, 1, 4, 1) # Situation 2: shape is a list(with tensor) class TestExpandV2OpXPURank1_tensor_attr(TestExpandV2XPUOp): def setUp(self): self.set_xpu() self.place = paddle.XPUPlace(0) self.op_type = "expand_v2" self.init_data() self.dtype = np.float32 expand_shapes_tensor = [] for index, ele in enumerate(self.expand_shape): expand_shapes_tensor.append(("x" + str(index), np.ones( (1)).astype('int32') * ele)) self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype), 'expand_shapes_tensor': expand_shapes_tensor, } self.attrs = {"shape": self.infer_expand_shape} output = np.tile(self.inputs['X'], self.expand_times) self.outputs = {'Out': output} def init_data(self): self.ori_shape = [100] self.expand_times = [1] self.expand_shape = [100] 
self.infer_expand_shape = [-1] class
( TestExpandV2OpXPURank1_tensor_attr): def init_data(self): self.ori_shape = [12, 14] self.expand_times = [1, 1] self.expand_shape = [12, 14] self.infer_expand_shape = [12, -1] # Situation 3: shape is a tensor class TestExpandV2XPUOp_tensor(TestExpandV2XPUOp): def setUp(self): self.set_xpu() self.place = paddle.XPUPlace(0) self.op_type = "expand_v2" self.init_data() self.dtype = np.float32 self.inputs = { 'X': np.random.random(self.ori_shape).astype(self.dtype), 'Shape': np.array(self.expand_shape).astype("int32"), } self.attrs = {} output = np.tile(self.inputs['X'], self.expand_times) self.outputs = {'Out': output} def init_data(self): self.ori_shape = [100] self.expand_times = [2, 1] self.expand_shape = [2, 100] # Situation 5: input x is int32 # skip grad check for int32 class TestExpandV2OpInteger(XPUOpTest): def init_type(self): self.dtype = 'int32' def setUp(self): self.set_xpu() self.init_type() self.place = paddle.XPUPlace(0) self.op_type = "expand_v2" self.inputs = { 'X': np.random.randint(10, size=(2, 4, 20)).astype(self.dtype) } self.attrs = {'shape': [2, 4, 20]} output = np.tile(self.inputs['X'], (1, 1, 1)) self.outputs = {'Out': output} def set_xpu(self): self.__class__.use_xpu = True def test_check_output(self): self.check_output_with_place(self.place) def test_check_grad(self): pass # Test python API class TestExpandV2API(unittest.TestCase): def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): input = np.random.random([12, 14]).astype("float32") x = fluid.layers.data(name='x', shape=[12, 14], append_batch_size=False, dtype="float32") positive_2 = fluid.layers.fill_constant([1], "int32", 12) expand_shape = fluid.layers.data(name="expand_shape", shape=[2], append_batch_size=False, dtype="int32") out_1 = paddle.expand(x, shape=[12, 14]) out_2 = paddle.expand(x, shape=[positive_2, 14]) out_3 = paddle.expand(x, shape=expand_shape) g0 = fluid.backward.calc_gradient(out_2, x) exe = fluid.Executor(place=paddle.XPUPlace(0)) res_1, res_2, res_3 = exe.run(fluid.default_main_program(), feed={ "x": input, "expand_shape": np.array([12, 14]).astype("int32") }, fetch_list=[out_1, out_2, out_3]) assert np.array_equal(res_1, np.tile(input, (1, 1))) assert np.array_equal(res_2, np.tile(input, (1, 1))) assert np.array_equal(res_3, np.tile(input, (1, 1))) support_types = get_xpu_op_support_types('expand_v2') for stype in support_types: create_test_class(globals(), XPUTestExpandV2Op, stype) if __name__ == "__main__": unittest.main()
TestExpandV2OpRank2_Corner_tensor_attr
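All of these cases reduce the expand_v2 oracle to numpy tiling: the expected output is np.tile(X, expand_times), where expand_times is derived from ori_shape and the (possibly -1-bearing) target shape. A standalone numpy sketch of that derivation, checked against the shapes the tests use (the helper name is hypothetical, not a Paddle API):

import numpy as np

def expand_times(ori_shape, target_shape):
    # Right-align ori_shape against target_shape; -1 in the target means "keep as is".
    ori = [1] * (len(target_shape) - len(ori_shape)) + list(ori_shape)
    return [1 if t == -1 else t // o for o, t in zip(ori, target_shape)]

assert expand_times([1, 140], [12, 140]) == [12, 1]          # TestExpandV2OpRank2
assert expand_times((2, 4, 1, 15), (2, -1, 4, -1)) == [1, 1, 4, 1]  # Rank5
assert expand_times((4, 1, 30), (2, -1, 4, 30)) == [2, 1, 4, 1]     # Rank6
x = np.random.random([1, 140])
assert np.tile(x, expand_times([1, 140], [12, 140])).shape == (12, 140)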
da3.py
"""Source code for distributed attentional actor architecture (DA3) model. Author: Yoshinari Motokawa <[email protected]> """ from typing import List import torch from core.utils.logging import initialize_logging from omegaconf import DictConfig from torch import nn from ..hard_shrink_attention import HardShrinkBlock from ..vit import Block, PatchEmbed logger = initialize_logging(__name__) class DA3(nn.Module): def __init__(self, config: DictConfig, input_shape: List[int], output_size: int): super().__init__() patched_size_x = input_shape[1] // config.model.patch_size patched_size_y = input_shape[2] // config.model.patch_size self.view_method = config.observation_area_mask self.patch_embed = PatchEmbed( patch_size=config.model.patch_size, in_chans=input_shape[0], embed_dim=config.model.embed_dim, ) self.saliency_vector = nn.Parameter(torch.zeros(1, 1, config.model.embed_dim)) self.pos_embed = nn.Parameter( torch.zeros(1, patched_size_x * patched_size_y + 1, config.model.embed_dim) ) block = HardShrinkBlock if config.model.attention == "hard" else Block self.blocks = nn.ModuleList( [ block( dim=config.model.embed_dim, num_heads=config.model.num_heads, mlp_ratio=config.model.mlp_ratio, **{"af_lambd": config.model.af_lambd} ) for _ in range(config.model.block_loop) ] ) self.norm = nn.LayerNorm(config.model.embed_dim) self.head = nn.Linear(config.model.embed_dim, output_size) def forward(self, state): x = self.state_encoder(state) out = self.patch_embed(x) saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1) out = torch.cat((saliency_vector, out), dim=1) out = out + self.pos_embed for blk in self.blocks: out = blk(out) out = self.norm(out) out = out[:, 0] out = self.head(out) return out def
(self, state): x = self.state_encoder(state) out = self.patch_embed(x) saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1) out = torch.cat((saliency_vector, out), dim=1) out = out + self.pos_embed attns = list() for blk in self.blocks: out, attn = blk.forward_attn(out) attns.append(attn.detach()) out = self.norm(out) out = out[:, 0] out = self.head(out) return out, [attns] def state_encoder(self, state): return state[self.view_method]
forward_attn
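The DA3 model above follows the usual ViT token bookkeeping: patch-embed the input, prepend one learned token (a saliency vector in place of the class token), add positional embeddings sized for patches + 1, run the blocks, and feed only token 0 to the head. A toy PyTorch sketch of just that shape arithmetic (all dimensions here are made up for illustration):

import torch

B, N, D = 4, 16, 32                      # batch, number of patches, embed dim
patches = torch.randn(B, N, D)           # stand-in for the patch embedding output
saliency = torch.zeros(1, 1, D)          # learned token, expanded per batch item
pos = torch.zeros(1, N + 1, D)           # one extra position for the prepended token

out = torch.cat((saliency.expand(B, -1, -1), patches), dim=1) + pos
head_input = out[:, 0]                   # only the saliency token reaches the head
assert head_input.shape == (B, D)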
tests.rs
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::sync::Arc; use std::time::{Duration, Instant}; use std::{io, str}; use futures::{future, FutureExt, StreamExt, TryFutureExt}; use tokio; use tracing::{info, info_span}; use tracing_futures::Instrument as _; use super::{ ClientConfigBuilder, Endpoint, EndpointDriver, Incoming, NewConnection, RecvStream, SendStream, ServerConfigBuilder, }; #[test] fn handshake_timeout() { let _guard = subscribe();
let runtime = tokio::runtime::Runtime::new().unwrap(); runtime.spawn(client_driver.unwrap_or_else(|e| panic!("client endpoint driver failed: {}", e))); let mut client_config = crate::ClientConfig::default(); const IDLE_TIMEOUT: u64 = 500; client_config.transport = Arc::new(crate::TransportConfig { idle_timeout: IDLE_TIMEOUT, initial_rtt: 10_000, // Ensure initial PTO doesn't influence the timeout significantly ..Default::default() }); let start = Instant::now(); runtime.block_on(async move { match client .connect_with( client_config, &SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1), "localhost", ) .unwrap() .await { Err(crate::ConnectionError::TimedOut) => {} Err(e) => panic!("unexpected error: {:?}", e), Ok(_) => panic!("unexpected success"), } }); let dt = start.elapsed(); assert!( dt > Duration::from_millis(IDLE_TIMEOUT) && dt < 2 * Duration::from_millis(IDLE_TIMEOUT) ); } #[test] fn drop_endpoint() { let _guard = subscribe(); let (driver, endpoint, _) = Endpoint::builder() .bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)) .unwrap(); let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); runtime.spawn( endpoint .connect( &SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .unwrap() .map(|x| match x { Err(crate::ConnectionError::TransportError(proto::TransportError { code: proto::TransportErrorCode::INTERNAL_ERROR, .. })) => {} Err(e) => panic!("unexpected error: {}", e), Ok(_) => { panic!("unexpected success"); } }), ); drop((driver, endpoint)); runtime.run().unwrap(); } #[test] fn drop_endpoint_driver() { let _guard = subscribe(); let endpoint = Endpoint::builder(); let (_, endpoint, _) = endpoint .bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)) .unwrap(); assert!(endpoint .connect( &SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .is_err()); } #[test] fn close_endpoint() { let _guard = subscribe(); let endpoint = Endpoint::builder(); let (_driver, endpoint, incoming) = endpoint .bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)) .unwrap(); let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); runtime.spawn(incoming.for_each(|_| future::ready(()))); runtime.spawn( endpoint .connect( &SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1234), "localhost", ) .unwrap() .map(|x| match x { Err(crate::ConnectionError::LocallyClosed) => (), Err(e) => panic!("unexpected error: {}", e), Ok(_) => { panic!("unexpected success"); } }), ); endpoint.close(0u32.into(), &[]); runtime.run().unwrap(); } #[test] fn local_addr() { let socket = UdpSocket::bind("[::1]:0").unwrap(); let addr = socket.local_addr().unwrap(); let (_, ep, _) = Endpoint::builder().with_socket(socket).unwrap(); assert_eq!( addr, ep.local_addr() .expect("Could not obtain our local endpoint") ); } #[test] fn read_after_close() { let _guard = subscribe(); let (driver, endpoint, mut incoming) = endpoint(); let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); runtime.spawn(driver.unwrap_or_else(|e| panic!("{}", e))); const MSG: &[u8] = b"goodbye!"; runtime.spawn(async move { let new_conn = incoming .next() .await .expect("endpoint") .await .expect("connection"); tokio::runtime::current_thread::spawn(new_conn.driver.unwrap_or_else(|_| ())); let mut s = new_conn.connection.open_uni().await.unwrap(); s.write_all(MSG).await.unwrap(); s.finish().await.unwrap(); }); runtime.spawn(async move { let mut new_conn = endpoint .connect(&endpoint.local_addr().unwrap(), "localhost") .unwrap() .await 
.expect("connect"); tokio::runtime::current_thread::spawn(new_conn.driver.unwrap_or_else(|_| ())); tokio::timer::delay(Instant::now() + Duration::from_millis(100)).await; let stream = new_conn .uni_streams .next() .await .expect("incoming streams") .expect("missing stream"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG); }); runtime.run().unwrap(); } /// Construct an endpoint suitable for connecting to itself fn endpoint() -> (EndpointDriver, Endpoint, Incoming) { let mut endpoint = Endpoint::builder(); let mut server_config = ServerConfigBuilder::default(); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = crate::PrivateKey::from_der(&cert.serialize_private_key_der()).unwrap(); let cert = crate::Certificate::from_der(&cert.serialize_der().unwrap()).unwrap(); let cert_chain = crate::CertificateChain::from_certs(vec![cert.clone()]); server_config.certificate(cert_chain, key).unwrap(); endpoint.listen(server_config.build()); let mut client_config = ClientConfigBuilder::default(); client_config.add_certificate_authority(cert).unwrap(); endpoint.default_client_config(client_config.build()); let (x, y, z) = endpoint .bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)) .unwrap(); (x, y, z) } #[test] fn zero_rtt() { let _guard = subscribe(); let (driver, endpoint, incoming) = endpoint(); let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); runtime.spawn(driver.unwrap_or_else(|e| panic!("{}", e))); const MSG: &[u8] = b"goodbye!"; runtime.spawn(incoming.take(2).for_each(|incoming| { async { let NewConnection { driver, mut uni_streams, connection, .. } = incoming.into_0rtt().unwrap_or_else(|_| unreachable!()).0; tokio::runtime::current_thread::spawn(driver.unwrap_or_else(|_| ())); tokio::runtime::current_thread::spawn(async move { while let Some(Ok(x)) = uni_streams.next().await { let msg = x.read_to_end(usize::max_value()).await.unwrap(); assert_eq!(msg, MSG); } }); let mut s = connection.open_uni().await.expect("open_uni"); s.write_all(MSG).await.expect("write"); s.finish().await.expect("finish"); } })); runtime.block_on(async { let NewConnection { driver, mut uni_streams, .. } = endpoint .connect(&endpoint.local_addr().unwrap(), "localhost") .unwrap() .into_0rtt() .err() .expect("0-RTT succeeded without keys") .await .expect("connect"); tokio::runtime::current_thread::spawn(async move { // Buy time for the driver to process the server's NewSessionTicket tokio::timer::delay(Instant::now() + Duration::from_millis(100)).await; let stream = uni_streams .next() .await .expect("incoming streams") .expect("missing stream"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG); }); driver.unwrap_or_else(|_| ()).await }); info!("initial connection complete"); let ( NewConnection { connection, driver, mut uni_streams, .. 
}, zero_rtt, ) = endpoint .connect(&endpoint.local_addr().unwrap(), "localhost") .unwrap() .into_0rtt() .ok() .expect("missing 0-RTT keys"); // Send something before the driver starts to ensure it's 0-RTT runtime.spawn(async move { let mut s = connection.open_uni().await.expect("0-RTT open uni"); s.write_all(MSG).await.expect("0-RTT write"); s.finish().await.expect("0-RTT finish"); }); runtime.spawn(driver.unwrap_or_else(|_| ())); runtime.block_on(async move { let stream = uni_streams .next() .await .expect("incoming streams") .expect("missing stream"); let msg = stream .read_to_end(usize::max_value()) .await .expect("read_to_end"); assert_eq!(msg, MSG); assert_eq!(zero_rtt.await, true); }); // The endpoint driver won't finish if we could still create new connections drop(endpoint); runtime.run().unwrap(); } #[test] fn echo_v6() { run_echo( SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 0), ); } #[test] fn echo_v4() { run_echo( SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), ); } #[test] #[cfg(any(target_os = "linux", target_os = "macos"))] // Dual-stack sockets aren't the default anywhere else. fn echo_dualstack() { run_echo( SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), ); } fn run_echo(client_addr: SocketAddr, server_addr: SocketAddr) { let _guard = subscribe(); let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); { // We don't use the `endpoint` helper here because we want two different endpoints with // different addresses. let mut server_config = ServerConfigBuilder::default(); let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let key = crate::PrivateKey::from_der(&cert.serialize_private_key_der()).unwrap(); let cert = crate::Certificate::from_der(&cert.serialize_der().unwrap()).unwrap(); let cert_chain = crate::CertificateChain::from_certs(vec![cert.clone()]); server_config.certificate(cert_chain, key).unwrap(); let mut server = Endpoint::builder(); server.listen(server_config.build()); let server_sock = UdpSocket::bind(server_addr).unwrap(); let server_addr = server_sock.local_addr().unwrap(); let (server_driver, _, mut server_incoming) = server.with_socket(server_sock).unwrap(); let mut client_config = ClientConfigBuilder::default(); client_config.add_certificate_authority(cert).unwrap(); client_config.enable_keylog(); let mut client = Endpoint::builder(); client.default_client_config(client_config.build()); let (client_driver, client, _) = client.bind(&client_addr).unwrap(); runtime.spawn( server_driver .unwrap_or_else(|e| panic!("server driver failed: {}", e)) .instrument(info_span!("server endpoint")), ); runtime.spawn( client_driver .unwrap_or_else(|e| panic!("client driver failed: {}", e)) .instrument(info_span!("client endpoint")), ); runtime.spawn(async move { let incoming = server_incoming.next().await.unwrap(); let new_conn = incoming.instrument(info_span!("server")).await.unwrap(); tokio::spawn( new_conn .bi_streams .take_while(|x| future::ready(x.is_ok())) .for_each(|s| echo(s.unwrap())), ); new_conn .driver .unwrap_or_else(|_| ()) .instrument(info_span!("server")) .await }); info!("connecting from {} to {}", client_addr, server_addr); runtime.block_on(async move { let new_conn = client .connect(&server_addr, "localhost") .unwrap() .instrument(info_span!("client")) .await .expect("connect"); tokio::spawn( new_conn .driver .unwrap_or_else(|e| 
eprintln!("outgoing connection lost: {}", e)) .instrument(info_span!("client")), ); let (mut send, recv) = new_conn.connection.open_bi().await.expect("stream open"); send.write_all(b"foo").await.expect("write"); send.finish().await.expect("finish"); let data = recv.read_to_end(usize::max_value()).await.expect("read"); assert_eq!(&data[..], b"foo"); new_conn.connection.close(0u32.into(), b"done"); }); } runtime.run().unwrap(); } async fn echo((mut send, recv): (SendStream, RecvStream)) { let data = recv .read_to_end(usize::max_value()) .await .expect("read_to_end"); send.write_all(&data).await.expect("send"); let _ = send.finish().await; } pub fn subscribe() -> tracing::subscriber::DefaultGuard { let sub = tracing_subscriber::FmtSubscriber::builder() .with_env_filter("quinn=trace") .with_writer(|| TestWriter) .finish(); tracing::subscriber::set_default(sub) } struct TestWriter; impl std::io::Write for TestWriter { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { print!( "{}", str::from_utf8(buf).expect("tried to log invalid UTF-8") ); Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { io::stdout().flush() } }
let (client_driver, client, _) = Endpoint::builder() .bind(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)) .unwrap();
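handshake_timeout above brackets the failure in time: the connect future must fail only after the idle timer fires, but well before twice the timeout. The same bracketing pattern, reduced to plain Python as a sketch (timings are illustrative only, not quinn behavior):

import time

IDLE_TIMEOUT_MS = 500

start = time.monotonic()
time.sleep(0.6)  # stand-in for the failing connect() await
elapsed_ms = (time.monotonic() - start) * 1000
# Long enough that the idle timer fired, short enough that nothing slower did.
assert IDLE_TIMEOUT_MS < elapsed_ms < 2 * IDLE_TIMEOUT_MS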
conftest.py
import logging import os import uuid from distutils import util from pathlib import Path import pytest import test_infra.utils as infra_utils from test_infra import assisted_service_api, consts, utils qe_env = False def is_qe_env(): return os.environ.get('NODE_ENV') == 'QE_VM' def _get_cluster_name(): cluster_name = utils.get_env('CLUSTER_NAME', f'{consts.CLUSTER_PREFIX}') if cluster_name == consts.CLUSTER_PREFIX: cluster_name = cluster_name + '-' + str(uuid.uuid4())[:8] return cluster_name # TODO: change it if is_qe_env(): from test_infra.controllers.node_controllers.qe_vm_controler import \ QeVmController as nodeController qe_env = True else: from test_infra.controllers.node_controllers.terraform_controller import \ TerraformController as nodeController private_ssh_key_path_default = os.path.join(os.getcwd(), "ssh_key/key") if not qe_env else \ os.path.join(str(Path.home()), ".ssh/id_rsa") env_variables = {"ssh_public_key": utils.get_env('SSH_PUB_KEY'), "remote_service_url": utils.get_env('REMOTE_SERVICE_URL'), "pull_secret": utils.get_env('PULL_SECRET'), "offline_token": utils.get_env('OFFLINE_TOKEN'), "openshift_version": utils.get_openshift_version(), "base_domain": utils.get_env('BASE_DOMAIN', "redhat.com"), "num_masters": int(utils.get_env('NUM_MASTERS', consts.NUMBER_OF_MASTERS)), "num_workers": max(2, int(utils.get_env('NUM_WORKERS', 0))), "vip_dhcp_allocation": bool(util.strtobool(utils.get_env('VIP_DHCP_ALLOCATION'))), "worker_memory": int(utils.get_env('WORKER_MEMORY', '8892')), "master_memory": int(utils.get_env('MASTER_MEMORY', '16984')), "network_mtu": utils.get_env('NETWORK_MTU', '1500'), "worker_disk": int(utils.get_env('WORKER_DISK', '21474836480')), "master_disk": int(utils.get_env('MASTER_DISK', '128849018880')), "storage_pool_path": utils.get_env('STORAGE_POOL_PATH', os.path.join(os.getcwd(), "storage_pool")), "cluster_name": _get_cluster_name(), "private_ssh_key_path": utils.get_env('PRIVATE_KEY_PATH', private_ssh_key_path_default), "kubeconfig_path": utils.get_env('KUBECONFIG', ''), "log_folder": utils.get_env('LOG_FOLDER', consts.LOG_FOLDER), "service_cidr": utils.get_env('SERVICE_CIDR', '172.30.0.0/16'), "cluster_cidr": utils.get_env('CLUSTER_CIDR', '10.128.0.0/14'), "host_prefix": int(utils.get_env('HOST_PREFIX', '23')), "iso_image_type": utils.get_env('ISO_IMAGE_TYPE', consts.ImageType.FULL_ISO), "worker_vcpu": utils.get_env('WORKER_CPU', consts.WORKER_CPU), "master_vcpu": utils.get_env('MASTER_CPU', consts.MASTER_CPU), "test_teardown": bool(util.strtobool(utils.get_env('TEST_TEARDOWN', 'true'))), "namespace": utils.get_env('NAMESPACE', consts.DEFAULT_NAMESPACE), "olm_operators": utils.get_env('OLM_OPERATORS', []), } cluster_mid_name = infra_utils.get_random_name() # Tests running on terraform in parallel must have a unique ISO file if not qe_env: image = utils.get_env('ISO', os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-{cluster_mid_name}-' f'installer-image.iso')).strip() env_variables["kubeconfig_path"] = f'/tmp/test_kubeconfig_{cluster_mid_name}' else: image = utils.get_env('ISO', os.path.join(consts.IMAGE_FOLDER, f'{env_variables["cluster_name"]}-installer-image.iso')).
\ strip() env_variables["iso_download_path"] = image env_variables["num_nodes"] = env_variables["num_workers"] + env_variables["num_masters"] @pytest.fixture(scope="session") def api_client(): logging.info('--- SETUP --- api_client\n') yield get_api_client() def get_api_client(offline_token=env_variables['offline_token'], **kwargs): url = env_variables['remote_service_url'] if not url: url = utils.get_local_assisted_service_url( utils.get_env('PROFILE'), env_variables['namespace'], 'assisted-service', utils.get_env('DEPLOY_TARGET')) return assisted_service_api.create_client(url, offline_token, **kwargs) @pytest.fixture(scope="session") def
(): logging.info('--- SETUP --- node controller\n') yield nodeController logging.info('--- TEARDOWN --- node controller\n') @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield result = outcome.get_result() setattr(item, "result_" + result.when, result)
setup_node_controller
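The pytest_runtest_makereport hook at the end of this record is the stock recipe for exposing per-phase outcomes to fixtures: it stores each report on the test item as result_setup / result_call / result_teardown. A minimal sketch of a teardown-time consumer (the fixture name and log action are hypothetical):

import logging
import pytest

@pytest.fixture
def node_logs(request):
    yield
    # Set by the makereport hook above; present only after the call phase ran.
    result = getattr(request.node, "result_call", None)
    if result is not None and result.failed:
        logging.info("collecting logs for failed test %s", request.node.name)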
proxy.ts
export enum ProxySource { no = 'no', system = 'system', manual = 'manual' } export enum ProxyProtocol { http = 'http', https = 'https', socks4 = 'socks4', socks4a = 'socks4a', socks5 = 'socks5',
protocol: '' | ProxyProtocol host: string port: string username: string password: string } export type Proxy = { source: ProxySource manualProxyConfig: ManualProxy }
socks5h = 'socks5h' } export type ManualProxy = {
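ManualProxy stores protocol, host, port, and credentials as separate fields; a consumer would typically assemble them into a single proxy URL. A sketch of that assembly, kept in Python for consistency with the other examples (the empty-string protocol marks "unset", mirroring the '' | ProxyProtocol union above; the helper is hypothetical):

def proxy_url(protocol, host, port, username="", password=""):
    # '' means the manual proxy has not been configured yet.
    if not protocol:
        return None
    auth = f"{username}:{password}@" if username else ""
    return f"{protocol}://{auth}{host}:{port}"

assert proxy_url("socks5", "127.0.0.1", "1080") == "socks5://127.0.0.1:1080"
assert proxy_url("", "", "") is None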
app.js
/******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { /******/ configurable: false, /******/ enumerable: true, /******/ get: getter /******/ }); /******/ } /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = "/"; /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 2); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ /***/ (function(module, exports, __webpack_require__) { var __WEBPACK_AMD_DEFINE_ARRAY__, __WEBPACK_AMD_DEFINE_RESULT__;/*! * jQuery JavaScript Library v3.3.1 * https://jquery.com/ * * Includes Sizzle.js * https://sizzlejs.com/ * * Copyright JS Foundation and other contributors * Released under the MIT license * https://jquery.org/license * * Date: 2018-01-20T17:24Z */ ( function( global, factory ) { "use strict"; if ( typeof module === "object" && typeof module.exports === "object" ) { // For CommonJS and CommonJS-like environments where a proper `window` // is present, execute the factory and get jQuery. // For environments that do not have a `window` with a `document` // (such as Node.js), expose a factory as module.exports. // This accentuates the need for the creation of a real `window`. // e.g. var jQuery = require("jquery")(window); // See ticket #14549 for more info. module.exports = global.document ? factory( global, true ) : function( w ) { if ( !w.document ) { throw new Error( "jQuery requires a window with a document" ); } return factory( w ); }; } else { factory( global ); } // Pass this if window is not defined yet } )( typeof window !== "undefined" ? 
window : this, function( window, noGlobal ) { // Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 // throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode // arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common // enough that all such attempts are guarded in a try block. "use strict"; var arr = []; var document = window.document; var getProto = Object.getPrototypeOf; var slice = arr.slice; var concat = arr.concat; var push = arr.push; var indexOf = arr.indexOf; var class2type = {}; var toString = class2type.toString; var hasOwn = class2type.hasOwnProperty; var fnToString = hasOwn.toString; var ObjectFunctionString = fnToString.call( Object ); var support = {}; var isFunction = function isFunction( obj ) { // Support: Chrome <=57, Firefox <=52 // In some browsers, typeof returns "function" for HTML <object> elements // (i.e., `typeof document.createElement( "object" ) === "function"`). // We don't want to classify *any* DOM node as a function. return typeof obj === "function" && typeof obj.nodeType !== "number"; }; var isWindow = function isWindow( obj ) { return obj != null && obj === obj.window; }; var preservedScriptAttributes = { type: true, src: true, noModule: true }; function DOMEval( code, doc, node ) { doc = doc || document; var i, script = doc.createElement( "script" ); script.text = code; if ( node ) { for ( i in preservedScriptAttributes ) { if ( node[ i ] ) { script[ i ] = node[ i ]; } } } doc.head.appendChild( script ).parentNode.removeChild( script ); } function toType( obj ) { if ( obj == null ) { return obj + ""; } // Support: Android <=2.3 only (functionish RegExp) return typeof obj === "object" || typeof obj === "function" ? class2type[ toString.call( obj ) ] || "object" : typeof obj; } /* global Symbol */ // Defining this global in .eslintrc.json would create a danger of using the global // unguarded in another place, it seems safer to define global only for this module var version = "3.3.1", // Define a local copy of jQuery jQuery = function( selector, context ) { // The jQuery object is actually just the init constructor 'enhanced' // Need init if jQuery is called (just allow error to be thrown if not included) return new jQuery.fn.init( selector, context ); }, // Support: Android <=4.0 only // Make sure we trim BOM and NBSP rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; jQuery.fn = jQuery.prototype = { // The current version of jQuery being used jquery: version, constructor: jQuery, // The default length of a jQuery object is 0 length: 0, toArray: function() { return slice.call( this ); }, // Get the Nth element in the matched element set OR // Get the whole matched element set as a clean array get: function( num ) { // Return all the elements in a clean array if ( num == null ) { return slice.call( this ); } // Return just the one element from the set return num < 0 ? this[ num + this.length ] : this[ num ]; }, // Take an array of elements and push it onto the stack // (returning the new matched element set) pushStack: function( elems ) { // Build a new jQuery matched element set var ret = jQuery.merge( this.constructor(), elems ); // Add the old object onto the stack (as a reference) ret.prevObject = this; // Return the newly-formed element set return ret; }, // Execute a callback for every element in the matched set. 
each: function( callback ) { return jQuery.each( this, callback ); }, map: function( callback ) { return this.pushStack( jQuery.map( this, function( elem, i ) { return callback.call( elem, i, elem ); } ) ); }, slice: function() { return this.pushStack( slice.apply( this, arguments ) ); }, first: function() { return this.eq( 0 ); }, last: function() { return this.eq( -1 ); }, eq: function( i ) { var len = this.length, j = +i + ( i < 0 ? len : 0 ); return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); }, end: function() { return this.prevObject || this.constructor(); }, // For internal use only. // Behaves like an Array's method, not like a jQuery method. push: push, sort: arr.sort, splice: arr.splice }; jQuery.extend = jQuery.fn.extend = function() { var options, name, src, copy, copyIsArray, clone, target = arguments[ 0 ] || {}, i = 1, length = arguments.length, deep = false; // Handle a deep copy situation if ( typeof target === "boolean" ) { deep = target; // Skip the boolean and the target target = arguments[ i ] || {}; i++; } // Handle case when target is a string or something (possible in deep copy) if ( typeof target !== "object" && !isFunction( target ) ) { target = {}; } // Extend jQuery itself if only one argument is passed if ( i === length ) { target = this; i--; } for ( ; i < length; i++ ) { // Only deal with non-null/undefined values if ( ( options = arguments[ i ] ) != null ) { // Extend the base object for ( name in options ) { src = target[ name ]; copy = options[ name ]; // Prevent never-ending loop if ( target === copy ) { continue; } // Recurse if we're merging plain objects or arrays if ( deep && copy && ( jQuery.isPlainObject( copy ) || ( copyIsArray = Array.isArray( copy ) ) ) ) { if ( copyIsArray ) { copyIsArray = false; clone = src && Array.isArray( src ) ? src : []; } else { clone = src && jQuery.isPlainObject( src ) ? 
src : {}; } // Never move original objects, clone them target[ name ] = jQuery.extend( deep, clone, copy ); // Don't bring in undefined values } else if ( copy !== undefined ) { target[ name ] = copy; } } } } // Return the modified object return target; }; jQuery.extend( { // Unique for each copy of jQuery on the page expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), // Assume jQuery is ready without the ready module isReady: true, error: function( msg ) { throw new Error( msg ); }, noop: function() {}, isPlainObject: function( obj ) { var proto, Ctor; // Detect obvious negatives // Use toString instead of jQuery.type to catch host objects if ( !obj || toString.call( obj ) !== "[object Object]" ) { return false; } proto = getProto( obj ); // Objects with no prototype (e.g., `Object.create( null )`) are plain if ( !proto ) { return true; } // Objects with prototype are plain iff they were constructed by a global Object function Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; }, isEmptyObject: function( obj ) { /* eslint-disable no-unused-vars */ // See https://github.com/eslint/eslint/issues/6125 var name; for ( name in obj ) { return false; } return true; }, // Evaluates a script in a global context globalEval: function( code ) { DOMEval( code ); }, each: function( obj, callback ) { var length, i = 0; if ( isArrayLike( obj ) ) { length = obj.length; for ( ; i < length; i++ ) { if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { break; } } } else { for ( i in obj ) { if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { break; } } } return obj; }, // Support: Android <=4.0 only trim: function( text ) { return text == null ? "" : ( text + "" ).replace( rtrim, "" ); }, // results is for internal usage only makeArray: function( arr, results ) { var ret = results || []; if ( arr != null ) { if ( isArrayLike( Object( arr ) ) ) { jQuery.merge( ret, typeof arr === "string" ? [ arr ] : arr ); } else { push.call( ret, arr ); } } return ret; }, inArray: function( elem, arr, i ) { return arr == null ? 
-1 : indexOf.call( arr, elem, i ); }, // Support: Android <=4.0 only, PhantomJS 1 only // push.apply(_, arraylike) throws on ancient WebKit merge: function( first, second ) { var len = +second.length, j = 0, i = first.length; for ( ; j < len; j++ ) { first[ i++ ] = second[ j ]; } first.length = i; return first; }, grep: function( elems, callback, invert ) { var callbackInverse, matches = [], i = 0, length = elems.length, callbackExpect = !invert; // Go through the array, only saving the items // that pass the validator function for ( ; i < length; i++ ) { callbackInverse = !callback( elems[ i ], i ); if ( callbackInverse !== callbackExpect ) { matches.push( elems[ i ] ); } } return matches; }, // arg is for internal usage only map: function( elems, callback, arg ) { var length, value, i = 0, ret = []; // Go through the array, translating each of the items to their new values if ( isArrayLike( elems ) ) { length = elems.length; for ( ; i < length; i++ ) { value = callback( elems[ i ], i, arg ); if ( value != null ) { ret.push( value ); } } // Go through every key on the object, } else { for ( i in elems ) { value = callback( elems[ i ], i, arg ); if ( value != null ) { ret.push( value ); } } } // Flatten any nested arrays return concat.apply( [], ret ); }, // A global GUID counter for objects guid: 1, // jQuery.support is not used in Core but other projects attach their // properties to it so it needs to exist. support: support } ); if ( typeof Symbol === "function" ) { jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; } // Populate the class2type map jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), function( i, name ) { class2type[ "[object " + name + "]" ] = name.toLowerCase(); } ); function isArrayLike( obj ) { // Support: real iOS 8.2 only (not reproducible in simulator) // `in` check used to prevent JIT error (gh-2145) // hasOwn isn't used here due to false negatives // regarding Nodelist length in IE var length = !!obj && "length" in obj && obj.length, type = toType( obj ); if ( isFunction( obj ) || isWindow( obj ) ) { return false; } return type === "array" || length === 0 || typeof length === "number" && length > 0 && ( length - 1 ) in obj; } var Sizzle = /*! 
* Sizzle CSS Selector Engine v2.3.3 * https://sizzlejs.com/ * * Copyright jQuery Foundation and other contributors * Released under the MIT license * http://jquery.org/license * * Date: 2016-08-08 */ (function( window ) { var i, support, Expr, getText, isXML, tokenize, compile, select, outermostContext, sortInput, hasDuplicate, // Local document vars setDocument, document, docElem, documentIsHTML, rbuggyQSA, rbuggyMatches, matches, contains, // Instance-specific data expando = "sizzle" + 1 * new Date(), preferredDoc = window.document, dirruns = 0, done = 0, classCache = createCache(), tokenCache = createCache(), compilerCache = createCache(), sortOrder = function( a, b ) { if ( a === b ) { hasDuplicate = true; } return 0; }, // Instance methods hasOwn = ({}).hasOwnProperty, arr = [], pop = arr.pop, push_native = arr.push, push = arr.push, slice = arr.slice, // Use a stripped-down indexOf as it's faster than native // https://jsperf.com/thor-indexof-vs-for/5 indexOf = function( list, elem ) { var i = 0, len = list.length; for ( ; i < len; i++ ) { if ( list[i] === elem ) { return i; } } return -1; }, booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", // Regular expressions // http://www.w3.org/TR/css3-selectors/#whitespace whitespace = "[\\x20\\t\\r\\n\\f]", // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + // Operator (capture 2) "*([*^$|!~]?=)" + whitespace + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + "*\\]", pseudos = ":(" + identifier + ")(?:\\((" + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: // 1. quoted (capture 3; capture 4 or capture 5) "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + // 2. simple (capture 6) "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + // 3. 
anything else (capture 2) ".*" + ")\\)|)", // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter rwhitespace = new RegExp( whitespace + "+", "g" ), rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), rpseudo = new RegExp( pseudos ), ridentifier = new RegExp( "^" + identifier + "$" ), matchExpr = { "ID": new RegExp( "^#(" + identifier + ")" ), "CLASS": new RegExp( "^\\.(" + identifier + ")" ), "TAG": new RegExp( "^(" + identifier + "|[*])" ), "ATTR": new RegExp( "^" + attributes ), "PSEUDO": new RegExp( "^" + pseudos ), "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), // For use in libraries implementing .is() // We use this for POS matching in `select` "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) }, rinputs = /^(?:input|select|textarea|button)$/i, rheader = /^h\d$/i, rnative = /^[^{]+\{\s*\[native \w/, // Easily-parseable/retrievable ID or TAG or CLASS selectors rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, rsibling = /[+~]/, // CSS escapes // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), funescape = function( _, escaped, escapedWhitespace ) { var high = "0x" + escaped - 0x10000; // NaN means non-codepoint // Support: Firefox<24 // Workaround erroneous numeric interpretation of +"0x" return high !== high || escapedWhitespace ? escaped : high < 0 ? // BMP codepoint String.fromCharCode( high + 0x10000 ) : // Supplemental Plane codepoint (surrogate pair) String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); }, // CSS string/identifier serialization // https://drafts.csswg.org/cssom/#common-serializing-idioms rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, fcssescape = function( ch, asCodePoint ) { if ( asCodePoint ) { // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER if ( ch === "\0" ) { return "\uFFFD"; } // Control characters and (dependent upon position) numbers get escaped as code points return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; } // Other potentially-special ASCII characters get backslash-escaped return "\\" + ch; }, // Used for iframes // See setDocument() // Removing the function wrapper causes a "Permission Denied" // error in IE unloadHandler = function() { setDocument(); }, disabledAncestor = addCombinator( function( elem ) { return elem.disabled === true && ("form" in elem || "label" in elem); }, { dir: "parentNode", next: "legend" } ); // Optimize for push.apply( _, NodeList ) try { push.apply( (arr = slice.call( preferredDoc.childNodes )), preferredDoc.childNodes ); // Support: Android<4.0 // Detect silently failing push.apply arr[ preferredDoc.childNodes.length ].nodeType; } catch ( e ) { push = { apply: arr.length ? 
// Leverage slice if possible function( target, els ) { push_native.apply( target, slice.call(els) ); } : // Support: IE<9 // Otherwise append directly function( target, els ) { var j = target.length, i = 0; // Can't trust NodeList.length while ( (target[j++] = els[i++]) ) {} target.length = j - 1; } }; } function Sizzle( selector, context, results, seed ) { var m, i, elem, nid, match, groups, newSelector, newContext = context && context.ownerDocument, // nodeType defaults to 9, since context defaults to document nodeType = context ? context.nodeType : 9; results = results || []; // Return early from calls with invalid selector or context if ( typeof selector !== "string" || !selector || nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { return results; } // Try to shortcut find operations (as opposed to filters) in HTML documents if ( !seed ) { if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { setDocument( context ); } context = context || document; if ( documentIsHTML ) { // If the selector is sufficiently simple, try using a "get*By*" DOM method // (excepting DocumentFragment context, where the methods don't exist) if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { // ID selector if ( (m = match[1]) ) { // Document context if ( nodeType === 9 ) { if ( (elem = context.getElementById( m )) ) { // Support: IE, Opera, Webkit // TODO: identify versions // getElementById can match elements by name instead of ID if ( elem.id === m ) { results.push( elem ); return results; } } else { return results; } // Element context } else { // Support: IE, Opera, Webkit // TODO: identify versions // getElementById can match elements by name instead of ID if ( newContext && (elem = newContext.getElementById( m )) && contains( context, elem ) && elem.id === m ) { results.push( elem ); return results; } } // Type selector } else if ( match[2] ) { push.apply( results, context.getElementsByTagName( selector ) ); return results; // Class selector } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { push.apply( results, context.getElementsByClassName( m ) ); return results; } } // Take advantage of querySelectorAll if ( support.qsa && !compilerCache[ selector + " " ] && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { if ( nodeType !== 1 ) { newContext = context; newSelector = selector; // qSA looks outside Element context, which is not what we want // Thanks to Andrew Dupont for this workaround technique // Support: IE <=8 // Exclude object elements } else if ( context.nodeName.toLowerCase() !== "object" ) { // Capture the context ID, setting it first if necessary if ( (nid = context.getAttribute( "id" )) ) { nid = nid.replace( rcssescape, fcssescape ); } else { context.setAttribute( "id", (nid = expando) ); } // Prefix every selector in the list groups = tokenize( selector ); i = groups.length; while ( i-- ) { groups[i] = "#" + nid + " " + toSelector( groups[i] ); } newSelector = groups.join( "," ); // Expand context for sibling selectors newContext = rsibling.test( selector ) && testContext( context.parentNode ) || context; } if ( newSelector ) { try { push.apply( results, newContext.querySelectorAll( newSelector ) ); return results; } catch ( qsaError ) { } finally { if ( nid === expando ) { context.removeAttribute( "id" ); } } } } } } // All others return select( selector.replace( rtrim, "$1" ), context, results, seed ); } /** * Create key-value caches of limited size * @returns {function(string, object)} 
Returns the Object data after storing it on itself with * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) * deleting the oldest entry */ function createCache() { var keys = []; function cache( key, value ) { // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) if ( keys.push( key + " " ) > Expr.cacheLength ) { // Only keep the most recent entries delete cache[ keys.shift() ]; } return (cache[ key + " " ] = value); } return cache; } /** * Mark a function for special use by Sizzle * @param {Function} fn The function to mark */ function markFunction( fn ) { fn[ expando ] = true; return fn; } /** * Support testing using an element * @param {Function} fn Passed the created element and returns a boolean result */ function assert( fn ) { var el = document.createElement("fieldset"); try { return !!fn( el ); } catch (e) { return false; } finally { // Remove from its parent by default if ( el.parentNode ) { el.parentNode.removeChild( el ); } // release memory in IE el = null; } } /** * Adds the same handler for all of the specified attrs * @param {String} attrs Pipe-separated list of attributes * @param {Function} handler The method that will be applied */ function addHandle( attrs, handler ) { var arr = attrs.split("|"), i = arr.length; while ( i-- ) { Expr.attrHandle[ arr[i] ] = handler; } } /** * Checks document order of two siblings * @param {Element} a * @param {Element} b * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b */ function siblingCheck( a, b ) { var cur = b && a, diff = cur && a.nodeType === 1 && b.nodeType === 1 && a.sourceIndex - b.sourceIndex; // Use IE sourceIndex if available on both nodes if ( diff ) { return diff; } // Check if b follows a if ( cur ) { while ( (cur = cur.nextSibling) ) { if ( cur === b ) { return -1; } } } return a ? 1 : -1; } /** * Returns a function to use in pseudos for input types * @param {String} type */ function createInputPseudo( type ) { return function( elem ) { var name = elem.nodeName.toLowerCase(); return name === "input" && elem.type === type; }; } /** * Returns a function to use in pseudos for buttons * @param {String} type */ function createButtonPseudo( type ) { return function( elem ) { var name = elem.nodeName.toLowerCase(); return (name === "input" || name === "button") && elem.type === type; }; } /** * Returns a function to use in pseudos for :enabled/:disabled * @param {Boolean} disabled true for :disabled; false for :enabled */ function createDisabledPseudo( disabled ) { // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable return function( elem ) { // Only certain elements can match :enabled or :disabled // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled if ( "form" in elem ) { // Check for inherited disabledness on relevant non-disabled elements: // * listed form-associated elements in a disabled fieldset // https://html.spec.whatwg.org/multipage/forms.html#category-listed // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled // * option elements in a disabled optgroup // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled // All such elements have a "form" property. 
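			// For example (illustrative), given <fieldset disabled><input></fieldset>,
			// the <input> reports disabled === false yet must match :disabled because
			// of its ancestor fieldset; the isDisabled/disabledAncestor checks below
			// cover that inherited case.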
if ( elem.parentNode && elem.disabled === false ) { // Option elements defer to a parent optgroup if present if ( "label" in elem ) { if ( "label" in elem.parentNode ) { return elem.parentNode.disabled === disabled; } else { return elem.disabled === disabled; } } // Support: IE 6 - 11 // Use the isDisabled shortcut property to check for disabled fieldset ancestors return elem.isDisabled === disabled || // Where there is no isDisabled, check manually /* jshint -W018 */ elem.isDisabled !== !disabled && disabledAncestor( elem ) === disabled; } return elem.disabled === disabled; // Try to winnow out elements that can't be disabled before trusting the disabled property. // Some victims get caught in our net (label, legend, menu, track), but it shouldn't // even exist on them, let alone have a boolean value. } else if ( "label" in elem ) { return elem.disabled === disabled; } // Remaining elements are neither :enabled nor :disabled return false; }; } /** * Returns a function to use in pseudos for positionals * @param {Function} fn */ function createPositionalPseudo( fn ) { return markFunction(function( argument ) { argument = +argument; return markFunction(function( seed, matches ) { var j, matchIndexes = fn( [], seed.length, argument ), i = matchIndexes.length; // Match elements found at the specified indexes while ( i-- ) { if ( seed[ (j = matchIndexes[i]) ] ) { seed[j] = !(matches[j] = seed[j]); } } }); }); } /** * Checks a node for validity as a Sizzle context * @param {Element|Object=} context * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value */ function testContext( context ) { return context && typeof context.getElementsByTagName !== "undefined" && context; } // Expose support vars for convenience support = Sizzle.support = {}; /** * Detects XML nodes * @param {Element|Object} elem An element or a document * @returns {Boolean} True iff elem is a non-HTML XML node */ isXML = Sizzle.isXML = function( elem ) { // documentElement is verified for cases where it doesn't yet exist // (such as loading iframes in IE - #4833) var documentElement = elem && (elem.ownerDocument || elem).documentElement; return documentElement ? documentElement.nodeName !== "HTML" : false; }; /** * Sets document-related variables once based on the current document * @param {Element|Object} [doc] An element or document object to use to set the document * @returns {Object} Returns the current document */ setDocument = Sizzle.setDocument = function( node ) { var hasCompare, subWindow, doc = node ? 
node.ownerDocument || node : preferredDoc; // Return early if doc is invalid or already selected if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { return document; } // Update global variables document = doc; docElem = document.documentElement; documentIsHTML = !isXML( document ); // Support: IE 9-11, Edge // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) if ( preferredDoc !== document && (subWindow = document.defaultView) && subWindow.top !== subWindow ) { // Support: IE 11, Edge if ( subWindow.addEventListener ) { subWindow.addEventListener( "unload", unloadHandler, false ); // Support: IE 9 - 10 only } else if ( subWindow.attachEvent ) { subWindow.attachEvent( "onunload", unloadHandler ); } } /* Attributes ---------------------------------------------------------------------- */ // Support: IE<8 // Verify that getAttribute really returns attributes and not properties // (excepting IE8 booleans) support.attributes = assert(function( el ) { el.className = "i"; return !el.getAttribute("className"); }); /* getElement(s)By* ---------------------------------------------------------------------- */ // Check if getElementsByTagName("*") returns only elements support.getElementsByTagName = assert(function( el ) { el.appendChild( document.createComment("") ); return !el.getElementsByTagName("*").length; }); // Support: IE<9 support.getElementsByClassName = rnative.test( document.getElementsByClassName ); // Support: IE<10 // Check if getElementById returns elements by name // The broken getElementById methods don't pick up programmatically-set names, // so use a roundabout getElementsByName test support.getById = assert(function( el ) { docElem.appendChild( el ).id = expando; return !document.getElementsByName || !document.getElementsByName( expando ).length; }); // ID filter and find if ( support.getById ) { Expr.filter["ID"] = function( id ) { var attrId = id.replace( runescape, funescape ); return function( elem ) { return elem.getAttribute("id") === attrId; }; }; Expr.find["ID"] = function( id, context ) { if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { var elem = context.getElementById( id ); return elem ? [ elem ] : []; } }; } else { Expr.filter["ID"] = function( id ) { var attrId = id.replace( runescape, funescape ); return function( elem ) { var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id"); return node && node.value === attrId; }; }; // Support: IE 6 - 7 only // getElementById is not reliable as a find shortcut Expr.find["ID"] = function( id, context ) { if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { var node, i, elems, elem = context.getElementById( id ); if ( elem ) { // Verify the id attribute node = elem.getAttributeNode("id"); if ( node && node.value === id ) { return [ elem ]; } // Fall back on getElementsByName elems = context.getElementsByName( id ); i = 0; while ( (elem = elems[i++]) ) { node = elem.getAttributeNode("id"); if ( node && node.value === id ) { return [ elem ]; } } } return []; } }; } // Tag Expr.find["TAG"] = support.getElementsByTagName ? 
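	// When gEBTN( "*" ) is known to return only elements, delegate to it directly;
	// otherwise fall back to a version that filters out old IE's comment nodes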
function( tag, context ) { if ( typeof context.getElementsByTagName !== "undefined" ) { return context.getElementsByTagName( tag ); // DocumentFragment nodes don't have gEBTN } else if ( support.qsa ) { return context.querySelectorAll( tag ); } } : function( tag, context ) { var elem, tmp = [], i = 0, // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too results = context.getElementsByTagName( tag ); // Filter out possible comments if ( tag === "*" ) { while ( (elem = results[i++]) ) { if ( elem.nodeType === 1 ) { tmp.push( elem ); } } return tmp; } return results; }; // Class Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { return context.getElementsByClassName( className ); } }; /* QSA/matchesSelector ---------------------------------------------------------------------- */ // QSA and matchesSelector support // matchesSelector(:active) reports false when true (IE9/Opera 11.5) rbuggyMatches = []; // qSa(:focus) reports false when true (Chrome 21) // We allow this because of a bug in IE8/9 that throws an error // whenever `document.activeElement` is accessed on an iframe // So, we allow :focus to pass through QSA all the time to avoid the IE error // See https://bugs.jquery.com/ticket/13378 rbuggyQSA = []; if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { // Build QSA regex // Regex strategy adopted from Diego Perini assert(function( el ) { // Select is set to empty string on purpose // This is to test IE's treatment of not explicitly // setting a boolean content attribute, // since its presence should be enough // https://bugs.jquery.com/ticket/12359 docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" + "<select id='" + expando + "-\r\\' msallowcapture=''>" + "<option selected=''></option></select>"; // Support: IE8, Opera 11-12.16 // Nothing should be selected when empty strings follow ^= or $= or *= // The test attribute must be unknown in Opera but "safe" for WinRT // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section if ( el.querySelectorAll("[msallowcapture^='']").length ) { rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); } // Support: IE8 // Boolean attributes and "value" are not treated correctly if ( !el.querySelectorAll("[selected]").length ) { rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); } // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { rbuggyQSA.push("~="); } // Webkit/Opera - :checked should return selected option elements // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked // IE8 throws error here and will not see later tests if ( !el.querySelectorAll(":checked").length ) { rbuggyQSA.push(":checked"); } // Support: Safari 8+, iOS 8+ // https://bugs.webkit.org/show_bug.cgi?id=136851 // In-page `selector#id sibling-combinator selector` fails if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { rbuggyQSA.push(".#.+[+~]"); } }); assert(function( el ) { el.innerHTML = "<a href='' disabled='disabled'></a>" + "<select disabled='disabled'><option/></select>"; // Support: Windows 8 Native Apps // The type and name attributes are restricted during .innerHTML assignment var input = document.createElement("input"); input.setAttribute( "type", "hidden" ); el.appendChild( input ).setAttribute( "name", "D" ); // Support: IE8 // Enforce 
case-sensitivity of name attribute if ( el.querySelectorAll("[name=d]").length ) { rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); } // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) // IE8 throws error here and will not see later tests if ( el.querySelectorAll(":enabled").length !== 2 ) { rbuggyQSA.push( ":enabled", ":disabled" ); } // Support: IE9-11+ // IE's :disabled selector does not pick up the children of disabled fieldsets docElem.appendChild( el ).disabled = true; if ( el.querySelectorAll(":disabled").length !== 2 ) { rbuggyQSA.push( ":enabled", ":disabled" ); } // Opera 10-11 does not throw on post-comma invalid pseudos el.querySelectorAll("*,:x"); rbuggyQSA.push(",.*:"); }); } if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || docElem.webkitMatchesSelector || docElem.mozMatchesSelector || docElem.oMatchesSelector || docElem.msMatchesSelector) )) ) { assert(function( el ) { // Check to see if it's possible to do matchesSelector // on a disconnected node (IE 9) support.disconnectedMatch = matches.call( el, "*" ); // This should fail with an exception // Gecko does not error, returns false instead matches.call( el, "[s!='']:x" ); rbuggyMatches.push( "!=", pseudos ); }); } rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); /* Contains ---------------------------------------------------------------------- */ hasCompare = rnative.test( docElem.compareDocumentPosition ); // Element contains another // Purposefully self-exclusive // As in, an element does not contain itself contains = hasCompare || rnative.test( docElem.contains ) ? function( a, b ) { var adown = a.nodeType === 9 ? a.documentElement : a, bup = b && b.parentNode; return a === bup || !!( bup && bup.nodeType === 1 && ( adown.contains ? adown.contains( bup ) : a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 )); } : function( a, b ) { if ( b ) { while ( (b = b.parentNode) ) { if ( b === a ) { return true; } } } return false; }; /* Sorting ---------------------------------------------------------------------- */ // Document order sorting sortOrder = hasCompare ? function( a, b ) { // Flag for duplicate removal if ( a === b ) { hasDuplicate = true; return 0; } // Sort on method existence if only one input has compareDocumentPosition var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; if ( compare ) { return compare; } // Calculate position if both inputs belong to the same document compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? a.compareDocumentPosition( b ) : // Otherwise we know they are disconnected 1; // Disconnected nodes if ( compare & 1 || (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { // Choose the first element that is related to our preferred document if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { return -1; } if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { return 1; } // Maintain original order return sortInput ? ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : 0; } return compare & 4 ? 
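		// Bit 4 (DOCUMENT_POSITION_FOLLOWING) means b follows a, so a sorts first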
-1 : 1; } : function( a, b ) { // Exit early if the nodes are identical if ( a === b ) { hasDuplicate = true; return 0; } var cur, i = 0, aup = a.parentNode, bup = b.parentNode, ap = [ a ], bp = [ b ]; // Parentless nodes are either documents or disconnected if ( !aup || !bup ) { return a === document ? -1 : b === document ? 1 : aup ? -1 : bup ? 1 : sortInput ? ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : 0; // If the nodes are siblings, we can do a quick check } else if ( aup === bup ) { return siblingCheck( a, b ); } // Otherwise we need full lists of their ancestors for comparison cur = a; while ( (cur = cur.parentNode) ) { ap.unshift( cur ); } cur = b; while ( (cur = cur.parentNode) ) { bp.unshift( cur ); } // Walk down the tree looking for a discrepancy while ( ap[i] === bp[i] ) { i++; } return i ? // Do a sibling check if the nodes have a common ancestor siblingCheck( ap[i], bp[i] ) : // Otherwise nodes in our document sort first ap[i] === preferredDoc ? -1 : bp[i] === preferredDoc ? 1 : 0; }; return document; }; Sizzle.matches = function( expr, elements ) { return Sizzle( expr, null, null, elements ); }; Sizzle.matchesSelector = function( elem, expr ) { // Set document vars if needed if ( ( elem.ownerDocument || elem ) !== document ) { setDocument( elem ); } // Make sure that attribute selectors are quoted expr = expr.replace( rattributeQuotes, "='$1']" ); if ( support.matchesSelector && documentIsHTML && !compilerCache[ expr + " " ] && ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { try { var ret = matches.call( elem, expr ); // IE 9's matchesSelector returns false on disconnected nodes if ( ret || support.disconnectedMatch || // As well, disconnected nodes are said to be in a document // fragment in IE 9 elem.document && elem.document.nodeType !== 11 ) { return ret; } } catch (e) {} } return Sizzle( expr, document, null, [ elem ] ).length > 0; }; Sizzle.contains = function( context, elem ) { // Set document vars if needed if ( ( context.ownerDocument || context ) !== document ) { setDocument( context ); } return contains( context, elem ); }; Sizzle.attr = function( elem, name ) { // Set document vars if needed if ( ( elem.ownerDocument || elem ) !== document ) { setDocument( elem ); } var fn = Expr.attrHandle[ name.toLowerCase() ], // Don't get fooled by Object.prototype properties (jQuery #13807) val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? fn( elem, name, !documentIsHTML ) : undefined; return val !== undefined ? val : support.attributes || !documentIsHTML ? elem.getAttribute( name ) : (val = elem.getAttributeNode(name)) && val.specified ? 
val.value : null; }; Sizzle.escape = function( sel ) { return (sel + "").replace( rcssescape, fcssescape ); }; Sizzle.error = function( msg ) { throw new Error( "Syntax error, unrecognized expression: " + msg ); }; /** * Document sorting and removing duplicates * @param {ArrayLike} results */ Sizzle.uniqueSort = function( results ) { var elem, duplicates = [], j = 0, i = 0; // Unless we *know* we can detect duplicates, assume their presence hasDuplicate = !support.detectDuplicates; sortInput = !support.sortStable && results.slice( 0 ); results.sort( sortOrder ); if ( hasDuplicate ) { while ( (elem = results[i++]) ) { if ( elem === results[ i ] ) { j = duplicates.push( i ); } } while ( j-- ) { results.splice( duplicates[ j ], 1 ); } } // Clear input after sorting to release objects // See https://github.com/jquery/sizzle/pull/225 sortInput = null; return results; }; /** * Utility function for retrieving the text value of an array of DOM nodes * @param {Array|Element} elem */ getText = Sizzle.getText = function( elem ) { var node, ret = "", i = 0, nodeType = elem.nodeType; if ( !nodeType ) { // If no nodeType, this is expected to be an array while ( (node = elem[i++]) ) { // Do not traverse comment nodes ret += getText( node ); } } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { // Use textContent for elements // innerText usage removed for consistency of new lines (jQuery #11153) if ( typeof elem.textContent === "string" ) { return elem.textContent; } else { // Traverse its children for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { ret += getText( elem ); } } } else if ( nodeType === 3 || nodeType === 4 ) { return elem.nodeValue; } // Do not include comment or processing instruction nodes return ret; }; Expr = Sizzle.selectors = { // Can be adjusted by the user cacheLength: 50, createPseudo: markFunction, match: matchExpr, attrHandle: {}, find: {}, relative: { ">": { dir: "parentNode", first: true }, " ": { dir: "parentNode" }, "+": { dir: "previousSibling", first: true }, "~": { dir: "previousSibling" } }, preFilter: { "ATTR": function( match ) { match[1] = match[1].replace( runescape, funescape ); // Move the given value to match[3] whether quoted or unquoted match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); if ( match[2] === "~=" ) { match[3] = " " + match[3] + " "; } return match.slice( 0, 4 ); }, "CHILD": function( match ) { /* matches from matchExpr["CHILD"] 1 type (only|nth|...) 2 what (child|of-type) 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) 4 xn-component of xn+y argument ([+-]?\d*n|) 5 sign of xn-component 6 x of xn-component 7 sign of y-component 8 y of y-component */ match[1] = match[1].toLowerCase(); if ( match[1].slice( 0, 3 ) === "nth" ) { // nth-* requires argument if ( !match[3] ) { Sizzle.error( match[0] ); } // numeric x and y parameters for Expr.filter.CHILD // remember that false/true cast respectively to 0/1 match[4] = +( match[4] ? 
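				// e.g. ":nth-child(2n+1)" yields first = +( "" + "2" ) = 2 and
				// last = +"+1" = 1; ":nth-child(odd)" yields 2 * true = 2 and +true = 1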
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); // other types prohibit arguments } else if ( match[3] ) { Sizzle.error( match[0] ); } return match; }, "PSEUDO": function( match ) { var excess, unquoted = !match[6] && match[2]; if ( matchExpr["CHILD"].test( match[0] ) ) { return null; } // Accept quoted arguments as-is if ( match[3] ) { match[2] = match[4] || match[5] || ""; // Strip excess characters from unquoted arguments } else if ( unquoted && rpseudo.test( unquoted ) && // Get excess from tokenize (recursively) (excess = tokenize( unquoted, true )) && // advance to the next closing parenthesis (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { // excess is a negative index match[0] = match[0].slice( 0, excess ); match[2] = unquoted.slice( 0, excess ); } // Return only captures needed by the pseudo filter method (type and argument) return match.slice( 0, 3 ); } }, filter: { "TAG": function( nodeNameSelector ) { var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); return nodeNameSelector === "*" ? function() { return true; } : function( elem ) { return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; }; }, "CLASS": function( className ) { var pattern = classCache[ className + " " ]; return pattern || (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && classCache( className, function( elem ) { return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); }); }, "ATTR": function( name, operator, check ) { return function( elem ) { var result = Sizzle.attr( elem, name ); if ( result == null ) { return operator === "!="; } if ( !operator ) { return true; } result += ""; return operator === "=" ? result === check : operator === "!=" ? result !== check : operator === "^=" ? check && result.indexOf( check ) === 0 : operator === "*=" ? check && result.indexOf( check ) > -1 : operator === "$=" ? check && result.slice( -check.length ) === check : operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : false; }; }, "CHILD": function( type, what, argument, first, last ) { var simple = type.slice( 0, 3 ) !== "nth", forward = type.slice( -4 ) !== "last", ofType = what === "of-type"; return first === 1 && last === 0 ? // Shortcut for :nth-*(n) function( elem ) { return !!elem.parentNode; } : function( elem, context, xml ) { var cache, uniqueCache, outerCache, node, nodeIndex, start, dir = simple !== forward ? "nextSibling" : "previousSibling", parent = elem.parentNode, name = ofType && elem.nodeName.toLowerCase(), useCache = !xml && !ofType, diff = false; if ( parent ) { // :(first|last|only)-(child|of-type) if ( simple ) { while ( dir ) { node = elem; while ( (node = node[ dir ]) ) { if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { return false; } } // Reverse direction for :only-* (if we haven't yet done so) start = dir = type === "only" && !start && "nextSibling"; } return true; } start = [ forward ? parent.firstChild : parent.lastChild ]; // non-xml :nth-child(...) 
stores cache data on `parent` if ( forward && useCache ) { // Seek `elem` from a previously-cached index // ...in a gzip-friendly way node = parent; outerCache = node[ expando ] || (node[ expando ] = {}); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ node.uniqueID ] || (outerCache[ node.uniqueID ] = {}); cache = uniqueCache[ type ] || []; nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; diff = nodeIndex && cache[ 2 ]; node = nodeIndex && parent.childNodes[ nodeIndex ]; while ( (node = ++nodeIndex && node && node[ dir ] || // Fallback to seeking `elem` from the start (diff = nodeIndex = 0) || start.pop()) ) { // When found, cache indexes on `parent` and break if ( node.nodeType === 1 && ++diff && node === elem ) { uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; break; } } } else { // Use previously-cached element index if available if ( useCache ) { // ...in a gzip-friendly way node = elem; outerCache = node[ expando ] || (node[ expando ] = {}); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ node.uniqueID ] || (outerCache[ node.uniqueID ] = {}); cache = uniqueCache[ type ] || []; nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; diff = nodeIndex; } // xml :nth-child(...) // or :nth-last-child(...) or :nth(-last)?-of-type(...) if ( diff === false ) { // Use the same loop as above to seek `elem` from the start while ( (node = ++nodeIndex && node && node[ dir ] || (diff = nodeIndex = 0) || start.pop()) ) { if ( ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { // Cache the index of each encountered element if ( useCache ) { outerCache = node[ expando ] || (node[ expando ] = {}); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ node.uniqueID ] || (outerCache[ node.uniqueID ] = {}); uniqueCache[ type ] = [ dirruns, diff ]; } if ( node === elem ) { break; } } } } } // Incorporate the offset, then check against cycle size diff -= last; return diff === first || ( diff % first === 0 && diff / first >= 0 ); } }; }, "PSEUDO": function( pseudo, argument ) { // pseudo-class names are case-insensitive // http://www.w3.org/TR/selectors/#pseudo-classes // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters // Remember that setFilters inherits from pseudos var args, fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || Sizzle.error( "unsupported pseudo: " + pseudo ); // The user may use createPseudo to indicate that // arguments are needed to create the filter function // just as Sizzle does if ( fn[ expando ] ) { return fn( argument ); } // But maintain support for old signatures if ( fn.length > 1 ) { args = [ pseudo, pseudo, "", argument ]; return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? markFunction(function( seed, matches ) { var idx, matched = fn( seed, argument ), i = matched.length; while ( i-- ) { idx = indexOf( seed, matched[i] ); seed[ idx ] = !( matches[ idx ] = matched[i] ); } }) : function( elem ) { return fn( elem, 0, args ); }; } return fn; } }, pseudos: { // Potentially complex pseudos "not": markFunction(function( selector ) { // Trim the selector passed to compile // to avoid treating leading and trailing // spaces as combinators var input = [], results = [], matcher = compile( selector.replace( rtrim, "$1" ) ); return matcher[ expando ] ? 
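			// A compiled set matcher (e.g. ":not(p:first)", which contains a
			// positional pseudo) must filter the whole seed at once rather than
			// element-by-element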
markFunction(function( seed, matches, context, xml ) { var elem, unmatched = matcher( seed, null, xml, [] ), i = seed.length; // Match elements unmatched by `matcher` while ( i-- ) { if ( (elem = unmatched[i]) ) { seed[i] = !(matches[i] = elem); } } }) : function( elem, context, xml ) { input[0] = elem; matcher( input, null, xml, results ); // Don't keep the element (issue #299) input[0] = null; return !results.pop(); }; }), "has": markFunction(function( selector ) { return function( elem ) { return Sizzle( selector, elem ).length > 0; }; }), "contains": markFunction(function( text ) { text = text.replace( runescape, funescape ); return function( elem ) { return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; }; }), // "Whether an element is represented by a :lang() selector // is based solely on the element's language value // being equal to the identifier C, // or beginning with the identifier C immediately followed by "-". // The matching of C against the element's language value is performed case-insensitively. // The identifier C does not have to be a valid language name." // http://www.w3.org/TR/selectors/#lang-pseudo "lang": markFunction( function( lang ) { // lang value must be a valid identifier if ( !ridentifier.test(lang || "") ) { Sizzle.error( "unsupported lang: " + lang ); } lang = lang.replace( runescape, funescape ).toLowerCase(); return function( elem ) { var elemLang; do { if ( (elemLang = documentIsHTML ? elem.lang : elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { elemLang = elemLang.toLowerCase(); return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; } } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); return false; }; }), // Miscellaneous "target": function( elem ) { var hash = window.location && window.location.hash; return hash && hash.slice( 1 ) === elem.id; }, "root": function( elem ) { return elem === docElem; }, "focus": function( elem ) { return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); }, // Boolean properties "enabled": createDisabledPseudo( false ), "disabled": createDisabledPseudo( true ), "checked": function( elem ) { // In CSS3, :checked should return both checked and selected elements // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked var nodeName = elem.nodeName.toLowerCase(); return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); }, "selected": function( elem ) { // Accessing this property makes selected-by-default // options in Safari work properly if ( elem.parentNode ) { elem.parentNode.selectedIndex; } return elem.selected === true; }, // Contents "empty": function( elem ) { // http://www.w3.org/TR/selectors/#empty-pseudo // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), // but not by others (comment: 8; processing instruction: 7; etc.) 
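			// e.g. <p><!-- note --></p> still matches :empty, while <p> </p> does
			// not, since whitespace is a text node (3)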
// nodeType < 6 works because attributes (2) do not appear as children for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { if ( elem.nodeType < 6 ) { return false; } } return true; }, "parent": function( elem ) { return !Expr.pseudos["empty"]( elem ); }, // Element/input types "header": function( elem ) { return rheader.test( elem.nodeName ); }, "input": function( elem ) { return rinputs.test( elem.nodeName ); }, "button": function( elem ) { var name = elem.nodeName.toLowerCase(); return name === "input" && elem.type === "button" || name === "button"; }, "text": function( elem ) { var attr; return elem.nodeName.toLowerCase() === "input" && elem.type === "text" && // Support: IE<8 // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); }, // Position-in-collection "first": createPositionalPseudo(function() { return [ 0 ]; }), "last": createPositionalPseudo(function( matchIndexes, length ) { return [ length - 1 ]; }), "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { return [ argument < 0 ? argument + length : argument ]; }), "even": createPositionalPseudo(function( matchIndexes, length ) { var i = 0; for ( ; i < length; i += 2 ) { matchIndexes.push( i ); } return matchIndexes; }), "odd": createPositionalPseudo(function( matchIndexes, length ) { var i = 1; for ( ; i < length; i += 2 ) { matchIndexes.push( i ); } return matchIndexes; }), "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { var i = argument < 0 ? argument + length : argument; for ( ; --i >= 0; ) { matchIndexes.push( i ); } return matchIndexes; }), "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { var i = argument < 0 ? argument + length : argument; for ( ; ++i < length; ) { matchIndexes.push( i ); } return matchIndexes; }) } }; Expr.pseudos["nth"] = Expr.pseudos["eq"]; // Add button/input type pseudos for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { Expr.pseudos[ i ] = createInputPseudo( i ); } for ( i in { submit: true, reset: true } ) { Expr.pseudos[ i ] = createButtonPseudo( i ); } // Easy API for creating new setFilters function setFilters() {} setFilters.prototype = Expr.filters = Expr.pseudos; Expr.setFilters = new setFilters(); tokenize = Sizzle.tokenize = function( selector, parseOnly ) { var matched, match, tokens, type, soFar, groups, preFilters, cached = tokenCache[ selector + " " ]; if ( cached ) { return parseOnly ? 
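		// A cached selector is known to be fully valid, so a parse-only call
		// reports zero invalid excess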
0 : cached.slice( 0 ); } soFar = selector; groups = []; preFilters = Expr.preFilter; while ( soFar ) { // Comma and first run if ( !matched || (match = rcomma.exec( soFar )) ) { if ( match ) { // Don't consume trailing commas as valid soFar = soFar.slice( match[0].length ) || soFar; } groups.push( (tokens = []) ); } matched = false; // Combinators if ( (match = rcombinators.exec( soFar )) ) { matched = match.shift(); tokens.push({ value: matched, // Cast descendant combinators to space type: match[0].replace( rtrim, " " ) }); soFar = soFar.slice( matched.length ); } // Filters for ( type in Expr.filter ) { if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || (match = preFilters[ type ]( match ))) ) { matched = match.shift(); tokens.push({ value: matched, type: type, matches: match }); soFar = soFar.slice( matched.length ); } } if ( !matched ) { break; } } // Return the length of the invalid excess // if we're just parsing // Otherwise, throw an error or return tokens return parseOnly ? soFar.length : soFar ? Sizzle.error( selector ) : // Cache the tokens tokenCache( selector, groups ).slice( 0 ); }; function toSelector( tokens ) { var i = 0, len = tokens.length, selector = ""; for ( ; i < len; i++ ) { selector += tokens[i].value; } return selector; } function addCombinator( matcher, combinator, base ) { var dir = combinator.dir, skip = combinator.next, key = skip || dir, checkNonElements = base && key === "parentNode", doneName = done++; return combinator.first ? // Check against closest ancestor/preceding element function( elem, context, xml ) { while ( (elem = elem[ dir ]) ) { if ( elem.nodeType === 1 || checkNonElements ) { return matcher( elem, context, xml ); } } return false; } : // Check against all ancestor/preceding elements function( elem, context, xml ) { var oldCache, uniqueCache, outerCache, newCache = [ dirruns, doneName ]; // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching if ( xml ) { while ( (elem = elem[ dir ]) ) { if ( elem.nodeType === 1 || checkNonElements ) { if ( matcher( elem, context, xml ) ) { return true; } } } } else { while ( (elem = elem[ dir ]) ) { if ( elem.nodeType === 1 || checkNonElements ) { outerCache = elem[ expando ] || (elem[ expando ] = {}); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); if ( skip && skip === elem.nodeName.toLowerCase() ) { elem = elem[ dir ] || elem; } else if ( (oldCache = uniqueCache[ key ]) && oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { // Assign to newCache so results back-propagate to previous elements return (newCache[ 2 ] = oldCache[ 2 ]); } else { // Reuse newcache so results back-propagate to previous elements uniqueCache[ key ] = newCache; // A match means we're done; a fail means we have to keep checking if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { return true; } } } } } return false; }; } function elementMatcher( matchers ) { return matchers.length > 1 ? 
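	// AND together all filters of a compound selector, e.g. for
	// "input.required[disabled]" the TAG, CLASS and ATTR filters must all pass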
function( elem, context, xml ) { var i = matchers.length; while ( i-- ) { if ( !matchers[i]( elem, context, xml ) ) { return false; } } return true; } : matchers[0]; } function multipleContexts( selector, contexts, results ) { var i = 0, len = contexts.length; for ( ; i < len; i++ ) { Sizzle( selector, contexts[i], results ); } return results; } function condense( unmatched, map, filter, context, xml ) { var elem, newUnmatched = [], i = 0, len = unmatched.length, mapped = map != null; for ( ; i < len; i++ ) { if ( (elem = unmatched[i]) ) { if ( !filter || filter( elem, context, xml ) ) { newUnmatched.push( elem ); if ( mapped ) { map.push( i ); } } } } return newUnmatched; } function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { if ( postFilter && !postFilter[ expando ] ) { postFilter = setMatcher( postFilter ); } if ( postFinder && !postFinder[ expando ] ) { postFinder = setMatcher( postFinder, postSelector ); } return markFunction(function( seed, results, context, xml ) { var temp, i, elem, preMap = [], postMap = [], preexisting = results.length, // Get initial elements from seed or context elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), // Prefilter to get matcher input, preserving a map for seed-results synchronization matcherIn = preFilter && ( seed || !selector ) ? condense( elems, preMap, preFilter, context, xml ) : elems, matcherOut = matcher ? // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, postFinder || ( seed ? preFilter : preexisting || postFilter ) ? // ...intermediate processing is necessary [] : // ...otherwise use results directly results : matcherIn; // Find primary matches if ( matcher ) { matcher( matcherIn, matcherOut, context, xml ); } // Apply postFilter if ( postFilter ) { temp = condense( matcherOut, postMap ); postFilter( temp, [], context, xml ); // Un-match failing elements by moving them back to matcherIn i = temp.length; while ( i-- ) { if ( (elem = temp[i]) ) { matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); } } } if ( seed ) { if ( postFinder || preFilter ) { if ( postFinder ) { // Get the final matcherOut by condensing this intermediate into postFinder contexts temp = []; i = matcherOut.length; while ( i-- ) { if ( (elem = matcherOut[i]) ) { // Restore matcherIn since elem is not yet a final match temp.push( (matcherIn[i] = elem) ); } } postFinder( null, (matcherOut = []), temp, xml ); } // Move matched elements from seed to results to keep them synchronized i = matcherOut.length; while ( i-- ) { if ( (elem = matcherOut[i]) && (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { seed[temp] = !(results[temp] = elem); } } } // Add elements to results, through postFinder if defined } else { matcherOut = condense( matcherOut === results ? matcherOut.splice( preexisting, matcherOut.length ) : matcherOut ); if ( postFinder ) { postFinder( null, results, matcherOut, xml ); } else { push.apply( results, matcherOut ); } } }); } function matcherFromTokens( tokens ) { var checkContext, matcher, j, len = tokens.length, leadingRelative = Expr.relative[ tokens[0].type ], implicitRelative = leadingRelative || Expr.relative[" "], i = leadingRelative ? 
1 : 0, // The foundational matcher ensures that elements are reachable from top-level context(s) matchContext = addCombinator( function( elem ) { return elem === checkContext; }, implicitRelative, true ), matchAnyContext = addCombinator( function( elem ) { return indexOf( checkContext, elem ) > -1; }, implicitRelative, true ), matchers = [ function( elem, context, xml ) { var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( (checkContext = context).nodeType ? matchContext( elem, context, xml ) : matchAnyContext( elem, context, xml ) ); // Avoid hanging onto element (issue #299) checkContext = null; return ret; } ]; for ( ; i < len; i++ ) { if ( (matcher = Expr.relative[ tokens[i].type ]) ) { matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; } else { matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); // Return special upon seeing a positional matcher if ( matcher[ expando ] ) { // Find the next relative operator (if any) for proper handling j = ++i; for ( ; j < len; j++ ) { if ( Expr.relative[ tokens[j].type ] ) { break; } } return setMatcher( i > 1 && elementMatcher( matchers ), i > 1 && toSelector( // If the preceding token was a descendant combinator, insert an implicit any-element `*` tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" }) ).replace( rtrim, "$1" ), matcher, i < j && matcherFromTokens( tokens.slice( i, j ) ), j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), j < len && toSelector( tokens ) ); } matchers.push( matcher ); } } return elementMatcher( matchers ); } function matcherFromGroupMatchers( elementMatchers, setMatchers ) { var bySet = setMatchers.length > 0, byElement = elementMatchers.length > 0, superMatcher = function( seed, context, xml, results, outermost ) { var elem, j, matcher, matchedCount = 0, i = "0", unmatched = seed && [], setMatched = [], contextBackup = outermostContext, // We must always have either seed elements or outermost context elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), // Use integer dirruns iff this is the outermost matcher dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), len = elems.length; if ( outermost ) { outermostContext = context === document || context || outermost; } // Add elements passing elementMatchers directly to results // Support: IE<9, Safari // Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id for ( ; i !== len && (elem = elems[i]) != null; i++ ) { if ( byElement && elem ) { j = 0; if ( !context && elem.ownerDocument !== document ) { setDocument( elem ); xml = !documentIsHTML; } while ( (matcher = elementMatchers[j++]) ) { if ( matcher( elem, context || document, xml) ) { results.push( elem ); break; } } if ( outermost ) { dirruns = dirrunsUnique; } } // Track unmatched elements for set filters if ( bySet ) { // They will have gone through all possible matchers if ( (elem = !matcher && elem) ) { matchedCount--; } // Lengthen the array for every element, matched or not if ( seed ) { unmatched.push( elem ); } } } // `i` is now the count of elements visited above, and adding it to `matchedCount` // makes the latter nonnegative. matchedCount += i; // Apply set filters to unmatched elements // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` // equals `i`), unless we didn't visit _any_ elements in the above loop because we have // no element matchers and no seed. 
// Incrementing an initially-string "0" `i` allows `i` to remain a string only in that // case, which will result in a "00" `matchedCount` that differs from `i` but is also // numerically zero. if ( bySet && i !== matchedCount ) { j = 0; while ( (matcher = setMatchers[j++]) ) { matcher( unmatched, setMatched, context, xml ); } if ( seed ) { // Reintegrate element matches to eliminate the need for sorting if ( matchedCount > 0 ) { while ( i-- ) { if ( !(unmatched[i] || setMatched[i]) ) { setMatched[i] = pop.call( results ); } } } // Discard index placeholder values to get only actual matches setMatched = condense( setMatched ); } // Add matches to results push.apply( results, setMatched ); // Seedless set matches succeeding multiple successful matchers stipulate sorting if ( outermost && !seed && setMatched.length > 0 && ( matchedCount + setMatchers.length ) > 1 ) { Sizzle.uniqueSort( results ); } } // Override manipulation of globals by nested matchers if ( outermost ) { dirruns = dirrunsUnique; outermostContext = contextBackup; } return unmatched; }; return bySet ? markFunction( superMatcher ) : superMatcher; } compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { var i, setMatchers = [], elementMatchers = [], cached = compilerCache[ selector + " " ]; if ( !cached ) { // Generate a function of recursive functions that can be used to check each element if ( !match ) { match = tokenize( selector ); } i = match.length; while ( i-- ) { cached = matcherFromTokens( match[i] ); if ( cached[ expando ] ) { setMatchers.push( cached ); } else { elementMatchers.push( cached ); } } // Cache the compiled function cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); // Save selector and tokenization cached.selector = selector; } return cached; }; /** * A low-level selection function that works with Sizzle's compiled * selector functions * @param {String|Function} selector A selector or a pre-compiled * selector function built with Sizzle.compile * @param {Element} context * @param {Array} [results] * @param {Array} [seed] A set of elements to match against */ select = Sizzle.select = function( selector, context, results, seed ) { var i, tokens, token, type, find, compiled = typeof selector === "function" && selector, match = !seed && tokenize( (selector = compiled.selector || selector) ); results = results || []; // Try to minimize operations if there is only one selector in the list and no seed // (the latter of which guarantees us context) if ( match.length === 1 ) { // Reduce context if the leading compound selector is an ID tokens = match[0] = match[0].slice( 0 ); if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; if ( !context ) { return results; // Precompiled matchers will still verify ancestry, so step up a level } else if ( compiled ) { context = context.parentNode; } selector = selector.slice( tokens.shift().value.length ); } // Fetch a seed set for right-to-left matching i = matchExpr["needsContext"].test( selector ) ? 
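	// Positional pseudos (":first", ":eq(n)") and leading combinators depend on
	// the full context, so skip the seed-finding shortcut for them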
0 : tokens.length; while ( i-- ) { token = tokens[i]; // Abort if we hit a combinator if ( Expr.relative[ (type = token.type) ] ) { break; } if ( (find = Expr.find[ type ]) ) { // Search, expanding context for leading sibling combinators if ( (seed = find( token.matches[0].replace( runescape, funescape ), rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context )) ) { // If seed is empty or no tokens remain, we can return early tokens.splice( i, 1 ); selector = seed.length && toSelector( tokens ); if ( !selector ) { push.apply( results, seed ); return results; } break; } } } } // Compile and execute a filtering function if one is not provided // Provide `match` to avoid retokenization if we modified the selector above ( compiled || compile( selector, match ) )( seed, context, !documentIsHTML, results, !context || rsibling.test( selector ) && testContext( context.parentNode ) || context ); return results; }; // One-time assignments // Sort stability support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; // Support: Chrome 14-35+ // Always assume duplicates if they aren't passed to the comparison function support.detectDuplicates = !!hasDuplicate; // Initialize against the default document setDocument(); // Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) // Detached nodes confoundingly follow *each other* support.sortDetached = assert(function( el ) { // Should return 1, but returns 4 (following) return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; }); // Support: IE<8 // Prevent attribute/property "interpolation" // https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx if ( !assert(function( el ) { el.innerHTML = "<a href='#'></a>"; return el.firstChild.getAttribute("href") === "#" ; }) ) { addHandle( "type|href|height|width", function( elem, name, isXML ) { if ( !isXML ) { return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); } }); } // Support: IE<9 // Use defaultValue in place of getAttribute("value") if ( !support.attributes || !assert(function( el ) { el.innerHTML = "<input/>"; el.firstChild.setAttribute( "value", "" ); return el.firstChild.getAttribute( "value" ) === ""; }) ) { addHandle( "value", function( elem, name, isXML ) { if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { return elem.defaultValue; } }); } // Support: IE<9 // Use getAttributeNode to fetch booleans when getAttribute lies if ( !assert(function( el ) { return el.getAttribute("disabled") == null; }) ) { addHandle( booleans, function( elem, name, isXML ) { var val; if ( !isXML ) { return elem[ name ] === true ? name.toLowerCase() : (val = elem.getAttributeNode( name )) && val.specified ? 
val.value : null; } }); } return Sizzle; })( window ); jQuery.find = Sizzle; jQuery.expr = Sizzle.selectors; // Deprecated jQuery.expr[ ":" ] = jQuery.expr.pseudos; jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; jQuery.text = Sizzle.getText; jQuery.isXMLDoc = Sizzle.isXML; jQuery.contains = Sizzle.contains; jQuery.escapeSelector = Sizzle.escape; var dir = function( elem, dir, until ) { var matched = [], truncate = until !== undefined; while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { if ( elem.nodeType === 1 ) { if ( truncate && jQuery( elem ).is( until ) ) { break; } matched.push( elem ); } } return matched; }; var siblings = function( n, elem ) { var matched = []; for ( ; n; n = n.nextSibling ) { if ( n.nodeType === 1 && n !== elem ) { matched.push( n ); } } return matched; }; var rneedsContext = jQuery.expr.match.needsContext; function nodeName( elem, name ) { return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); }; var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); // Implement the identical functionality for filter and not function winnow( elements, qualifier, not ) { if ( isFunction( qualifier ) ) { return jQuery.grep( elements, function( elem, i ) { return !!qualifier.call( elem, i, elem ) !== not; } ); } // Single element if ( qualifier.nodeType ) { return jQuery.grep( elements, function( elem ) { return ( elem === qualifier ) !== not; } ); } // Arraylike of elements (jQuery, arguments, Array) if ( typeof qualifier !== "string" ) { return jQuery.grep( elements, function( elem ) { return ( indexOf.call( qualifier, elem ) > -1 ) !== not; } ); } // Filtered directly for both simple and complex selectors return jQuery.filter( qualifier, elements, not ); } jQuery.filter = function( expr, elems, not ) { var elem = elems[ 0 ]; if ( not ) { expr = ":not(" + expr + ")"; } if ( elems.length === 1 && elem.nodeType === 1 ) { return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; } return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { return elem.nodeType === 1; } ) ); }; jQuery.fn.extend( { find: function( selector ) { var i, ret, len = this.length, self = this; if ( typeof selector !== "string" ) { return this.pushStack( jQuery( selector ).filter( function() { for ( i = 0; i < len; i++ ) { if ( jQuery.contains( self[ i ], this ) ) { return true; } } } ) ); } ret = this.pushStack( [] ); for ( i = 0; i < len; i++ ) { jQuery.find( selector, self[ i ], ret ); } return len > 1 ? jQuery.uniqueSort( ret ) : ret; }, filter: function( selector ) { return this.pushStack( winnow( this, selector || [], false ) ); }, not: function( selector ) { return this.pushStack( winnow( this, selector || [], true ) ); }, is: function( selector ) { return !!winnow( this, // If this is a positional/relative selector, check membership in the returned set // so $("p:first").is("p:last") won't return true for a doc with two "p". typeof selector === "string" && rneedsContext.test( selector ) ? 
jQuery( selector ) : selector || [], false ).length; } } ); // Initialize a jQuery object // A central reference to the root jQuery(document) var rootjQuery, // A simple way to check for HTML strings // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) // Strict HTML recognition (#11290: must start with <) // Shortcut simple #id case for speed rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, init = jQuery.fn.init = function( selector, context, root ) { var match, elem; // HANDLE: $(""), $(null), $(undefined), $(false) if ( !selector ) { return this; } // Method init() accepts an alternate rootjQuery // so migrate can support jQuery.sub (gh-2101) root = root || rootjQuery; // Handle HTML strings if ( typeof selector === "string" ) { if ( selector[ 0 ] === "<" && selector[ selector.length - 1 ] === ">" && selector.length >= 3 ) { // Assume that strings that start and end with <> are HTML and skip the regex check match = [ null, selector, null ]; } else { match = rquickExpr.exec( selector ); } // Match html or make sure no context is specified for #id if ( match && ( match[ 1 ] || !context ) ) { // HANDLE: $(html) -> $(array) if ( match[ 1 ] ) { context = context instanceof jQuery ? context[ 0 ] : context; // Option to run scripts is true for back-compat // Intentionally let the error be thrown if parseHTML is not present jQuery.merge( this, jQuery.parseHTML( match[ 1 ], context && context.nodeType ? context.ownerDocument || context : document, true ) ); // HANDLE: $(html, props) if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { for ( match in context ) { // Properties of context are called as methods if possible if ( isFunction( this[ match ] ) ) { this[ match ]( context[ match ] ); // ...and otherwise set as attributes } else { this.attr( match, context[ match ] ); } } } return this; // HANDLE: $(#id) } else { elem = document.getElementById( match[ 2 ] ); if ( elem ) { // Inject the element directly into the jQuery object this[ 0 ] = elem; this.length = 1; } return this; } // HANDLE: $(expr, $(...)) } else if ( !context || context.jquery ) { return ( context || root ).find( selector ); // HANDLE: $(expr, context) // (which is just equivalent to: $(context).find(expr) } else { return this.constructor( context ).find( selector ); } // HANDLE: $(DOMElement) } else if ( selector.nodeType ) { this[ 0 ] = selector; this.length = 1; return this; // HANDLE: $(function) // Shortcut for document ready } else if ( isFunction( selector ) ) { return root.ready !== undefined ? 
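// Sketch of the shortcut taken here (illustrative): passing a function to
// the constructor registers it as a ready handler, so
//
//   jQuery( function( $ ) { /* DOM is ready */ } );
//
// is equivalent to jQuery( document ).ready( fn ) whenever .ready exists.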
root.ready( selector ) : // Execute immediately if ready is not present selector( jQuery ); } return jQuery.makeArray( selector, this ); }; // Give the init function the jQuery prototype for later instantiation init.prototype = jQuery.fn; // Initialize central reference rootjQuery = jQuery( document ); var rparentsprev = /^(?:parents|prev(?:Until|All))/, // Methods guaranteed to produce a unique set when starting from a unique set guaranteedUnique = { children: true, contents: true, next: true, prev: true }; jQuery.fn.extend( { has: function( target ) { var targets = jQuery( target, this ), l = targets.length; return this.filter( function() { var i = 0; for ( ; i < l; i++ ) { if ( jQuery.contains( this, targets[ i ] ) ) { return true; } } } ); }, closest: function( selectors, context ) { var cur, i = 0, l = this.length, matched = [], targets = typeof selectors !== "string" && jQuery( selectors ); // Positional selectors never match, since there's no _selection_ context if ( !rneedsContext.test( selectors ) ) { for ( ; i < l; i++ ) { for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { // Always skip document fragments if ( cur.nodeType < 11 && ( targets ? targets.index( cur ) > -1 : // Don't pass non-elements to Sizzle cur.nodeType === 1 && jQuery.find.matchesSelector( cur, selectors ) ) ) { matched.push( cur ); break; } } } } return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); }, // Determine the position of an element within the set index: function( elem ) { // No argument, return index in parent if ( !elem ) { return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; } // Index in selector if ( typeof elem === "string" ) { return indexOf.call( jQuery( elem ), this[ 0 ] ); } // Locate the position of the desired element return indexOf.call( this, // If it receives a jQuery object, the first element is used elem.jquery ? elem[ 0 ] : elem ); }, add: function( selector, context ) { return this.pushStack( jQuery.uniqueSort( jQuery.merge( this.get(), jQuery( selector, context ) ) ) ); }, addBack: function( selector ) { return this.add( selector == null ? this.prevObject : this.prevObject.filter( selector ) ); } } ); function sibling( cur, dir ) { while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} return cur; } jQuery.each( { parent: function( elem ) { var parent = elem.parentNode; return parent && parent.nodeType !== 11 ? parent : null; }, parents: function( elem ) { return dir( elem, "parentNode" ); }, parentsUntil: function( elem, i, until ) { return dir( elem, "parentNode", until ); }, next: function( elem ) { return sibling( elem, "nextSibling" ); }, prev: function( elem ) { return sibling( elem, "previousSibling" ); }, nextAll: function( elem ) { return dir( elem, "nextSibling" ); }, prevAll: function( elem ) { return dir( elem, "previousSibling" ); }, nextUntil: function( elem, i, until ) { return dir( elem, "nextSibling", until ); }, prevUntil: function( elem, i, until ) { return dir( elem, "previousSibling", until ); }, siblings: function( elem ) { return siblings( ( elem.parentNode || {} ).firstChild, elem ); }, children: function( elem ) { return siblings( elem.firstChild ); }, contents: function( elem ) { if ( nodeName( elem, "iframe" ) ) { return elem.contentDocument; } // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only // Treat the template element as a regular one in browsers that // don't support it. 
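// Usage sketch for .contents() (illustrative; the selectors are hypothetical):
//
//   jQuery( "iframe" ).contents();    // the frame's contentDocument
//   jQuery( "template" ).contents();  // children of the template's fragment
//
// The branch below normalizes browsers without template `content` support: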
if ( nodeName( elem, "template" ) ) { elem = elem.content || elem; } return jQuery.merge( [], elem.childNodes ); } }, function( name, fn ) { jQuery.fn[ name ] = function( until, selector ) { var matched = jQuery.map( this, fn, until ); if ( name.slice( -5 ) !== "Until" ) { selector = until; } if ( selector && typeof selector === "string" ) { matched = jQuery.filter( selector, matched ); } if ( this.length > 1 ) { // Remove duplicates if ( !guaranteedUnique[ name ] ) { jQuery.uniqueSort( matched ); } // Reverse order for parents* and prev-derivatives if ( rparentsprev.test( name ) ) { matched.reverse(); } } return this.pushStack( matched ); }; } ); var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); // Convert String-formatted options into Object-formatted ones function createOptions( options ) { var object = {}; jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { object[ flag ] = true; } ); return object; } /* * Create a callback list using the following parameters: * * options: an optional list of space-separated options that will change how * the callback list behaves or a more traditional option object * * By default a callback list will act like an event callback list and can be * "fired" multiple times. * * Possible options: * * once: will ensure the callback list can only be fired once (like a Deferred) * * memory: will keep track of previous values and will call any callback added * after the list has been fired right away with the latest "memorized" * values (like a Deferred) * * unique: will ensure a callback can only be added once (no duplicate in the list) * * stopOnFalse: interrupt callings when a callback returns false * */ jQuery.Callbacks = function( options ) { // Convert options from String-formatted to Object-formatted if needed // (we check in cache first) options = typeof options === "string" ? 
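// Sketch of the two accepted option forms (illustrative):
//
//   jQuery.Callbacks( "once memory" );                 // string form
//   jQuery.Callbacks( { once: true, memory: true } );  // object form
//
// Both normalize to the same flags object before use.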
createOptions( options ) : jQuery.extend( {}, options ); var // Flag to know if list is currently firing firing, // Last fire value for non-forgettable lists memory, // Flag to know if list was already fired fired, // Flag to prevent firing locked, // Actual callback list list = [], // Queue of execution data for repeatable lists queue = [], // Index of currently firing callback (modified by add/remove as needed) firingIndex = -1, // Fire callbacks fire = function() { // Enforce single-firing locked = locked || options.once; // Execute callbacks for all pending executions, // respecting firingIndex overrides and runtime changes fired = firing = true; for ( ; queue.length; firingIndex = -1 ) { memory = queue.shift(); while ( ++firingIndex < list.length ) { // Run callback and check for early termination if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && options.stopOnFalse ) { // Jump to end and forget the data so .add doesn't re-fire firingIndex = list.length; memory = false; } } } // Forget the data if we're done with it if ( !options.memory ) { memory = false; } firing = false; // Clean up if we're done firing for good if ( locked ) { // Keep an empty list if we have data for future add calls if ( memory ) { list = []; // Otherwise, this object is spent } else { list = ""; } } }, // Actual Callbacks object self = { // Add a callback or a collection of callbacks to the list add: function() { if ( list ) { // If we have memory from a past run, we should fire after adding if ( memory && !firing ) { firingIndex = list.length - 1; queue.push( memory ); } ( function add( args ) { jQuery.each( args, function( _, arg ) { if ( isFunction( arg ) ) { if ( !options.unique || !self.has( arg ) ) { list.push( arg ); } } else if ( arg && arg.length && toType( arg ) !== "string" ) { // Inspect recursively add( arg ); } } ); } )( arguments ); if ( memory && !firing ) { fire(); } } return this; }, // Remove a callback from the list remove: function() { jQuery.each( arguments, function( _, arg ) { var index; while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { list.splice( index, 1 ); // Handle firing indexes if ( index <= firingIndex ) { firingIndex--; } } } ); return this; }, // Check if a given callback is in the list. // If no argument is given, return whether or not list has callbacks attached. has: function( fn ) { return fn ? jQuery.inArray( fn, list ) > -1 : list.length > 0; }, // Remove all callbacks from the list empty: function() { if ( list ) { list = []; } return this; }, // Disable .fire and .add // Abort any current/pending executions // Clear all callbacks and values disable: function() { locked = queue = []; list = memory = ""; return this; }, disabled: function() { return !list; }, // Disable .fire // Also disable .add unless we have memory (since it would have no effect) // Abort any pending executions lock: function() { locked = queue = []; if ( !memory && !firing ) { list = memory = ""; } return this; }, locked: function() { return !!locked; }, // Call all callbacks with the given context and arguments fireWith: function( context, args ) { if ( !locked ) { args = args || []; args = [ context, args.slice ? 
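// The arguments are snapshotted here so later mutation by the caller cannot
// affect executions already queued. Usage sketch (illustrative):
//
//   var cb = jQuery.Callbacks();
//   cb.add( function( a, b ) { console.log( a, b ); } );
//   cb.fireWith( window, [ 1, 2 ] );  // logs "1 2" with `this === window`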
args.slice() : args ]; queue.push( args ); if ( !firing ) { fire(); } } return this; }, // Call all the callbacks with the given arguments fire: function() { self.fireWith( this, arguments ); return this; }, // To know if the callbacks have already been called at least once fired: function() { return !!fired; } }; return self; }; function Identity( v ) { return v; } function Thrower( ex ) { throw ex; } function adoptValue( value, resolve, reject, noValue ) { var method; try { // Check for promise aspect first to privilege synchronous behavior if ( value && isFunction( ( method = value.promise ) ) ) { method.call( value ).done( resolve ).fail( reject ); // Other thenables } else if ( value && isFunction( ( method = value.then ) ) ) { method.call( value, resolve, reject ); // Other non-thenables } else { // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: // * false: [ value ].slice( 0 ) => resolve( value ) // * true: [ value ].slice( 1 ) => resolve() resolve.apply( undefined, [ value ].slice( noValue ) ); } // For Promises/A+, convert exceptions into rejections // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in // Deferred#then to conditionally suppress rejection. } catch ( value ) { // Support: Android 4.0 only // Strict mode functions invoked without .call/.apply get global-object context reject.apply( undefined, [ value ] ); } } jQuery.extend( { Deferred: function( func ) { var tuples = [ // action, add listener, callbacks, // ... .then handlers, argument index, [final state] [ "notify", "progress", jQuery.Callbacks( "memory" ), jQuery.Callbacks( "memory" ), 2 ], [ "resolve", "done", jQuery.Callbacks( "once memory" ), jQuery.Callbacks( "once memory" ), 0, "resolved" ], [ "reject", "fail", jQuery.Callbacks( "once memory" ), jQuery.Callbacks( "once memory" ), 1, "rejected" ] ], state = "pending", promise = { state: function() { return state; }, always: function() { deferred.done( arguments ).fail( arguments ); return this; }, "catch": function( fn ) { return promise.then( null, fn ); }, // Keep pipe for back-compat pipe: function( /* fnDone, fnFail, fnProgress */ ) { var fns = arguments; return jQuery.Deferred( function( newDefer ) { jQuery.each( tuples, function( i, tuple ) { // Map tuples (progress, done, fail) to arguments (done, fail, progress) var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; // deferred.progress(function() { bind to newDefer or newDefer.notify }) // deferred.done(function() { bind to newDefer or newDefer.resolve }) // deferred.fail(function() { bind to newDefer or newDefer.reject }) deferred[ tuple[ 1 ] ]( function() { var returned = fn && fn.apply( this, arguments ); if ( returned && isFunction( returned.promise ) ) { returned.promise() .progress( newDefer.notify ) .done( newDefer.resolve ) .fail( newDefer.reject ); } else { newDefer[ tuple[ 0 ] + "With" ]( this, fn ? 
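// Legacy .pipe() forwards plain return values as a single resolution
// argument, while thenables returned by a handler are chained through their
// promise. Sketch (illustrative):
//
//   jQuery.Deferred().resolve( 2 )
//       .pipe( function( v ) { return v * 2; } )
//       .done( function( v ) { /* v === 4 */ } );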
[ returned ] : arguments ); } } ); } ); fns = null; } ).promise(); }, then: function( onFulfilled, onRejected, onProgress ) { var maxDepth = 0; function resolve( depth, deferred, handler, special ) { return function() { var that = this, args = arguments, mightThrow = function() { var returned, then; // Support: Promises/A+ section 2.3.3.3.3 // https://promisesaplus.com/#point-59 // Ignore double-resolution attempts if ( depth < maxDepth ) { return; } returned = handler.apply( that, args ); // Support: Promises/A+ section 2.3.1 // https://promisesaplus.com/#point-48 if ( returned === deferred.promise() ) { throw new TypeError( "Thenable self-resolution" ); } // Support: Promises/A+ sections 2.3.3.1, 3.5 // https://promisesaplus.com/#point-54 // https://promisesaplus.com/#point-75 // Retrieve `then` only once then = returned && // Support: Promises/A+ section 2.3.4 // https://promisesaplus.com/#point-64 // Only check objects and functions for thenability ( typeof returned === "object" || typeof returned === "function" ) && returned.then; // Handle a returned thenable if ( isFunction( then ) ) { // Special processors (notify) just wait for resolution if ( special ) { then.call( returned, resolve( maxDepth, deferred, Identity, special ), resolve( maxDepth, deferred, Thrower, special ) ); // Normal processors (resolve) also hook into progress } else { // ...and disregard older resolution values maxDepth++; then.call( returned, resolve( maxDepth, deferred, Identity, special ), resolve( maxDepth, deferred, Thrower, special ), resolve( maxDepth, deferred, Identity, deferred.notifyWith ) ); } // Handle all other returned values } else { // Only substitute handlers pass on context // and multiple values (non-spec behavior) if ( handler !== Identity ) { that = undefined; args = [ returned ]; } // Process the value(s) // Default process is resolve ( special || deferred.resolveWith )( that, args ); } }, // Only normal processors (resolve) catch and reject exceptions process = special ? mightThrow : function() { try { mightThrow(); } catch ( e ) { if ( jQuery.Deferred.exceptionHook ) { jQuery.Deferred.exceptionHook( e, process.stackTrace ); } // Support: Promises/A+ section 2.3.3.3.4.1 // https://promisesaplus.com/#point-61 // Ignore post-resolution exceptions if ( depth + 1 >= maxDepth ) { // Only substitute handlers pass on context // and multiple values (non-spec behavior) if ( handler !== Thrower ) { that = undefined; args = [ e ]; } deferred.rejectWith( that, args ); } } }; // Support: Promises/A+ section 2.3.3.3.1 // https://promisesaplus.com/#point-57 // Re-resolve promises immediately to dodge false rejection from // subsequent errors if ( depth ) { process(); } else { // Call an optional hook to record the stack, in case of exception // since it's otherwise lost when execution goes async if ( jQuery.Deferred.getStackHook ) { process.stackTrace = jQuery.Deferred.getStackHook(); } window.setTimeout( process ); } }; } return jQuery.Deferred( function( newDefer ) { // progress_handlers.add( ... ) tuples[ 0 ][ 3 ].add( resolve( 0, newDefer, isFunction( onProgress ) ? onProgress : Identity, newDefer.notifyWith ) ); // fulfilled_handlers.add( ... ) tuples[ 1 ][ 3 ].add( resolve( 0, newDefer, isFunction( onFulfilled ) ? onFulfilled : Identity ) ); // rejected_handlers.add( ... ) tuples[ 2 ][ 3 ].add( resolve( 0, newDefer, isFunction( onRejected ) ? 
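// With no handler supplied, Identity passes fulfillment values through and
// Thrower re-throws rejections, so omitted callbacks preserve the chain's
// state. Sketch (illustrative; `d`, `onDone` and `onFail` are hypothetical):
//
//   d.then( null, null ).then( onDone, onFail );  // state flows through unchanged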
onRejected : Thrower ) ); } ).promise(); }, // Get a promise for this deferred // If obj is provided, the promise aspect is added to the object promise: function( obj ) { return obj != null ? jQuery.extend( obj, promise ) : promise; } }, deferred = {}; // Add list-specific methods jQuery.each( tuples, function( i, tuple ) { var list = tuple[ 2 ], stateString = tuple[ 5 ]; // promise.progress = list.add // promise.done = list.add // promise.fail = list.add promise[ tuple[ 1 ] ] = list.add; // Handle state if ( stateString ) { list.add( function() { // state = "resolved" (i.e., fulfilled) // state = "rejected" state = stateString; }, // rejected_callbacks.disable // fulfilled_callbacks.disable tuples[ 3 - i ][ 2 ].disable, // rejected_handlers.disable // fulfilled_handlers.disable tuples[ 3 - i ][ 3 ].disable, // progress_callbacks.lock tuples[ 0 ][ 2 ].lock, // progress_handlers.lock tuples[ 0 ][ 3 ].lock ); } // progress_handlers.fire // fulfilled_handlers.fire // rejected_handlers.fire list.add( tuple[ 3 ].fire ); // deferred.notify = function() { deferred.notifyWith(...) } // deferred.resolve = function() { deferred.resolveWith(...) } // deferred.reject = function() { deferred.rejectWith(...) } deferred[ tuple[ 0 ] ] = function() { deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); return this; }; // deferred.notifyWith = list.fireWith // deferred.resolveWith = list.fireWith // deferred.rejectWith = list.fireWith deferred[ tuple[ 0 ] + "With" ] = list.fireWith; } ); // Make the deferred a promise promise.promise( deferred ); // Call given func if any if ( func ) { func.call( deferred, deferred ); } // All done! return deferred; }, // Deferred helper when: function( singleValue ) { var // count of uncompleted subordinates remaining = arguments.length, // count of unprocessed arguments i = remaining, // subordinate fulfillment data resolveContexts = Array( i ), resolveValues = slice.call( arguments ), // the master Deferred master = jQuery.Deferred(), // subordinate callback factory updateFunc = function( i ) { return function( value ) { resolveContexts[ i ] = this; resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; if ( !( --remaining ) ) { master.resolveWith( resolveContexts, resolveValues ); } }; }; // Single- and empty arguments are adopted like Promise.resolve if ( remaining <= 1 ) { adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, !remaining ); // Use .then() to unwrap secondary thenables (cf. gh-3000) if ( master.state() === "pending" || isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { return master.then(); } } // Multiple arguments are aggregated like Promise.all array elements while ( i-- ) { adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); } return master.promise(); } } ); // These usually indicate a programmer mistake during development, // warn about them ASAP rather than swallowing them by default. 
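// Usage sketch for jQuery.when above (illustrative; d1 and d2 are
// hypothetical Deferreds):
//
//   jQuery.when( d1, d2 ).done( function( r1, r2 ) { /* both resolved */ } );
//   jQuery.when( 42 ).done( function( v ) { /* v === 42, value adopted */ } );
//
// The regular expression below whitelists the native error names worth
// surfacing through the exception hook: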
var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; jQuery.Deferred.exceptionHook = function( error, stack ) { // Support: IE 8 - 9 only // Console exists when dev tools are open, which can happen at any time if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); } }; jQuery.readyException = function( error ) { window.setTimeout( function() { throw error; } ); }; // The deferred used on DOM ready var readyList = jQuery.Deferred(); jQuery.fn.ready = function( fn ) { readyList .then( fn ) // Wrap jQuery.readyException in a function so that the lookup // happens at the time of error handling instead of callback // registration. .catch( function( error ) { jQuery.readyException( error ); } ); return this; }; jQuery.extend( { // Is the DOM ready to be used? Set to true once it occurs. isReady: false, // A counter to track how many items to wait for before // the ready event fires. See #6781 readyWait: 1, // Handle when the DOM is ready ready: function( wait ) { // Abort if there are pending holds or we're already ready if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { return; } // Remember that the DOM is ready jQuery.isReady = true; // If a normal DOM Ready event fired, decrement, and wait if need be if ( wait !== true && --jQuery.readyWait > 0 ) { return; } // If there are functions bound, to execute readyList.resolveWith( document, [ jQuery ] ); } } ); jQuery.ready.then = readyList.then; // The ready event handler and self cleanup method function completed() { document.removeEventListener( "DOMContentLoaded", completed ); window.removeEventListener( "load", completed ); jQuery.ready(); } // Catch cases where $(document).ready() is called // after the browser event has already occurred. // Support: IE <=9 - 10 only // Older IE sometimes signals "interactive" too soon if ( document.readyState === "complete" || ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { // Handle it asynchronously to allow scripts the opportunity to delay ready window.setTimeout( jQuery.ready ); } else { // Use the handy event callback document.addEventListener( "DOMContentLoaded", completed ); // A fallback to window.onload, that will always work window.addEventListener( "load", completed ); } // Multifunctional method to get and set values of a collection // The value/s can optionally be executed if it's a function var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { var i = 0, len = elems.length, bulk = key == null; // Sets many values if ( toType( key ) === "object" ) { chainable = true; for ( i in key ) { access( elems, fn, i, key[ i ], true, emptyGet, raw ); } // Sets one value } else if ( value !== undefined ) { chainable = true; if ( !isFunction( value ) ) { raw = true; } if ( bulk ) { // Bulk operations run against the entire set if ( raw ) { fn.call( elems, value ); fn = null; // ...except when executing function values } else { bulk = fn; fn = function( elem, key, value ) { return bulk.call( jQuery( elem ), value ); }; } } if ( fn ) { for ( ; i < len; i++ ) { fn( elems[ i ], key, raw ? value : value.call( elems[ i ], i, fn( elems[ i ], key ) ) ); } } } if ( chainable ) { return elems; } // Gets if ( bulk ) { return fn.call( elems ); } return len ? 
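// This helper backs the get/set dual APIs such as .attr(), .prop() and
// .css(): reads come from the first element, writes apply to every element
// and keep the chain. Sketch (illustrative; `$set` is a hypothetical set):
//
//   $set.css( "color" );          // get, first element only
//   $set.css( "color", "red" );   // set, all elements, returns $set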
fn( elems[ 0 ], key ) : emptyGet; }; // Matches dashed string for camelizing var rmsPrefix = /^-ms-/, rdashAlpha = /-([a-z])/g; // Used by camelCase as callback to replace() function fcamelCase( all, letter ) { return letter.toUpperCase(); } // Convert dashed to camelCase; used by the css and data modules // Support: IE <=9 - 11, Edge 12 - 15 // Microsoft forgot to hump their vendor prefix (#9572) function camelCase( string ) { return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); } var acceptData = function( owner ) { // Accepts only: // - Node // - Node.ELEMENT_NODE // - Node.DOCUMENT_NODE // - Object // - Any return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); }; function Data() { this.expando = jQuery.expando + Data.uid++; } Data.uid = 1; Data.prototype = { cache: function( owner ) { // Check if the owner object already has a cache var value = owner[ this.expando ]; // If not, create one if ( !value ) { value = {}; // We can accept data for non-element nodes in modern browsers, // but we should not, see #8335. // Always return an empty object. if ( acceptData( owner ) ) { // If it is a node unlikely to be stringify-ed or looped over // use plain assignment if ( owner.nodeType ) { owner[ this.expando ] = value; // Otherwise secure it in a non-enumerable property // configurable must be true to allow the property to be // deleted when data is removed } else { Object.defineProperty( owner, this.expando, { value: value, configurable: true } ); } } } return value; }, set: function( owner, data, value ) { var prop, cache = this.cache( owner ); // Handle: [ owner, key, value ] args // Always use camelCase key (gh-2257) if ( typeof data === "string" ) { cache[ camelCase( data ) ] = value; // Handle: [ owner, { properties } ] args } else { // Copy the properties one-by-one to the cache object for ( prop in data ) { cache[ camelCase( prop ) ] = data[ prop ]; } } return cache; }, get: function( owner, key ) { return key === undefined ? this.cache( owner ) : // Always use camelCase key (gh-2257) owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; }, access: function( owner, key, value ) { // In cases where either: // // 1. No key was specified // 2. A string key was specified, but no value provided // // Take the "read" path and allow the get method to determine // which value to return, respectively either: // // 1. The entire cache object // 2. The data stored at the key // if ( key === undefined || ( ( key && typeof key === "string" ) && value === undefined ) ) { return this.get( owner, key ); } // When the key is not a string, or both a key and value // are specified, set or extend (existing objects) with either: // // 1. An object of properties // 2. A key and value // this.set( owner, key, value ); // Since the "set" path can have two possible entry points // return the expected data based on which path was taken[*] return value !== undefined ? value : key; }, remove: function( owner, key ) { var i, cache = owner[ this.expando ]; if ( cache === undefined ) { return; } if ( key !== undefined ) { // Support array or space separated string of keys if ( Array.isArray( key ) ) { // If key is an array of keys... // We always set camelCase keys, so remove that. key = key.map( camelCase ); } else { key = camelCase( key ); // If a key with the spaces exists, use it. // Otherwise, create an array by matching non-whitespace key = key in cache ? 
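// Keys are stored camelCased (gh-2257), so dashed and camelCased spellings
// address the same slot. Sketch (illustrative; `$el` is hypothetical):
//
//   $el.data( "foo-bar", 1 );
//   $el.data( "fooBar" );         // 1, same underlying cache entry
//   $el.removeData( "foo-bar" );  // removed via the normalization here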
[ key ] : ( key.match( rnothtmlwhite ) || [] ); } i = key.length; while ( i-- ) { delete cache[ key[ i ] ]; } } // Remove the expando if there's no more data if ( key === undefined || jQuery.isEmptyObject( cache ) ) { // Support: Chrome <=35 - 45 // Webkit & Blink performance suffers when deleting properties // from DOM nodes, so set to undefined instead // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) if ( owner.nodeType ) { owner[ this.expando ] = undefined; } else { delete owner[ this.expando ]; } } }, hasData: function( owner ) { var cache = owner[ this.expando ]; return cache !== undefined && !jQuery.isEmptyObject( cache ); } }; var dataPriv = new Data(); var dataUser = new Data(); // Implementation Summary // // 1. Enforce API surface and semantic compatibility with 1.9.x branch // 2. Improve the module's maintainability by reducing the storage // paths to a single mechanism. // 3. Use the same single mechanism to support "private" and "user" data. // 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) // 5. Avoid exposing implementation details on user objects (eg. expando properties) // 6. Provide a clear path for implementation upgrade to WeakMap in 2014 var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, rmultiDash = /[A-Z]/g; function getData( data ) { if ( data === "true" ) { return true; } if ( data === "false" ) { return false; } if ( data === "null" ) { return null; } // Only convert to a number if it doesn't change the string if ( data === +data + "" ) { return +data; } if ( rbrace.test( data ) ) { return JSON.parse( data ); } return data; } function dataAttr( elem, key, data ) { var name; // If nothing was found internally, try to fetch any // data from the HTML5 data-* attribute if ( data === undefined && elem.nodeType === 1 ) { name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); data = elem.getAttribute( name ); if ( typeof data === "string" ) { try { data = getData( data ); } catch ( e ) {} // Make sure we set the data so it isn't changed later dataUser.set( elem, key, data ); } else { data = undefined; } } return data; } jQuery.extend( { hasData: function( elem ) { return dataUser.hasData( elem ) || dataPriv.hasData( elem ); }, data: function( elem, name, data ) { return dataUser.access( elem, name, data ); }, removeData: function( elem, name ) { dataUser.remove( elem, name ); }, // TODO: Now that all calls to _data and _removeData have been replaced // with direct calls to dataPriv methods, these can be deprecated. 
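// The two stores keep user data and internal bookkeeping apart. Sketch
// (illustrative; `elem` is a hypothetical element):
//
//   jQuery.data( elem, "k", 1 );     // user-visible store (dataUser)
//   jQuery._data( elem, "events" );  // private store (dataPriv), internal only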
_data: function( elem, name, data ) { return dataPriv.access( elem, name, data ); }, _removeData: function( elem, name ) { dataPriv.remove( elem, name ); } } ); jQuery.fn.extend( { data: function( key, value ) { var i, name, data, elem = this[ 0 ], attrs = elem && elem.attributes; // Gets all values if ( key === undefined ) { if ( this.length ) { data = dataUser.get( elem ); if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { i = attrs.length; while ( i-- ) { // Support: IE 11 only // The attrs elements can be null (#14894) if ( attrs[ i ] ) { name = attrs[ i ].name; if ( name.indexOf( "data-" ) === 0 ) { name = camelCase( name.slice( 5 ) ); dataAttr( elem, name, data[ name ] ); } } } dataPriv.set( elem, "hasDataAttrs", true ); } } return data; } // Sets multiple values if ( typeof key === "object" ) { return this.each( function() { dataUser.set( this, key ); } ); } return access( this, function( value ) { var data; // The calling jQuery object (element matches) is not empty // (and therefore has an element appears at this[ 0 ]) and the // `value` parameter was not undefined. An empty jQuery object // will result in `undefined` for elem = this[ 0 ] which will // throw an exception if an attempt to read a data cache is made. if ( elem && value === undefined ) { // Attempt to get data from the cache // The key will always be camelCased in Data data = dataUser.get( elem, key ); if ( data !== undefined ) { return data; } // Attempt to "discover" the data in // HTML5 custom data-* attrs data = dataAttr( elem, key ); if ( data !== undefined ) { return data; } // We tried really hard, but the data doesn't exist. return; } // Set the data... this.each( function() { // We always store the camelCased key dataUser.set( this, key, value ); } ); }, null, value, arguments.length > 1, null, true ); }, removeData: function( key ) { return this.each( function() { dataUser.remove( this, key ); } ); } } ); jQuery.extend( { queue: function( elem, type, data ) { var queue; if ( elem ) { type = ( type || "fx" ) + "queue"; queue = dataPriv.get( elem, type ); // Speed up dequeue by getting out quickly if this is just a lookup if ( data ) { if ( !queue || Array.isArray( data ) ) { queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); } else { queue.push( data ); } } return queue || []; } }, dequeue: function( elem, type ) { type = type || "fx"; var queue = jQuery.queue( elem, type ), startLength = queue.length, fn = queue.shift(), hooks = jQuery._queueHooks( elem, type ), next = function() { jQuery.dequeue( elem, type ); }; // If the fx queue is dequeued, always remove the progress sentinel if ( fn === "inprogress" ) { fn = queue.shift(); startLength--; } if ( fn ) { // Add a progress sentinel to prevent the fx queue from being // automatically dequeued if ( type === "fx" ) { queue.unshift( "inprogress" ); } // Clear up the last queue stop function delete hooks.stop; fn.call( elem, next, hooks ); } if ( !startLength && hooks ) { hooks.empty.fire(); } }, // Not public - generate a queueHooks object, or return the current one _queueHooks: function( elem, type ) { var key = type + "queueHooks"; return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { empty: jQuery.Callbacks( "once memory" ).add( function() { dataPriv.remove( elem, [ type + "queue", key ] ); } ) } ); } } ); jQuery.fn.extend( { queue: function( type, data ) { var setter = 2; if ( typeof type !== "string" ) { data = type; type = "fx"; setter--; } if ( arguments.length < setter ) { return jQuery.queue( this[ 0 ], 
type ); } return data === undefined ? this : this.each( function() { var queue = jQuery.queue( this, type, data ); // Ensure a hooks for this queue jQuery._queueHooks( this, type ); if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { jQuery.dequeue( this, type ); } } ); }, dequeue: function( type ) { return this.each( function() { jQuery.dequeue( this, type ); } ); }, clearQueue: function( type ) { return this.queue( type || "fx", [] ); }, // Get a promise resolved when queues of a certain type // are emptied (fx is the type by default) promise: function( type, obj ) { var tmp, count = 1, defer = jQuery.Deferred(), elements = this, i = this.length, resolve = function() { if ( !( --count ) ) { defer.resolveWith( elements, [ elements ] ); } }; if ( typeof type !== "string" ) { obj = type; type = undefined; } type = type || "fx"; while ( i-- ) { tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); if ( tmp && tmp.empty ) { count++; tmp.empty.add( resolve ); } } resolve(); return defer.promise( obj ); } } ); var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; var isHiddenWithinTree = function( elem, el ) { // isHiddenWithinTree might be called from jQuery#filter function; // in that case, element will be second argument elem = el || elem; // Inline style trumps all return elem.style.display === "none" || elem.style.display === "" && // Otherwise, check computed style // Support: Firefox <=43 - 45 // Disconnected elements can have computed display: none, so first confirm that elem is // in the document. jQuery.contains( elem.ownerDocument, elem ) && jQuery.css( elem, "display" ) === "none"; }; var swap = function( elem, options, callback, args ) { var ret, name, old = {}; // Remember the old values, and insert the new ones for ( name in options ) { old[ name ] = elem.style[ name ]; elem.style[ name ] = options[ name ]; } ret = callback.apply( elem, args || [] ); // Revert the old values for ( name in options ) { elem.style[ name ] = old[ name ]; } return ret; }; function adjustCSS( elem, prop, valueParts, tween ) { var adjusted, scale, maxIterations = 20, currentValue = tween ? function() { return tween.cur(); } : function() { return jQuery.css( elem, prop, "" ); }, initial = currentValue(), unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), // Starting value computation is required for potential unit mismatches initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && rcssNum.exec( jQuery.css( elem, prop ) ); if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { // Support: Firefox <=54 // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) initial = initial / 2; // Trust units reported by jQuery.css unit = unit || initialInUnit[ 3 ]; // Iteratively approximate from a nonzero starting point initialInUnit = +initial || 1; while ( maxIterations-- ) { // Evaluate and update our best guess (doubling guesses that zero out). // Finish if the scale equals or crosses 1 (making the old*new product non-positive). 
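// Sketch of the approximation (illustrative): each pass writes the current
// guess in the requested unit, reads back the computed value, and divides
// the guess by scale (current / initial); iteration ends once successive
// scale factors reach or cross 1, meaning the guess now reproduces the
// element's starting value in the target unit.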
jQuery.style( elem, prop, initialInUnit + unit ); if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { maxIterations = 0; } initialInUnit = initialInUnit / scale; } initialInUnit = initialInUnit * 2; jQuery.style( elem, prop, initialInUnit + unit ); // Make sure we update the tween properties later on valueParts = valueParts || []; } if ( valueParts ) { initialInUnit = +initialInUnit || +initial || 0; // Apply relative offset (+=/-=) if specified adjusted = valueParts[ 1 ] ? initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : +valueParts[ 2 ]; if ( tween ) { tween.unit = unit; tween.start = initialInUnit; tween.end = adjusted; } } return adjusted; } var defaultDisplayMap = {}; function getDefaultDisplay( elem ) { var temp, doc = elem.ownerDocument, nodeName = elem.nodeName, display = defaultDisplayMap[ nodeName ]; if ( display ) { return display; } temp = doc.body.appendChild( doc.createElement( nodeName ) ); display = jQuery.css( temp, "display" ); temp.parentNode.removeChild( temp ); if ( display === "none" ) { display = "block"; } defaultDisplayMap[ nodeName ] = display; return display; } function showHide( elements, show ) { var display, elem, values = [], index = 0, length = elements.length; // Determine new display value for elements that need to change for ( ; index < length; index++ ) { elem = elements[ index ]; if ( !elem.style ) { continue; } display = elem.style.display; if ( show ) { // Since we force visibility upon cascade-hidden elements, an immediate (and slow) // check is required in this first loop unless we have a nonempty display value (either // inline or about-to-be-restored) if ( display === "none" ) { values[ index ] = dataPriv.get( elem, "display" ) || null; if ( !values[ index ] ) { elem.style.display = ""; } } if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { values[ index ] = getDefaultDisplay( elem ); } } else { if ( display !== "none" ) { values[ index ] = "none"; // Remember what we're overwriting dataPriv.set( elem, "display", display ); } } } // Set the display of the elements in a second loop to avoid constant reflow for ( index = 0; index < length; index++ ) { if ( values[ index ] != null ) { elements[ index ].style.display = values[ index ]; } } return elements; } jQuery.fn.extend( { show: function() { return showHide( this, true ); }, hide: function() { return showHide( this ); }, toggle: function( state ) { if ( typeof state === "boolean" ) { return state ? this.show() : this.hide(); } return this.each( function() { if ( isHiddenWithinTree( this ) ) { jQuery( this ).show(); } else { jQuery( this ).hide(); } } ); } } ); var rcheckableType = ( /^(?:checkbox|radio)$/i ); var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); // We have to close these tags to support XHTML (#13200) var wrapMap = { // Support: IE <=9 only option: [ 1, "<select multiple='multiple'>", "</select>" ], // XHTML parsers do not magically insert elements in the // same way that tag soup parsers do. So we cannot shorten // this by omitting <tbody> or other required elements. 
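// Sketch of why the wrappers matter (illustrative): innerHTML discards
// orphaned table parts, so a bare row only survives inside its full ancestry:
//
//   div.innerHTML = "<tr><td>x</td></tr>";  // tr/td tags are dropped
//
// Hence tr maps to depth 2 inside "<table><tbody>" below: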
thead: [ 1, "<table>", "</table>" ], col: [ 2, "<table><colgroup>", "</colgroup></table>" ], tr: [ 2, "<table><tbody>", "</tbody></table>" ], td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ], _default: [ 0, "", "" ] }; // Support: IE <=9 only wrapMap.optgroup = wrapMap.option; wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; wrapMap.th = wrapMap.td; function getAll( context, tag ) { // Support: IE <=9 - 11 only // Use typeof to avoid zero-argument method invocation on host objects (#15151) var ret; if ( typeof context.getElementsByTagName !== "undefined" ) { ret = context.getElementsByTagName( tag || "*" ); } else if ( typeof context.querySelectorAll !== "undefined" ) { ret = context.querySelectorAll( tag || "*" ); } else { ret = []; } if ( tag === undefined || tag && nodeName( context, tag ) ) { return jQuery.merge( [ context ], ret ); } return ret; } // Mark scripts as having already been evaluated function setGlobalEval( elems, refElements ) { var i = 0, l = elems.length; for ( ; i < l; i++ ) { dataPriv.set( elems[ i ], "globalEval", !refElements || dataPriv.get( refElements[ i ], "globalEval" ) ); } } var rhtml = /<|&#?\w+;/; function buildFragment( elems, context, scripts, selection, ignored ) { var elem, tmp, tag, wrap, contains, j, fragment = context.createDocumentFragment(), nodes = [], i = 0, l = elems.length; for ( ; i < l; i++ ) { elem = elems[ i ]; if ( elem || elem === 0 ) { // Add nodes directly if ( toType( elem ) === "object" ) { // Support: Android <=4.0 only, PhantomJS 1 only // push.apply(_, arraylike) throws on ancient WebKit jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); // Convert non-html into a text node } else if ( !rhtml.test( elem ) ) { nodes.push( context.createTextNode( elem ) ); // Convert html into DOM nodes } else { tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); // Deserialize a standard representation tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); wrap = wrapMap[ tag ] || wrapMap._default; tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; // Descend through wrappers to the right content j = wrap[ 0 ]; while ( j-- ) { tmp = tmp.lastChild; } // Support: Android <=4.0 only, PhantomJS 1 only // push.apply(_, arraylike) throws on ancient WebKit jQuery.merge( nodes, tmp.childNodes ); // Remember the top-level container tmp = fragment.firstChild; // Ensure the created nodes are orphaned (#12392) tmp.textContent = ""; } } } // Remove wrapper from fragment fragment.textContent = ""; i = 0; while ( ( elem = nodes[ i++ ] ) ) { // Skip elements already in the context collection (trac-4087) if ( selection && jQuery.inArray( elem, selection ) > -1 ) { if ( ignored ) { ignored.push( elem ); } continue; } contains = jQuery.contains( elem.ownerDocument, elem ); // Append to fragment tmp = getAll( fragment.appendChild( elem ), "script" ); // Preserve script evaluation history if ( contains ) { setGlobalEval( tmp ); } // Capture executables if ( scripts ) { j = 0; while ( ( elem = tmp[ j++ ] ) ) { if ( rscriptType.test( elem.type || "" ) ) { scripts.push( elem ); } } } } return fragment; } ( function() { var fragment = document.createDocumentFragment(), div = fragment.appendChild( document.createElement( "div" ) ), input = document.createElement( "input" ); // Support: Android 4.0 - 4.3 only // Check state lost if the name is set (#11217) // Support: Windows Web Apps (WWA) // `name` and `type` must use .setAttribute for WWA (#14901) input.setAttribute( "type", 
"radio" ); input.setAttribute( "checked", "checked" ); input.setAttribute( "name", "t" ); div.appendChild( input ); // Support: Android <=4.1 only // Older WebKit doesn't clone checked state correctly in fragments support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; // Support: IE <=11 only // Make sure textarea (and checkbox) defaultValue is properly cloned div.innerHTML = "<textarea>x</textarea>"; support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; } )(); var documentElement = document.documentElement; var rkeyEvent = /^key/, rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, rtypenamespace = /^([^.]*)(?:\.(.+)|)/; function returnTrue() { return true; } function returnFalse() { return false; } // Support: IE <=9 only // See #13393 for more info function safeActiveElement() { try { return document.activeElement; } catch ( err ) { } } function on( elem, types, selector, data, fn, one ) { var origFn, type; // Types can be a map of types/handlers if ( typeof types === "object" ) { // ( types-Object, selector, data ) if ( typeof selector !== "string" ) { // ( types-Object, data ) data = data || selector; selector = undefined; } for ( type in types ) { on( elem, type, selector, data, types[ type ], one ); } return elem; } if ( data == null && fn == null ) { // ( types, fn ) fn = selector; data = selector = undefined; } else if ( fn == null ) { if ( typeof selector === "string" ) { // ( types, selector, fn ) fn = data; data = undefined; } else { // ( types, data, fn ) fn = data; data = selector; selector = undefined; } } if ( fn === false ) { fn = returnFalse; } else if ( !fn ) { return elem; } if ( one === 1 ) { origFn = fn; fn = function( event ) { // Can use an empty set, since event contains the info jQuery().off( event ); return origFn.apply( this, arguments ); }; // Use same guid so caller can remove using origFn fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); } return elem.each( function() { jQuery.event.add( this, types, fn, data, selector ); } ); } /* * Helper functions for managing events -- not part of the public interface. * Props to Dean Edwards' addEvent library for many of the ideas. */ jQuery.event = { global: {}, add: function( elem, types, handler, data, selector ) { var handleObjIn, eventHandle, tmp, events, t, handleObj, special, handlers, type, namespaces, origType, elemData = dataPriv.get( elem ); // Don't attach events to noData or text/comment nodes (but allow plain objects) if ( !elemData ) { return; } // Caller can pass in an object of custom data in lieu of the handler if ( handler.handler ) { handleObjIn = handler; handler = handleObjIn.handler; selector = handleObjIn.selector; } // Ensure that invalid selectors throw exceptions at attach time // Evaluate against documentElement in case elem is a non-element node (e.g., document) if ( selector ) { jQuery.find.matchesSelector( documentElement, selector ); } // Make sure that the handler has a unique ID, used to find/remove it later if ( !handler.guid ) { handler.guid = jQuery.guid++; } // Init the element's event structure and main handler, if this is the first if ( !( events = elemData.events ) ) { events = elemData.events = {}; } if ( !( eventHandle = elemData.handle ) ) { eventHandle = elemData.handle = function( e ) { // Discard the second event of a jQuery.event.trigger() and // when an event is called after a page has unloaded return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
jQuery.event.dispatch.apply( elem, arguments ) : undefined; }; } // Handle multiple events separated by a space types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; t = types.length; while ( t-- ) { tmp = rtypenamespace.exec( types[ t ] ) || []; type = origType = tmp[ 1 ]; namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); // There *must* be a type, no attaching namespace-only handlers if ( !type ) { continue; } // If event changes its type, use the special event handlers for the changed type special = jQuery.event.special[ type ] || {}; // If selector defined, determine special event api type, otherwise given type type = ( selector ? special.delegateType : special.bindType ) || type; // Update special based on newly reset type special = jQuery.event.special[ type ] || {}; // handleObj is passed to all event handlers handleObj = jQuery.extend( { type: type, origType: origType, data: data, handler: handler, guid: handler.guid, selector: selector, needsContext: selector && jQuery.expr.match.needsContext.test( selector ), namespace: namespaces.join( "." ) }, handleObjIn ); // Init the event handler queue if we're the first if ( !( handlers = events[ type ] ) ) { handlers = events[ type ] = []; handlers.delegateCount = 0; // Only use addEventListener if the special events handler returns false if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { if ( elem.addEventListener ) { elem.addEventListener( type, eventHandle ); } } } if ( special.add ) { special.add.call( elem, handleObj ); if ( !handleObj.handler.guid ) { handleObj.handler.guid = handler.guid; } } // Add to the element's handler list, delegates in front if ( selector ) { handlers.splice( handlers.delegateCount++, 0, handleObj ); } else { handlers.push( handleObj ); } // Keep track of which events have ever been used, for event optimization jQuery.event.global[ type ] = true; } }, // Detach an event or set of events from an element remove: function( elem, types, handler, selector, mappedTypes ) { var j, origCount, tmp, events, t, handleObj, special, handlers, type, namespaces, origType, elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); if ( !elemData || !( events = elemData.events ) ) { return; } // Once for each type.namespace in types; type may be omitted types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; t = types.length; while ( t-- ) { tmp = rtypenamespace.exec( types[ t ] ) || []; type = origType = tmp[ 1 ]; namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); // Unbind all events (on this namespace, if provided) for the element if ( !type ) { for ( type in events ) { jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); } continue; } special = jQuery.event.special[ type ] || {}; type = ( selector ? 
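// Mapping sketch (illustrative; `fn` is a hypothetical handler): focus and
// blur do not bubble, so delegated handlers are stored under their bubbling
// counterparts,
//
//   jQuery( document ).on( "focus", "input", fn );  // stored as "focusin"
//
// and removal must consult the same mapping to find them.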
special.delegateType : special.bindType ) || type; handlers = events[ type ] || []; tmp = tmp[ 2 ] && new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); // Remove matching events origCount = j = handlers.length; while ( j-- ) { handleObj = handlers[ j ]; if ( ( mappedTypes || origType === handleObj.origType ) && ( !handler || handler.guid === handleObj.guid ) && ( !tmp || tmp.test( handleObj.namespace ) ) && ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { handlers.splice( j, 1 ); if ( handleObj.selector ) { handlers.delegateCount--; } if ( special.remove ) { special.remove.call( elem, handleObj ); } } } // Remove generic event handler if we removed something and no more handlers exist // (avoids potential for endless recursion during removal of special event handlers) if ( origCount && !handlers.length ) { if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { jQuery.removeEvent( elem, type, elemData.handle ); } delete events[ type ]; } } // Remove data and the expando if it's no longer used if ( jQuery.isEmptyObject( events ) ) { dataPriv.remove( elem, "handle events" ); } }, dispatch: function( nativeEvent ) { // Make a writable jQuery.Event from the native event object var event = jQuery.event.fix( nativeEvent ); var i, j, ret, matched, handleObj, handlerQueue, args = new Array( arguments.length ), handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], special = jQuery.event.special[ event.type ] || {}; // Use the fix-ed jQuery.Event rather than the (read-only) native event args[ 0 ] = event; for ( i = 1; i < arguments.length; i++ ) { args[ i ] = arguments[ i ]; } event.delegateTarget = this; // Call the preDispatch hook for the mapped type, and let it bail if desired if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { return; } // Determine handlers handlerQueue = jQuery.event.handlers.call( this, event, handlers ); // Run delegates first; they may want to stop propagation beneath us i = 0; while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { event.currentTarget = matched.elem; j = 0; while ( ( handleObj = matched.handlers[ j++ ] ) && !event.isImmediatePropagationStopped() ) { // Triggered event must either 1) have no namespace, or 2) have namespace(s) // a subset or equal to those in the bound event (both can have no namespace). 
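// Namespace sketch (illustrative): a handler bound as "click.myPlugin.ui"
// runs for .trigger( "click" ) and .trigger( "click.myPlugin" ), but not for
// .trigger( "click.other" ); event.rnamespace encodes that subset test below.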
if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { event.handleObj = handleObj; event.data = handleObj.data; ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || handleObj.handler ).apply( matched.elem, args ); if ( ret !== undefined ) { if ( ( event.result = ret ) === false ) { event.preventDefault(); event.stopPropagation(); } } } } } // Call the postDispatch hook for the mapped type if ( special.postDispatch ) { special.postDispatch.call( this, event ); } return event.result; }, handlers: function( event, handlers ) { var i, handleObj, sel, matchedHandlers, matchedSelectors, handlerQueue = [], delegateCount = handlers.delegateCount, cur = event.target; // Find delegate handlers if ( delegateCount && // Support: IE <=9 // Black-hole SVG <use> instance trees (trac-13180) cur.nodeType && // Support: Firefox <=42 // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click // Support: IE 11 only // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) !( event.type === "click" && event.button >= 1 ) ) { for ( ; cur !== this; cur = cur.parentNode || this ) { // Don't check non-elements (#13208) // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { matchedHandlers = []; matchedSelectors = {}; for ( i = 0; i < delegateCount; i++ ) { handleObj = handlers[ i ]; // Don't conflict with Object.prototype properties (#13203) sel = handleObj.selector + " "; if ( matchedSelectors[ sel ] === undefined ) { matchedSelectors[ sel ] = handleObj.needsContext ? jQuery( sel, this ).index( cur ) > -1 : jQuery.find( sel, this, null, [ cur ] ).length; } if ( matchedSelectors[ sel ] ) { matchedHandlers.push( handleObj ); } } if ( matchedHandlers.length ) { handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); } } } } // Add the remaining (directly-bound) handlers cur = this; if ( delegateCount < handlers.length ) { handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); } return handlerQueue; }, addProp: function( name, hook ) { Object.defineProperty( jQuery.Event.prototype, name, { enumerable: true, configurable: true, get: isFunction( hook ) ? function() { if ( this.originalEvent ) { return hook( this.originalEvent ); } } : function() { if ( this.originalEvent ) { return this.originalEvent[ name ]; } }, set: function( value ) { Object.defineProperty( this, name, { enumerable: true, configurable: true, writable: true, value: value } ); } } ); }, fix: function( originalEvent ) { return originalEvent[ jQuery.expando ] ? 
originalEvent : new jQuery.Event( originalEvent ); }, special: { load: { // Prevent triggered image.load events from bubbling to window.load noBubble: true }, focus: { // Fire native event if possible so blur/focus sequence is correct trigger: function() { if ( this !== safeActiveElement() && this.focus ) { this.focus(); return false; } }, delegateType: "focusin" }, blur: { trigger: function() { if ( this === safeActiveElement() && this.blur ) { this.blur(); return false; } }, delegateType: "focusout" }, click: { // For checkbox, fire native event so checked state will be right trigger: function() { if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { this.click(); return false; } }, // For cross-browser consistency, don't fire native .click() on links _default: function( event ) { return nodeName( event.target, "a" ); } }, beforeunload: { postDispatch: function( event ) { // Support: Firefox 20+ // Firefox doesn't alert if the returnValue field is not set. if ( event.result !== undefined && event.originalEvent ) { event.originalEvent.returnValue = event.result; } } } } }; jQuery.removeEvent = function( elem, type, handle ) { // This "if" is needed for plain objects if ( elem.removeEventListener ) { elem.removeEventListener( type, handle ); } }; jQuery.Event = function( src, props ) { // Allow instantiation without the 'new' keyword if ( !( this instanceof jQuery.Event ) ) { return new jQuery.Event( src, props ); } // Event object if ( src && src.type ) { this.originalEvent = src; this.type = src.type; // Events bubbling up the document may have been marked as prevented // by a handler lower down the tree; reflect the correct value. this.isDefaultPrevented = src.defaultPrevented || src.defaultPrevented === undefined && // Support: Android <=2.3 only src.returnValue === false ? returnTrue : returnFalse; // Create target properties // Support: Safari <=6 - 7 only // Target should not be a text node (#504, #13143) this.target = ( src.target && src.target.nodeType === 3 ) ? 
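// Normalization sketch (illustrative): old Safari could report the text node
// beneath an element as event.target; promoting it to parentNode means
// handlers always see an element, e.g. clicking the text of <a>link</a>
// yields the anchor rather than its text node.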
src.target.parentNode : src.target; this.currentTarget = src.currentTarget; this.relatedTarget = src.relatedTarget; // Event type } else { this.type = src; } // Put explicitly provided properties onto the event object if ( props ) { jQuery.extend( this, props ); } // Create a timestamp if incoming event doesn't have one this.timeStamp = src && src.timeStamp || Date.now(); // Mark it as fixed this[ jQuery.expando ] = true; }; // jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding // https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html jQuery.Event.prototype = { constructor: jQuery.Event, isDefaultPrevented: returnFalse, isPropagationStopped: returnFalse, isImmediatePropagationStopped: returnFalse, isSimulated: false, preventDefault: function() { var e = this.originalEvent; this.isDefaultPrevented = returnTrue; if ( e && !this.isSimulated ) { e.preventDefault(); } }, stopPropagation: function() { var e = this.originalEvent; this.isPropagationStopped = returnTrue; if ( e && !this.isSimulated ) { e.stopPropagation(); } }, stopImmediatePropagation: function() { var e = this.originalEvent; this.isImmediatePropagationStopped = returnTrue; if ( e && !this.isSimulated ) { e.stopImmediatePropagation(); } this.stopPropagation(); } }; // Includes all common event props including KeyEvent and MouseEvent specific props jQuery.each( { altKey: true, bubbles: true, cancelable: true, changedTouches: true, ctrlKey: true, detail: true, eventPhase: true, metaKey: true, pageX: true, pageY: true, shiftKey: true, view: true, "char": true, charCode: true, key: true, keyCode: true, button: true, buttons: true, clientX: true, clientY: true, offsetX: true, offsetY: true, pointerId: true, pointerType: true, screenX: true, screenY: true, targetTouches: true, toElement: true, touches: true, which: function( event ) { var button = event.button; // Add which for key events if ( event.which == null && rkeyEvent.test( event.type ) ) { return event.charCode != null ? event.charCode : event.keyCode; } // Add which for click: 1 === left; 2 === middle; 3 === right if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { if ( button & 1 ) { return 1; } if ( button & 2 ) { return 3; } if ( button & 4 ) { return 2; } return 0; } return event.which; } }, jQuery.event.addProp ); // Create mouseenter/leave events using mouseover/out and event-time checks // so that event delegation works in jQuery. // Do the same for pointerenter/pointerleave and pointerover/pointerout // // Support: Safari 7 only // Safari sends mouseenter too often; see: // https://bugs.chromium.org/p/chromium/issues/detail?id=470258 // for the description of the bug (it existed in older Chrome versions as well). jQuery.each( { mouseenter: "mouseover", mouseleave: "mouseout", pointerenter: "pointerover", pointerleave: "pointerout" }, function( orig, fix ) { jQuery.event.special[ orig ] = { delegateType: fix, bindType: fix, handle: function( event ) { var ret, target = this, related = event.relatedTarget, handleObj = event.handleObj; // For mouseenter/leave call the handler if related is outside the target. 
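// Delegation sketch (illustrative; "#list" and `fn` are hypothetical): this
// mapping is what lets
//
//   jQuery( "#list" ).on( "mouseenter", "li", fn );
//
// work at all: mouseenter does not bubble, so the handler actually rides a
// bubbling mouseover and fires only when relatedTarget is outside the <li>.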
// NB: No relatedTarget if the mouse left/entered the browser window if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { event.type = handleObj.origType; ret = handleObj.handler.apply( this, arguments ); event.type = fix; } return ret; } }; } ); jQuery.fn.extend( { on: function( types, selector, data, fn ) { return on( this, types, selector, data, fn ); }, one: function( types, selector, data, fn ) { return on( this, types, selector, data, fn, 1 ); }, off: function( types, selector, fn ) { var handleObj, type; if ( types && types.preventDefault && types.handleObj ) { // ( event ) dispatched jQuery.Event handleObj = types.handleObj; jQuery( types.delegateTarget ).off( handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, handleObj.selector, handleObj.handler ); return this; } if ( typeof types === "object" ) { // ( types-object [, selector] ) for ( type in types ) { this.off( type, selector, types[ type ] ); } return this; } if ( selector === false || typeof selector === "function" ) { // ( types [, fn] ) fn = selector; selector = undefined; } if ( fn === false ) { fn = returnFalse; } return this.each( function() { jQuery.event.remove( this, types, fn, selector ); } ); } } ); var /* eslint-disable max-len */ // See https://github.com/eslint/eslint/issues/3229 rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, /* eslint-enable */ // Support: IE <=10 - 11, Edge 12 - 13 only // In IE/Edge using regex groups here causes severe slowdowns. // See https://connect.microsoft.com/IE/feedback/details/1736512/ rnoInnerhtml = /<script|<style|<link/i, // checked="checked" or checked rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; // Prefer a tbody over its parent table for containing new rows function manipulationTarget( elem, content ) { if ( nodeName( elem, "table" ) && nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { return jQuery( elem ).children( "tbody" )[ 0 ] || elem; } return elem; } // Replace/restore the type attribute of script elements for safe DOM manipulation function disableScript( elem ) { elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; return elem; } function restoreScript( elem ) { if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { elem.type = elem.type.slice( 5 ); } else { elem.removeAttribute( "type" ); } return elem; } function cloneCopyEvent( src, dest ) { var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; if ( dest.nodeType !== 1 ) { return; } // 1. Copy private data: events, handlers, etc. if ( dataPriv.hasData( src ) ) { pdataOld = dataPriv.access( src ); pdataCur = dataPriv.set( dest, pdataOld ); events = pdataOld.events; if ( events ) { delete pdataCur.handle; pdataCur.events = {}; for ( type in events ) { for ( i = 0, l = events[ type ].length; i < l; i++ ) { jQuery.event.add( dest, type, events[ type ][ i ] ); } } } } // 2. Copy user data if ( dataUser.hasData( src ) ) { udataOld = dataUser.access( src ); udataCur = jQuery.extend( {}, udataOld ); dataUser.set( dest, udataCur ); } } // Fix IE bugs, see support tests function fixInput( src, dest ) { var nodeName = dest.nodeName.toLowerCase(); // Fails to persist the checked state of a cloned checkbox or radio button. 
if ( nodeName === "input" && rcheckableType.test( src.type ) ) { dest.checked = src.checked; // Fails to return the selected option to the default selected state when cloning options } else if ( nodeName === "input" || nodeName === "textarea" ) { dest.defaultValue = src.defaultValue; } } function domManip( collection, args, callback, ignored ) { // Flatten any nested arrays args = concat.apply( [], args ); var fragment, first, scripts, hasScripts, node, doc, i = 0, l = collection.length, iNoClone = l - 1, value = args[ 0 ], valueIsFunction = isFunction( value ); // We can't cloneNode fragments that contain checked, in WebKit if ( valueIsFunction || ( l > 1 && typeof value === "string" && !support.checkClone && rchecked.test( value ) ) ) { return collection.each( function( index ) { var self = collection.eq( index ); if ( valueIsFunction ) { args[ 0 ] = value.call( this, index, self.html() ); } domManip( self, args, callback, ignored ); } ); } if ( l ) { fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); first = fragment.firstChild; if ( fragment.childNodes.length === 1 ) { fragment = first; } // Require either new content or an interest in ignored elements to invoke the callback if ( first || ignored ) { scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); hasScripts = scripts.length; // Use the original fragment for the last item // instead of the first because it can end up // being emptied incorrectly in certain situations (#8070). for ( ; i < l; i++ ) { node = fragment; if ( i !== iNoClone ) { node = jQuery.clone( node, true, true ); // Keep references to cloned scripts for later restoration if ( hasScripts ) { // Support: Android <=4.0 only, PhantomJS 1 only // push.apply(_, arraylike) throws on ancient WebKit jQuery.merge( scripts, getAll( node, "script" ) ); } } callback.call( collection[ i ], node, i ); } if ( hasScripts ) { doc = scripts[ scripts.length - 1 ].ownerDocument; // Reenable scripts jQuery.map( scripts, restoreScript ); // Evaluate executable scripts on first document insertion for ( i = 0; i < hasScripts; i++ ) { node = scripts[ i ]; if ( rscriptType.test( node.type || "" ) && !dataPriv.access( node, "globalEval" ) && jQuery.contains( doc, node ) ) { if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { // Optional AJAX dependency, but won't run scripts if not present if ( jQuery._evalUrl ) { jQuery._evalUrl( node.src ); } } else { DOMEval( node.textContent.replace( rcleanScript, "" ), doc, node ); } } } } } } return collection; } function remove( elem, selector, keepData ) { var node, nodes = selector ? 
jQuery.filter( selector, elem ) : elem, i = 0; for ( ; ( node = nodes[ i ] ) != null; i++ ) { if ( !keepData && node.nodeType === 1 ) { jQuery.cleanData( getAll( node ) ); } if ( node.parentNode ) { if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { setGlobalEval( getAll( node, "script" ) ); } node.parentNode.removeChild( node ); } } return elem; } jQuery.extend( { htmlPrefilter: function( html ) { return html.replace( rxhtmlTag, "<$1></$2>" ); }, clone: function( elem, dataAndEvents, deepDataAndEvents ) { var i, l, srcElements, destElements, clone = elem.cloneNode( true ), inPage = jQuery.contains( elem.ownerDocument, elem ); // Fix IE cloning issues if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && !jQuery.isXMLDoc( elem ) ) { // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 destElements = getAll( clone ); srcElements = getAll( elem ); for ( i = 0, l = srcElements.length; i < l; i++ ) { fixInput( srcElements[ i ], destElements[ i ] ); } } // Copy the events from the original to the clone if ( dataAndEvents ) { if ( deepDataAndEvents ) { srcElements = srcElements || getAll( elem ); destElements = destElements || getAll( clone ); for ( i = 0, l = srcElements.length; i < l; i++ ) { cloneCopyEvent( srcElements[ i ], destElements[ i ] ); } } else { cloneCopyEvent( elem, clone ); } } // Preserve script evaluation history destElements = getAll( clone, "script" ); if ( destElements.length > 0 ) { setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); } // Return the cloned set return clone; }, cleanData: function( elems ) { var data, elem, type, special = jQuery.event.special, i = 0; for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { if ( acceptData( elem ) ) { if ( ( data = elem[ dataPriv.expando ] ) ) { if ( data.events ) { for ( type in data.events ) { if ( special[ type ] ) { jQuery.event.remove( elem, type ); // This is a shortcut to avoid jQuery.event.remove's overhead } else { jQuery.removeEvent( elem, type, data.handle ); } } } // Support: Chrome <=35 - 45+ // Assign undefined instead of using delete, see Data#remove elem[ dataPriv.expando ] = undefined; } if ( elem[ dataUser.expando ] ) { // Support: Chrome <=35 - 45+ // Assign undefined instead of using delete, see Data#remove elem[ dataUser.expando ] = undefined; } } } } } ); jQuery.fn.extend( { detach: function( selector ) { return remove( this, selector, true ); }, remove: function( selector ) { return remove( this, selector ); }, text: function( value ) { return access( this, function( value ) { return value === undefined ? 
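// Illustrative usage sketch (comments only; "#save" and onSave are
// hypothetical). jQuery.clone() copies events and data through
// cloneCopyEvent() only when dataAndEvents is requested:
//
//     var saveButton = jQuery( "#save" ).on( "click", onSave );
//     var withEvents = saveButton.clone( true );  // click handler copied
//     var markupOnly = saveButton.clone();        // no handlers, no data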
jQuery.text( this ) : this.empty().each( function() { if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { this.textContent = value; } } ); }, null, value, arguments.length ); }, append: function() { return domManip( this, arguments, function( elem ) { if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { var target = manipulationTarget( this, elem ); target.appendChild( elem ); } } ); }, prepend: function() { return domManip( this, arguments, function( elem ) { if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { var target = manipulationTarget( this, elem ); target.insertBefore( elem, target.firstChild ); } } ); }, before: function() { return domManip( this, arguments, function( elem ) { if ( this.parentNode ) { this.parentNode.insertBefore( elem, this ); } } ); }, after: function() { return domManip( this, arguments, function( elem ) { if ( this.parentNode ) { this.parentNode.insertBefore( elem, this.nextSibling ); } } ); }, empty: function() { var elem, i = 0; for ( ; ( elem = this[ i ] ) != null; i++ ) { if ( elem.nodeType === 1 ) { // Prevent memory leaks jQuery.cleanData( getAll( elem, false ) ); // Remove any remaining nodes elem.textContent = ""; } } return this; }, clone: function( dataAndEvents, deepDataAndEvents ) { dataAndEvents = dataAndEvents == null ? false : dataAndEvents; deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; return this.map( function() { return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); } ); }, html: function( value ) { return access( this, function( value ) { var elem = this[ 0 ] || {}, i = 0, l = this.length; if ( value === undefined && elem.nodeType === 1 ) { return elem.innerHTML; } // See if we can take a shortcut and just use innerHTML if ( typeof value === "string" && !rnoInnerhtml.test( value ) && !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { value = jQuery.htmlPrefilter( value ); try { for ( ; i < l; i++ ) { elem = this[ i ] || {}; // Remove element nodes and prevent memory leaks if ( elem.nodeType === 1 ) { jQuery.cleanData( getAll( elem, false ) ); elem.innerHTML = value; } } elem = 0; // If using innerHTML throws an exception, use the fallback method } catch ( e ) {} } if ( elem ) { this.empty().append( value ); } }, null, value, arguments.length ); }, replaceWith: function() { var ignored = []; // Make the changes, replacing each non-ignored context element with the new content return domManip( this, arguments, function( elem ) { var parent = this.parentNode; if ( jQuery.inArray( this, ignored ) < 0 ) { jQuery.cleanData( getAll( this ) ); if ( parent ) { parent.replaceChild( elem, this ); } } // Force callback invocation }, ignored ); } } ); jQuery.each( { appendTo: "append", prependTo: "prepend", insertBefore: "before", insertAfter: "after", replaceAll: "replaceWith" }, function( name, original ) { jQuery.fn[ name ] = function( selector ) { var elems, ret = [], insert = jQuery( selector ), last = insert.length - 1, i = 0; for ( ; i <= last; i++ ) { elems = i === last ? 
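// Illustrative usage sketch (comments only; "#log" is hypothetical) of the
// manipulation methods defined above:
//
//     jQuery( "#log" )
//         .empty()                     // cleanData() + textContent = ""
//         .append( "<p>first</p>" )    // via domManip()/buildFragment()
//         .prepend( "<h2>Log</h2>" );
//     jQuery( "#log" ).html();         // getter fast path: elem.innerHTML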
this : this.clone( true ); jQuery( insert[ i ] )[ original ]( elems ); // Support: Android <=4.0 only, PhantomJS 1 only // .get() because push.apply(_, arraylike) throws on ancient WebKit push.apply( ret, elems.get() ); } return this.pushStack( ret ); }; } ); var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); var getStyles = function( elem ) { // Support: IE <=11 only, Firefox <=30 (#15098, #14150) // IE throws on elements created in popups // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" var view = elem.ownerDocument.defaultView; if ( !view || !view.opener ) { view = window; } return view.getComputedStyle( elem ); }; var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); ( function() { // Executing both pixelPosition & boxSizingReliable tests require only one layout // so they're executed at the same time to save the second computation. function computeStyleTests() { // This is a singleton, we need to execute it only once if ( !div ) { return; } container.style.cssText = "position:absolute;left:-11111px;width:60px;" + "margin-top:1px;padding:0;border:0"; div.style.cssText = "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + "margin:auto;border:1px;padding:1px;" + "width:60%;top:1%"; documentElement.appendChild( container ).appendChild( div ); var divStyle = window.getComputedStyle( div ); pixelPositionVal = divStyle.top !== "1%"; // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 // Some styles come back with percentage values, even though they shouldn't div.style.right = "60%"; pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; // Support: IE 9 - 11 only // Detect misreporting of content dimensions for box-sizing:border-box elements boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; // Support: IE 9 only // Detect overflow:scroll screwiness (gh-3699) div.style.position = "absolute"; scrollboxSizeVal = div.offsetWidth === 36 || "absolute"; documentElement.removeChild( container ); // Nullify the div so it wouldn't be stored in the memory and // it will also be a sign that checks already performed div = null; } function roundPixelMeasures( measure ) { return Math.round( parseFloat( measure ) ); } var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, reliableMarginLeftVal, container = document.createElement( "div" ), div = document.createElement( "div" ); // Finish early in limited (non-browser) environments if ( !div.style ) { return; } // Support: IE <=9 - 11 only // Style of cloned element affects source element cloned (#8908) div.style.backgroundClip = "content-box"; div.cloneNode( true ).style.backgroundClip = ""; support.clearCloneStyle = div.style.backgroundClip === "content-box"; jQuery.extend( support, { boxSizingReliable: function() { computeStyleTests(); return boxSizingReliableVal; }, pixelBoxStyles: function() { computeStyleTests(); return pixelBoxStylesVal; }, pixelPosition: function() { computeStyleTests(); return pixelPositionVal; }, reliableMarginLeft: function() { computeStyleTests(); return reliableMarginLeftVal; }, scrollboxSize: function() { computeStyleTests(); return scrollboxSizeVal; } } ); } )(); function curCSS( elem, name, computed ) { var width, minWidth, maxWidth, ret, // Support: Firefox 51+ // Retrieving style before computed somehow // fixes an issue with getting wrong values // on 
detached elements style = elem.style; computed = computed || getStyles( elem ); // getPropertyValue is needed for: // .css('filter') (IE 9 only, #12537) // .css('--customProperty) (#3144) if ( computed ) { ret = computed.getPropertyValue( name ) || computed[ name ]; if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { ret = jQuery.style( elem, name ); } // A tribute to the "awesome hack by Dean Edwards" // Android Browser returns percentage for some values, // but width seems to be reliably pixels. // This is against the CSSOM draft spec: // https://drafts.csswg.org/cssom/#resolved-values if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { // Remember the original values width = style.width; minWidth = style.minWidth; maxWidth = style.maxWidth; // Put in the new values to get a computed value out style.minWidth = style.maxWidth = style.width = ret; ret = computed.width; // Revert the changed values style.width = width; style.minWidth = minWidth; style.maxWidth = maxWidth; } } return ret !== undefined ? // Support: IE <=9 - 11 only // IE returns zIndex value as an integer. ret + "" : ret; } function addGetHookIf( conditionFn, hookFn ) { // Define the hook, we'll check on the first run if it's really needed. return { get: function() { if ( conditionFn() ) { // Hook not needed (or it's not possible to use it due // to missing dependency), remove it. delete this.get; return; } // Hook needed; redefine it so that the support test is not executed again. return ( this.get = hookFn ).apply( this, arguments ); } }; } var // Swappable if display is none or starts with table // except "table", "table-cell", or "table-caption" // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display rdisplayswap = /^(none|table(?!-c[ea]).+)/, rcustomProp = /^--/, cssShow = { position: "absolute", visibility: "hidden", display: "block" }, cssNormalTransform = { letterSpacing: "0", fontWeight: "400" }, cssPrefixes = [ "Webkit", "Moz", "ms" ], emptyStyle = document.createElement( "div" ).style; // Return a css property mapped to a potentially vendor prefixed property function vendorPropName( name ) { // Shortcut for names that are not vendor prefixed if ( name in emptyStyle ) { return name; } // Check for vendor prefixed names var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), i = cssPrefixes.length; while ( i-- ) { name = cssPrefixes[ i ] + capName; if ( name in emptyStyle ) { return name; } } } // Return a property mapped along what jQuery.cssProps suggests or to // a vendor prefixed property. function finalPropName( name ) { var ret = jQuery.cssProps[ name ]; if ( !ret ) { ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; } return ret; } function setPositiveNumber( elem, value, subtract ) { // Any relative (+/-) values have already been // normalized at this point var matches = rcssNum.exec( value ); return matches ? // Guard against undefined "subtract", e.g., when used as in cssHooks Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : value; } function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { var i = dimension === "width" ? 1 : 0, extra = 0, delta = 0; // Adjustment may not be necessary if ( box === ( isBorderBox ? 
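// Illustrative usage sketch (comments only; "#panel" is hypothetical).
// finalPropName() consults jQuery.cssProps and then vendorPropName(), so
// an unprefixed name may transparently resolve to a Webkit/Moz/ms variant
// on engines that only implement the prefixed property:
//
//     jQuery( "#panel" ).css( "user-select", "none" );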
"border" : "content" ) ) { return 0; } for ( ; i < 4; i += 2 ) { // Both box models exclude margin if ( box === "margin" ) { delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); } // If we get here with a content-box, we're seeking "padding" or "border" or "margin" if ( !isBorderBox ) { // Add padding delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); // For "border" or "margin", add border if ( box !== "padding" ) { delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); // But still keep track of it otherwise } else { extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); } // If we get here with a border-box (content + padding + border), we're seeking "content" or // "padding" or "margin" } else { // For "content", subtract padding if ( box === "content" ) { delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); } // For "content" or "padding", subtract border if ( box !== "margin" ) { delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); } } } // Account for positive content-box scroll gutter when requested by providing computedVal if ( !isBorderBox && computedVal >= 0 ) { // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border // Assuming integer scroll gutter, subtract the rest and round down delta += Math.max( 0, Math.ceil( elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - computedVal - delta - extra - 0.5 ) ); } return delta; } function getWidthOrHeight( elem, dimension, extra ) { // Start with computed style var styles = getStyles( elem ), val = curCSS( elem, dimension, styles ), isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box", valueIsBorderBox = isBorderBox; // Support: Firefox <=54 // Return a confounding non-pixel value or feign ignorance, as appropriate. if ( rnumnonpx.test( val ) ) { if ( !extra ) { return val; } val = "auto"; } // Check for style in case a browser which returns unreliable values // for getComputedStyle silently falls back to the reliable elem.style valueIsBorderBox = valueIsBorderBox && ( support.boxSizingReliable() || val === elem.style[ dimension ] ); // Fall back to offsetWidth/offsetHeight when value is "auto" // This happens for inline elements with no explicit setting (gh-3571) // Support: Android <=4.1 - 4.3 only // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) if ( val === "auto" || !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) { val = elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ]; // offsetWidth/offsetHeight provide border-box values valueIsBorderBox = true; } // Normalize "" and auto val = parseFloat( val ) || 0; // Adjust for the element's box model return ( val + boxModelAdjustment( elem, dimension, extra || ( isBorderBox ? "border" : "content" ), valueIsBorderBox, styles, // Provide the current computed size to request scroll gutter calculation (gh-3589) val ) ) + "px"; } jQuery.extend( { // Add in style property hooks for overriding the default // behavior of getting and setting a style property cssHooks: { opacity: { get: function( elem, computed ) { if ( computed ) { // We should always get a number back from opacity var ret = curCSS( elem, "opacity" ); return ret === "" ? 
"1" : ret; } } } }, // Don't automatically add "px" to these possibly-unitless properties cssNumber: { "animationIterationCount": true, "columnCount": true, "fillOpacity": true, "flexGrow": true, "flexShrink": true, "fontWeight": true, "lineHeight": true, "opacity": true, "order": true, "orphans": true, "widows": true, "zIndex": true, "zoom": true }, // Add in properties whose names you wish to fix before // setting or getting the value cssProps: {}, // Get and set the style property on a DOM Node style: function( elem, name, value, extra ) { // Don't set styles on text and comment nodes if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { return; } // Make sure that we're working with the right name var ret, type, hooks, origName = camelCase( name ), isCustomProp = rcustomProp.test( name ), style = elem.style; // Make sure that we're working with the right name. We don't // want to query the value if it is a CSS custom property // since they are user-defined. if ( !isCustomProp ) { name = finalPropName( origName ); } // Gets hook for the prefixed version, then unprefixed version hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; // Check if we're setting a value if ( value !== undefined ) { type = typeof value; // Convert "+=" or "-=" to relative numbers (#7345) if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { value = adjustCSS( elem, name, ret ); // Fixes bug #9237 type = "number"; } // Make sure that null and NaN values aren't set (#7116) if ( value == null || value !== value ) { return; } // If a number was passed in, add the unit (except for certain CSS properties) if ( type === "number" ) { value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); } // background-* props affect original clone's values if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { style[ name ] = "inherit"; } // If a hook was provided, use that value, otherwise just set the specified value if ( !hooks || !( "set" in hooks ) || ( value = hooks.set( elem, value, extra ) ) !== undefined ) { if ( isCustomProp ) { style.setProperty( name, value ); } else { style[ name ] = value; } } } else { // If a hook was provided get the non-computed value from there if ( hooks && "get" in hooks && ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { return ret; } // Otherwise just get the value from the style object return style[ name ]; } }, css: function( elem, name, extra, styles ) { var val, num, hooks, origName = camelCase( name ), isCustomProp = rcustomProp.test( name ); // Make sure that we're working with the right name. We don't // want to modify the value if it is a CSS custom property // since they are user-defined. if ( !isCustomProp ) { name = finalPropName( origName ); } // Try prefixed name followed by the unprefixed name hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; // If a hook was provided get the computed value from there if ( hooks && "get" in hooks ) { val = hooks.get( elem, true, extra ); } // Otherwise, if a way to get the computed value exists, use that if ( val === undefined ) { val = curCSS( elem, name, styles ); } // Convert "normal" to computed value if ( val === "normal" && name in cssNormalTransform ) { val = cssNormalTransform[ name ]; } // Make numeric if forced or a qualifier was provided and val looks numeric if ( extra === "" || extra ) { num = parseFloat( val ); return extra === true || isFinite( num ) ? 
num || 0 : val; } return val; } } ); jQuery.each( [ "height", "width" ], function( i, dimension ) { jQuery.cssHooks[ dimension ] = { get: function( elem, computed, extra ) { if ( computed ) { // Certain elements can have dimension info if we invisibly show them // but it must have a current display style that would benefit return rdisplayswap.test( jQuery.css( elem, "display" ) ) && // Support: Safari 8+ // Table columns in Safari have non-zero offsetWidth & zero // getBoundingClientRect().width unless display is changed. // Support: IE <=11 only // Running getBoundingClientRect on a disconnected node // in IE throws an error. ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? swap( elem, cssShow, function() { return getWidthOrHeight( elem, dimension, extra ); } ) : getWidthOrHeight( elem, dimension, extra ); } }, set: function( elem, value, extra ) { var matches, styles = getStyles( elem ), isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box", subtract = extra && boxModelAdjustment( elem, dimension, extra, isBorderBox, styles ); // Account for unreliable border-box dimensions by comparing offset* to computed and // faking a content-box to get border and padding (gh-3699) if ( isBorderBox && support.scrollboxSize() === styles.position ) { subtract -= Math.ceil( elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - parseFloat( styles[ dimension ] ) - boxModelAdjustment( elem, dimension, "border", false, styles ) - 0.5 ); } // Convert to pixels if value adjustment is needed if ( subtract && ( matches = rcssNum.exec( value ) ) && ( matches[ 3 ] || "px" ) !== "px" ) { elem.style[ dimension ] = value; value = jQuery.css( elem, dimension ); } return setPositiveNumber( elem, value, subtract ); } }; } ); jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, function( elem, computed ) { if ( computed ) { return ( parseFloat( curCSS( elem, "marginLeft" ) ) || elem.getBoundingClientRect().left - swap( elem, { marginLeft: 0 }, function() { return elem.getBoundingClientRect().left; } ) ) + "px"; } } ); // These hooks are used by animate to expand properties jQuery.each( { margin: "", padding: "", border: "Width" }, function( prefix, suffix ) { jQuery.cssHooks[ prefix + suffix ] = { expand: function( value ) { var i = 0, expanded = {}, // Assumes a single number if not a string parts = typeof value === "string" ? value.split( " " ) : [ value ]; for ( ; i < 4; i++ ) { expanded[ prefix + cssExpand[ i ] + suffix ] = parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; } return expanded; } }; if ( prefix !== "margin" ) { jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; } } ); jQuery.fn.extend( { css: function( name, value ) { return access( this, function( elem, name, value ) { var styles, len, map = {}, i = 0; if ( Array.isArray( name ) ) { styles = getStyles( elem ); len = name.length; for ( ; i < len; i++ ) { map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); } return map; } return value !== undefined ? 
jQuery.style( elem, name, value ) : jQuery.css( elem, name ); }, name, value, arguments.length > 1 ); } } ); function Tween( elem, options, prop, end, easing ) { return new Tween.prototype.init( elem, options, prop, end, easing ); } jQuery.Tween = Tween; Tween.prototype = { constructor: Tween, init: function( elem, options, prop, end, easing, unit ) { this.elem = elem; this.prop = prop; this.easing = easing || jQuery.easing._default; this.options = options; this.start = this.now = this.cur(); this.end = end; this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); }, cur: function() { var hooks = Tween.propHooks[ this.prop ]; return hooks && hooks.get ? hooks.get( this ) : Tween.propHooks._default.get( this ); }, run: function( percent ) { var eased, hooks = Tween.propHooks[ this.prop ]; if ( this.options.duration ) { this.pos = eased = jQuery.easing[ this.easing ]( percent, this.options.duration * percent, 0, 1, this.options.duration ); } else { this.pos = eased = percent; } this.now = ( this.end - this.start ) * eased + this.start; if ( this.options.step ) { this.options.step.call( this.elem, this.now, this ); } if ( hooks && hooks.set ) { hooks.set( this ); } else { Tween.propHooks._default.set( this ); } return this; } }; Tween.prototype.init.prototype = Tween.prototype; Tween.propHooks = { _default: { get: function( tween ) { var result; // Use a property on the element directly when it is not a DOM element, // or when there is no matching style property that exists. if ( tween.elem.nodeType !== 1 || tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { return tween.elem[ tween.prop ]; } // Passing an empty string as a 3rd parameter to .css will automatically // attempt a parseFloat and fallback to a string if the parse fails. // Simple values such as "10px" are parsed to Float; // complex values such as "rotate(1rad)" are returned as-is. result = jQuery.css( tween.elem, tween.prop, "" ); // Empty strings, null, undefined and "auto" are converted to 0. return !result || result === "auto" ? 0 : result; }, set: function( tween ) { // Use step hook for back compat. // Use cssHook if its there. // Use .style if available and use plain properties where available. 
if ( jQuery.fx.step[ tween.prop ] ) { jQuery.fx.step[ tween.prop ]( tween ); } else if ( tween.elem.nodeType === 1 && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) { jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); } else { tween.elem[ tween.prop ] = tween.now; } } } }; // Support: IE <=9 only // Panic based approach to setting things on disconnected nodes Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { set: function( tween ) { if ( tween.elem.nodeType && tween.elem.parentNode ) { tween.elem[ tween.prop ] = tween.now; } } }; jQuery.easing = { linear: function( p ) { return p; }, swing: function( p ) { return 0.5 - Math.cos( p * Math.PI ) / 2; }, _default: "swing" }; jQuery.fx = Tween.prototype.init; // Back compat <1.8 extension point jQuery.fx.step = {}; var fxNow, inProgress, rfxtypes = /^(?:toggle|show|hide)$/, rrun = /queueHooks$/; function schedule() { if ( inProgress ) { if ( document.hidden === false && window.requestAnimationFrame ) { window.requestAnimationFrame( schedule ); } else { window.setTimeout( schedule, jQuery.fx.interval ); } jQuery.fx.tick(); } } // Animations created synchronously will run synchronously function createFxNow() { window.setTimeout( function() { fxNow = undefined; } ); return ( fxNow = Date.now() ); } // Generate parameters to create a standard animation function genFx( type, includeWidth ) { var which, i = 0, attrs = { height: type }; // If we include width, step value is 1 to do all cssExpand values, // otherwise step value is 2 to skip over Left and Right includeWidth = includeWidth ? 1 : 0; for ( ; i < 4; i += 2 - includeWidth ) { which = cssExpand[ i ]; attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; } if ( includeWidth ) { attrs.opacity = attrs.width = type; } return attrs; } function createTween( value, prop, animation ) { var tween, collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), index = 0, length = collection.length; for ( ; index < length; index++ ) { if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { // We're done with this property return tween; } } } function defaultPrefilter( elem, props, opts ) { var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, isBox = "width" in props || "height" in props, anim = this, orig = {}, style = elem.style, hidden = elem.nodeType && isHiddenWithinTree( elem ), dataShow = dataPriv.get( elem, "fxshow" ); // Queue-skipping animations hijack the fx hooks if ( !opts.queue ) { hooks = jQuery._queueHooks( elem, "fx" ); if ( hooks.unqueued == null ) { hooks.unqueued = 0; oldfire = hooks.empty.fire; hooks.empty.fire = function() { if ( !hooks.unqueued ) { oldfire(); } }; } hooks.unqueued++; anim.always( function() { // Ensure the complete handler is called before this completes anim.always( function() { hooks.unqueued--; if ( !jQuery.queue( elem, "fx" ).length ) { hooks.empty.fire(); } } ); } ); } // Detect show/hide animations for ( prop in props ) { value = props[ prop ]; if ( rfxtypes.test( value ) ) { delete props[ prop ]; toggle = toggle || value === "toggle"; if ( value === ( hidden ? 
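// Illustrative usage sketch (comments only; the easing name and "#panel"
// are hypothetical). Extra easings can be attached to jQuery.easing and
// selected by name; only the first argument (progress 0..1) is needed for
// a simple curve:
//
//     jQuery.easing.easeInQuad = function( p ) {
//         return p * p;
//     };
//     jQuery( "#panel" ).animate( { height: 200 }, 400, "easeInQuad" );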
"hide" : "show" ) ) { // Pretend to be hidden if this is a "show" and // there is still data from a stopped show/hide if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { hidden = true; // Ignore all other no-op show/hide data } else { continue; } } orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); } } // Bail out if this is a no-op like .hide().hide() propTween = !jQuery.isEmptyObject( props ); if ( !propTween && jQuery.isEmptyObject( orig ) ) { return; } // Restrict "overflow" and "display" styles during box animations if ( isBox && elem.nodeType === 1 ) { // Support: IE <=9 - 11, Edge 12 - 15 // Record all 3 overflow attributes because IE does not infer the shorthand // from identically-valued overflowX and overflowY and Edge just mirrors // the overflowX value there. opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; // Identify a display type, preferring old show/hide data over the CSS cascade restoreDisplay = dataShow && dataShow.display; if ( restoreDisplay == null ) { restoreDisplay = dataPriv.get( elem, "display" ); } display = jQuery.css( elem, "display" ); if ( display === "none" ) { if ( restoreDisplay ) { display = restoreDisplay; } else { // Get nonempty value(s) by temporarily forcing visibility showHide( [ elem ], true ); restoreDisplay = elem.style.display || restoreDisplay; display = jQuery.css( elem, "display" ); showHide( [ elem ] ); } } // Animate inline elements as inline-block if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { if ( jQuery.css( elem, "float" ) === "none" ) { // Restore the original display value at the end of pure show/hide animations if ( !propTween ) { anim.done( function() { style.display = restoreDisplay; } ); if ( restoreDisplay == null ) { display = style.display; restoreDisplay = display === "none" ? "" : display; } } style.display = "inline-block"; } } } if ( opts.overflow ) { style.overflow = "hidden"; anim.always( function() { style.overflow = opts.overflow[ 0 ]; style.overflowX = opts.overflow[ 1 ]; style.overflowY = opts.overflow[ 2 ]; } ); } // Implement show/hide animations propTween = false; for ( prop in orig ) { // General show/hide setup for this element animation if ( !propTween ) { if ( dataShow ) { if ( "hidden" in dataShow ) { hidden = dataShow.hidden; } } else { dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); } // Store hidden/visible for toggle so `.stop().toggle()` "reverses" if ( toggle ) { dataShow.hidden = !hidden; } // Show elements before animating them if ( hidden ) { showHide( [ elem ], true ); } /* eslint-disable no-loop-func */ anim.done( function() { /* eslint-enable no-loop-func */ // The final step of a "hide" animation is actually hiding the element if ( !hidden ) { showHide( [ elem ] ); } dataPriv.remove( elem, "fxshow" ); for ( prop in orig ) { jQuery.style( elem, prop, orig[ prop ] ); } } ); } // Per-property setup propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); if ( !( prop in dataShow ) ) { dataShow[ prop ] = propTween.start; if ( hidden ) { propTween.end = propTween.start; propTween.start = 0; } } } } function propFilter( props, specialEasing ) { var index, name, easing, value, hooks; // camelCase, specialEasing and expand cssHook pass for ( index in props ) { name = camelCase( index ); easing = specialEasing[ name ]; value = props[ index ]; if ( Array.isArray( value ) ) { easing = value[ 1 ]; value = props[ index ] = value[ 0 ]; } if ( index !== name ) { props[ name ] = value; delete props[ index ]; } hooks = jQuery.cssHooks[ name ]; if ( hooks && "expand" in hooks ) { value = hooks.expand( value ); delete props[ name ]; // Not quite $.extend, this won't overwrite existing keys. // Reusing 'index' because we have the correct "name" for ( index in value ) { if ( !( index in props ) ) { props[ index ] = value[ index ]; specialEasing[ index ] = easing; } } } else { specialEasing[ name ] = easing; } } } function Animation( elem, properties, options ) { var result, stopped, index = 0, length = Animation.prefilters.length, deferred = jQuery.Deferred().always( function() { // Don't match elem in the :animated selector delete tick.elem; } ), tick = function() { if ( stopped ) { return false; } var currentTime = fxNow || createFxNow(), remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), // Support: Android 2.3 only // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) temp = remaining / animation.duration || 0, percent = 1 - temp, index = 0, length = animation.tweens.length; for ( ; index < length; index++ ) { animation.tweens[ index ].run( percent ); } deferred.notifyWith( elem, [ animation, percent, remaining ] ); // If there's more to do, yield if ( percent < 1 && length ) { return remaining; } // If this was an empty animation, synthesize a final progress notification if ( !length ) { deferred.notifyWith( elem, [ animation, 1, 0 ] ); } // Resolve the animation and report its conclusion deferred.resolveWith( elem, [ animation ] ); return false; }, animation = deferred.promise( { elem: elem, props: jQuery.extend( {}, properties ), opts: jQuery.extend( true, { specialEasing: {}, easing: jQuery.easing._default }, options ), originalProperties: properties, originalOptions: options, startTime: fxNow || createFxNow(), duration: options.duration, tweens: [], createTween: function( prop, end ) { var tween = jQuery.Tween( elem, animation.opts, prop, end, animation.opts.specialEasing[ prop ] || animation.opts.easing ); animation.tweens.push( tween ); return tween; }, stop: function( gotoEnd ) { var index = 0, // If we are going to the end, we want to run all the tweens // otherwise we skip this part length = gotoEnd ? 
animation.tweens.length : 0; if ( stopped ) { return this; } stopped = true; for ( ; index < length; index++ ) { animation.tweens[ index ].run( 1 ); } // Resolve when we played the last frame; otherwise, reject if ( gotoEnd ) { deferred.notifyWith( elem, [ animation, 1, 0 ] ); deferred.resolveWith( elem, [ animation, gotoEnd ] ); } else { deferred.rejectWith( elem, [ animation, gotoEnd ] ); } return this; } } ), props = animation.props; propFilter( props, animation.opts.specialEasing ); for ( ; index < length; index++ ) { result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); if ( result ) { if ( isFunction( result.stop ) ) { jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = result.stop.bind( result ); } return result; } } jQuery.map( props, createTween, animation ); if ( isFunction( animation.opts.start ) ) { animation.opts.start.call( elem, animation ); } // Attach callbacks from options animation .progress( animation.opts.progress ) .done( animation.opts.done, animation.opts.complete ) .fail( animation.opts.fail ) .always( animation.opts.always ); jQuery.fx.timer( jQuery.extend( tick, { elem: elem, anim: animation, queue: animation.opts.queue } ) ); return animation; } jQuery.Animation = jQuery.extend( Animation, { tweeners: { "*": [ function( prop, value ) { var tween = this.createTween( prop, value ); adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); return tween; } ] }, tweener: function( props, callback ) { if ( isFunction( props ) ) { callback = props; props = [ "*" ]; } else { props = props.match( rnothtmlwhite ); } var prop, index = 0, length = props.length; for ( ; index < length; index++ ) { prop = props[ index ]; Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; Animation.tweeners[ prop ].unshift( callback ); } }, prefilters: [ defaultPrefilter ], prefilter: function( callback, prepend ) { if ( prepend ) { Animation.prefilters.unshift( callback ); } else { Animation.prefilters.push( callback ); } } } ); jQuery.speed = function( speed, easing, fn ) { var opt = speed && typeof speed === "object" ? 
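// Illustrative usage sketch (comments only; the "scrollLeft" tweener is a
// hypothetical plugin example). Animation exposes two extension points:
// prefilters, which may take over an animation entirely, and tweeners,
// which can supply a custom Tween for matching properties:
//
//     jQuery.Animation.tweener( "scrollLeft", function( prop, value ) {
//         var tween = this.createTween( prop, value );
//         // plugin-specific tween setup could go here
//         return tween;
//     } );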
jQuery.extend( {}, speed ) : { complete: fn || !fn && easing || isFunction( speed ) && speed, duration: speed, easing: fn && easing || easing && !isFunction( easing ) && easing }; // Go to the end state if fx are off if ( jQuery.fx.off ) { opt.duration = 0; } else { if ( typeof opt.duration !== "number" ) { if ( opt.duration in jQuery.fx.speeds ) { opt.duration = jQuery.fx.speeds[ opt.duration ]; } else { opt.duration = jQuery.fx.speeds._default; } } } // Normalize opt.queue - true/undefined/null -> "fx" if ( opt.queue == null || opt.queue === true ) { opt.queue = "fx"; } // Queueing opt.old = opt.complete; opt.complete = function() { if ( isFunction( opt.old ) ) { opt.old.call( this ); } if ( opt.queue ) { jQuery.dequeue( this, opt.queue ); } }; return opt; }; jQuery.fn.extend( { fadeTo: function( speed, to, easing, callback ) { // Show any hidden elements after setting opacity to 0 return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() // Animate to the value specified .end().animate( { opacity: to }, speed, easing, callback ); }, animate: function( prop, speed, easing, callback ) { var empty = jQuery.isEmptyObject( prop ), optall = jQuery.speed( speed, easing, callback ), doAnimation = function() { // Operate on a copy of prop so per-property easing won't be lost var anim = Animation( this, jQuery.extend( {}, prop ), optall ); // Empty animations, or finishing resolves immediately if ( empty || dataPriv.get( this, "finish" ) ) { anim.stop( true ); } }; doAnimation.finish = doAnimation; return empty || optall.queue === false ? this.each( doAnimation ) : this.queue( optall.queue, doAnimation ); }, stop: function( type, clearQueue, gotoEnd ) { var stopQueue = function( hooks ) { var stop = hooks.stop; delete hooks.stop; stop( gotoEnd ); }; if ( typeof type !== "string" ) { gotoEnd = clearQueue; clearQueue = type; type = undefined; } if ( clearQueue && type !== false ) { this.queue( type || "fx", [] ); } return this.each( function() { var dequeue = true, index = type != null && type + "queueHooks", timers = jQuery.timers, data = dataPriv.get( this ); if ( index ) { if ( data[ index ] && data[ index ].stop ) { stopQueue( data[ index ] ); } } else { for ( index in data ) { if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { stopQueue( data[ index ] ); } } } for ( index = timers.length; index--; ) { if ( timers[ index ].elem === this && ( type == null || timers[ index ].queue === type ) ) { timers[ index ].anim.stop( gotoEnd ); dequeue = false; timers.splice( index, 1 ); } } // Start the next in the queue if the last step wasn't forced. // Timers currently will call their complete callbacks, which // will dequeue but only if they were gotoEnd. if ( dequeue || !gotoEnd ) { jQuery.dequeue( this, type ); } } ); }, finish: function( type ) { if ( type !== false ) { type = type || "fx"; } return this.each( function() { var index, data = dataPriv.get( this ), queue = data[ type + "queue" ], hooks = data[ type + "queueHooks" ], timers = jQuery.timers, length = queue ? 
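// Illustrative usage sketch (comments only; "#panel" is hypothetical) of
// the option forms normalized by jQuery.speed() above:
//
//     jQuery( "#panel" ).animate( { width: 300 }, {
//         duration: "slow",         // looked up in jQuery.fx.speeds
//         easing: "swing",
//         queue: false,             // bypass the "fx" queue
//         complete: function() {}
//     } );
//     jQuery( "#panel" ).stop( true, true );  // clear queue, jump to end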
queue.length : 0; // Enable finishing flag on private data data.finish = true; // Empty the queue first jQuery.queue( this, type, [] ); if ( hooks && hooks.stop ) { hooks.stop.call( this, true ); } // Look for any active animations, and finish them for ( index = timers.length; index--; ) { if ( timers[ index ].elem === this && timers[ index ].queue === type ) { timers[ index ].anim.stop( true ); timers.splice( index, 1 ); } } // Look for any animations in the old queue and finish them for ( index = 0; index < length; index++ ) { if ( queue[ index ] && queue[ index ].finish ) { queue[ index ].finish.call( this ); } } // Turn off finishing flag delete data.finish; } ); } } ); jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { var cssFn = jQuery.fn[ name ]; jQuery.fn[ name ] = function( speed, easing, callback ) { return speed == null || typeof speed === "boolean" ? cssFn.apply( this, arguments ) : this.animate( genFx( name, true ), speed, easing, callback ); }; } ); // Generate shortcuts for custom animations jQuery.each( { slideDown: genFx( "show" ), slideUp: genFx( "hide" ), slideToggle: genFx( "toggle" ), fadeIn: { opacity: "show" }, fadeOut: { opacity: "hide" }, fadeToggle: { opacity: "toggle" } }, function( name, props ) { jQuery.fn[ name ] = function( speed, easing, callback ) { return this.animate( props, speed, easing, callback ); }; } ); jQuery.timers = []; jQuery.fx.tick = function() { var timer, i = 0, timers = jQuery.timers; fxNow = Date.now(); for ( ; i < timers.length; i++ ) { timer = timers[ i ]; // Run the timer and safely remove it when done (allowing for external removal) if ( !timer() && timers[ i ] === timer ) { timers.splice( i--, 1 ); } } if ( !timers.length ) { jQuery.fx.stop(); } fxNow = undefined; }; jQuery.fx.timer = function( timer ) { jQuery.timers.push( timer ); jQuery.fx.start(); }; jQuery.fx.interval = 13; jQuery.fx.start = function() { if ( inProgress ) { return; } inProgress = true; schedule(); }; jQuery.fx.stop = function() { inProgress = null; }; jQuery.fx.speeds = { slow: 600, fast: 200, // Default speed _default: 400 }; // Based off of the plugin by Clint Helfers, with permission. // https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ jQuery.fn.delay = function( time, type ) { time = jQuery.fx ? 
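// Illustrative usage sketch (comments only; selectors are hypothetical).
// The generated shortcuts above reduce to .animate() with genFx()-built
// property maps, and jQuery.fx.off (handled in jQuery.speed) zeroes every
// duration:
//
//     jQuery( "#menu" ).slideDown( 200 );  // height/margin/padding tweens
//     jQuery( "#note" ).fadeToggle();      // { opacity: "toggle" }
//     jQuery.fx.off = true;                // all durations become 0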
jQuery.fx.speeds[ time ] || time : time; type = type || "fx"; return this.queue( type, function( next, hooks ) { var timeout = window.setTimeout( next, time ); hooks.stop = function() { window.clearTimeout( timeout ); }; } ); }; ( function() { var input = document.createElement( "input" ), select = document.createElement( "select" ), opt = select.appendChild( document.createElement( "option" ) ); input.type = "checkbox"; // Support: Android <=4.3 only // Default value for a checkbox should be "on" support.checkOn = input.value !== ""; // Support: IE <=11 only // Must access selectedIndex to make default options select support.optSelected = opt.selected; // Support: IE <=11 only // An input loses its value after becoming a radio input = document.createElement( "input" ); input.value = "t"; input.type = "radio"; support.radioValue = input.value === "t"; } )(); var boolHook, attrHandle = jQuery.expr.attrHandle; jQuery.fn.extend( { attr: function( name, value ) { return access( this, jQuery.attr, name, value, arguments.length > 1 ); }, removeAttr: function( name ) { return this.each( function() { jQuery.removeAttr( this, name ); } ); } } ); jQuery.extend( { attr: function( elem, name, value ) { var ret, hooks, nType = elem.nodeType; // Don't get/set attributes on text, comment and attribute nodes if ( nType === 3 || nType === 8 || nType === 2 ) { return; } // Fallback to prop when attributes are not supported if ( typeof elem.getAttribute === "undefined" ) { return jQuery.prop( elem, name, value ); } // Attribute hooks are determined by the lowercase version // Grab necessary hook if one is defined if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { hooks = jQuery.attrHooks[ name.toLowerCase() ] || ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); } if ( value !== undefined ) { if ( value === null ) { jQuery.removeAttr( elem, name ); return; } if ( hooks && "set" in hooks && ( ret = hooks.set( elem, value, name ) ) !== undefined ) { return ret; } elem.setAttribute( name, value + "" ); return value; } if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { return ret; } ret = jQuery.find.attr( elem, name ); // Non-existent attributes return null, we normalize to undefined return ret == null ? 
undefined : ret; }, attrHooks: { type: { set: function( elem, value ) { if ( !support.radioValue && value === "radio" && nodeName( elem, "input" ) ) { var val = elem.value; elem.setAttribute( "type", value ); if ( val ) { elem.value = val; } return value; } } } }, removeAttr: function( elem, value ) { var name, i = 0, // Attribute names can contain non-HTML whitespace characters // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 attrNames = value && value.match( rnothtmlwhite ); if ( attrNames && elem.nodeType === 1 ) { while ( ( name = attrNames[ i++ ] ) ) { elem.removeAttribute( name ); } } } } ); // Hooks for boolean attributes boolHook = { set: function( elem, value, name ) { if ( value === false ) { // Remove boolean attributes when set to false jQuery.removeAttr( elem, name ); } else { elem.setAttribute( name, name ); } return name; } }; jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { var getter = attrHandle[ name ] || jQuery.find.attr; attrHandle[ name ] = function( elem, name, isXML ) { var ret, handle, lowercaseName = name.toLowerCase(); if ( !isXML ) { // Avoid an infinite loop by temporarily removing this function from the getter handle = attrHandle[ lowercaseName ]; attrHandle[ lowercaseName ] = ret; ret = getter( elem, name, isXML ) != null ? lowercaseName : null; attrHandle[ lowercaseName ] = handle; } return ret; }; } ); var rfocusable = /^(?:input|select|textarea|button)$/i, rclickable = /^(?:a|area)$/i; jQuery.fn.extend( { prop: function( name, value ) { return access( this, jQuery.prop, name, value, arguments.length > 1 ); }, removeProp: function( name ) { return this.each( function() { delete this[ jQuery.propFix[ name ] || name ]; } ); } } ); jQuery.extend( { prop: function( elem, name, value ) { var ret, hooks, nType = elem.nodeType; // Don't get/set properties on text, comment and attribute nodes if ( nType === 3 || nType === 8 || nType === 2 ) { return; } if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { // Fix name and attach hooks name = jQuery.propFix[ name ] || name; hooks = jQuery.propHooks[ name ]; } if ( value !== undefined ) { if ( hooks && "set" in hooks && ( ret = hooks.set( elem, value, name ) ) !== undefined ) { return ret; } return ( elem[ name ] = value ); } if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { return ret; } return elem[ name ]; }, propHooks: { tabIndex: { get: function( elem ) { // Support: IE <=9 - 11 only // elem.tabIndex doesn't always return the // correct value when it hasn't been explicitly set // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ // Use proper attribute retrieval(#12072) var tabindex = jQuery.find.attr( elem, "tabindex" ); if ( tabindex ) { return parseInt( tabindex, 10 ); } if ( rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ) { return 0; } return -1; } } }, propFix: { "for": "htmlFor", "class": "className" } } ); // Support: IE <=11 only // Accessing the selectedIndex property // forces the browser to respect setting selected // on the option // The getter ensures a default option is selected // when in an optgroup // eslint rule "no-unused-expressions" is disabled for this code // since it considers such accessions noop if ( !support.optSelected ) { jQuery.propHooks.selected = { get: function( elem ) { /* eslint no-unused-expressions: "off" */ var parent = elem.parentNode; if ( parent && parent.parentNode ) { 
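// Illustrative usage sketch (comments only; "#agree" is hypothetical).
// Boolean attributes go through boolHook above, so assigning false removes
// the attribute entirely, while the live checked state is a property:
//
//     jQuery( "#agree" ).attr( "checked", false ); // attribute removed
//     jQuery( "#agree" ).prop( "checked", true );  // live state set
//     jQuery( "#agree" ).attr( "missing" );        // undefined, never null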
parent.parentNode.selectedIndex; } return null; }, set: function( elem ) { /* eslint no-unused-expressions: "off" */ var parent = elem.parentNode; if ( parent ) { parent.selectedIndex; if ( parent.parentNode ) { parent.parentNode.selectedIndex; } } } }; } jQuery.each( [ "tabIndex", "readOnly", "maxLength", "cellSpacing", "cellPadding", "rowSpan", "colSpan", "useMap", "frameBorder", "contentEditable" ], function() { jQuery.propFix[ this.toLowerCase() ] = this; } ); // Strip and collapse whitespace according to HTML spec // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace function stripAndCollapse( value ) { var tokens = value.match( rnothtmlwhite ) || []; return tokens.join( " " ); } function getClass( elem ) { return elem.getAttribute && elem.getAttribute( "class" ) || ""; } function classesToArray( value ) { if ( Array.isArray( value ) ) { return value; } if ( typeof value === "string" ) { return value.match( rnothtmlwhite ) || []; } return []; } jQuery.fn.extend( { addClass: function( value ) { var classes, elem, cur, curValue, clazz, j, finalValue, i = 0; if ( isFunction( value ) ) { return this.each( function( j ) { jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); } ); } classes = classesToArray( value ); if ( classes.length ) { while ( ( elem = this[ i++ ] ) ) { curValue = getClass( elem ); cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); if ( cur ) { j = 0; while ( ( clazz = classes[ j++ ] ) ) { if ( cur.indexOf( " " + clazz + " " ) < 0 ) { cur += clazz + " "; } } // Only assign if different to avoid unneeded rendering. finalValue = stripAndCollapse( cur ); if ( curValue !== finalValue ) { elem.setAttribute( "class", finalValue ); } } } } return this; }, removeClass: function( value ) { var classes, elem, cur, curValue, clazz, j, finalValue, i = 0; if ( isFunction( value ) ) { return this.each( function( j ) { jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); } ); } if ( !arguments.length ) { return this.attr( "class", "" ); } classes = classesToArray( value ); if ( classes.length ) { while ( ( elem = this[ i++ ] ) ) { curValue = getClass( elem ); // This expression is here for better compressibility (see addClass) cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); if ( cur ) { j = 0; while ( ( clazz = classes[ j++ ] ) ) { // Remove *all* instances while ( cur.indexOf( " " + clazz + " " ) > -1 ) { cur = cur.replace( " " + clazz + " ", " " ); } } // Only assign if different to avoid unneeded rendering. finalValue = stripAndCollapse( cur ); if ( curValue !== finalValue ) { elem.setAttribute( "class", finalValue ); } } } } return this; }, toggleClass: function( value, stateVal ) { var type = typeof value, isValidValue = type === "string" || Array.isArray( value ); if ( typeof stateVal === "boolean" && isValidValue ) { return stateVal ? 
this.addClass( value ) : this.removeClass( value ); } if ( isFunction( value ) ) { return this.each( function( i ) { jQuery( this ).toggleClass( value.call( this, i, getClass( this ), stateVal ), stateVal ); } ); } return this.each( function() { var className, i, self, classNames; if ( isValidValue ) { // Toggle individual class names i = 0; self = jQuery( this ); classNames = classesToArray( value ); while ( ( className = classNames[ i++ ] ) ) { // Check each className given, space separated list if ( self.hasClass( className ) ) { self.removeClass( className ); } else { self.addClass( className ); } } // Toggle whole class name } else if ( value === undefined || type === "boolean" ) { className = getClass( this ); if ( className ) { // Store className if set dataPriv.set( this, "__className__", className ); } // If the element has a class name or if we're passed `false`, // then remove the whole classname (if there was one, the above saved it). // Otherwise bring back whatever was previously saved (if anything), // falling back to the empty string if nothing was stored. if ( this.setAttribute ) { this.setAttribute( "class", className || value === false ? "" : dataPriv.get( this, "__className__" ) || "" ); } } } ); }, hasClass: function( selector ) { var className, elem, i = 0; className = " " + selector + " "; while ( ( elem = this[ i++ ] ) ) { if ( elem.nodeType === 1 && ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { return true; } } return false; } } ); var rreturn = /\r/g; jQuery.fn.extend( { val: function( value ) { var hooks, ret, valueIsFunction, elem = this[ 0 ]; if ( !arguments.length ) { if ( elem ) { hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; if ( hooks && "get" in hooks && ( ret = hooks.get( elem, "value" ) ) !== undefined ) { return ret; } ret = elem.value; // Handle most common string cases if ( typeof ret === "string" ) { return ret.replace( rreturn, "" ); } // Handle cases where value is null/undef or number return ret == null ? "" : ret; } return; } valueIsFunction = isFunction( value ); return this.each( function( i ) { var val; if ( this.nodeType !== 1 ) { return; } if ( valueIsFunction ) { val = value.call( this, i, jQuery( this ).val() ); } else { val = value; } // Treat null/undefined as ""; convert numbers to string if ( val == null ) { val = ""; } else if ( typeof val === "number" ) { val += ""; } else if ( Array.isArray( val ) ) { val = jQuery.map( val, function( value ) { return value == null ? "" : value + ""; } ); } hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; // If set returns undefined, fall back to normal setting if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { this.value = val; } } ); } } ); jQuery.extend( { valHooks: { option: { get: function( elem ) { var val = jQuery.find.attr( elem, "value" ); return val != null ? val : // Support: IE <=10 - 11 only // option.text throws exceptions (#14686, #14858) // Strip and collapse whitespace // https://html.spec.whatwg.org/#strip-and-collapse-whitespace stripAndCollapse( jQuery.text( elem ) ); } }, select: { get: function( elem ) { var value, option, i, options = elem.options, index = elem.selectedIndex, one = elem.type === "select-one", values = one ? null : [], max = one ? index + 1 : options.length; if ( index < 0 ) { i = max; } else { i = one ? 
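// Illustrative usage sketch (comments only; "#item" and isOn are
// hypothetical) of the class helpers defined above:
//
//     jQuery( "#item" ).addClass( "active highlighted" );
//     jQuery( "#item" ).toggleClass( "active" );        // per-name toggle
//     jQuery( "#item" ).toggleClass( "active", isOn );  // forced by boolean
//     jQuery( "#item" ).hasClass( "active" );           // true/false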
index : 0; } // Loop through all the selected options for ( ; i < max; i++ ) { option = options[ i ]; // Support: IE <=9 only // IE8-9 doesn't update selected after form reset (#2551) if ( ( option.selected || i === index ) && // Don't return options that are disabled or in a disabled optgroup !option.disabled && ( !option.parentNode.disabled || !nodeName( option.parentNode, "optgroup" ) ) ) { // Get the specific value for the option value = jQuery( option ).val(); // We don't need an array for one selects if ( one ) { return value; } // Multi-Selects return an array values.push( value ); } } return values; }, set: function( elem, value ) { var optionSet, option, options = elem.options, values = jQuery.makeArray( value ), i = options.length; while ( i-- ) { option = options[ i ]; /* eslint-disable no-cond-assign */ if ( option.selected = jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 ) { optionSet = true; } /* eslint-enable no-cond-assign */ } // Force browsers to behave consistently when non-matching value is set if ( !optionSet ) { elem.selectedIndex = -1; } return values; } } } } ); // Radios and checkboxes getter/setter jQuery.each( [ "radio", "checkbox" ], function() { jQuery.valHooks[ this ] = { set: function( elem, value ) { if ( Array.isArray( value ) ) { return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); } } }; if ( !support.checkOn ) { jQuery.valHooks[ this ].get = function( elem ) { return elem.getAttribute( "value" ) === null ? "on" : elem.value; }; } } ); // Return jQuery for attributes-only inclusion support.focusin = "onfocusin" in window; var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, stopPropagationCallback = function( e ) { e.stopPropagation(); }; jQuery.extend( jQuery.event, { trigger: function( event, data, elem, onlyHandlers ) { var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, eventPath = [ elem || document ], type = hasOwn.call( event, "type" ) ? event.type : event, namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; cur = lastElement = tmp = elem = elem || document; // Don't do events on text and comment nodes if ( elem.nodeType === 3 || elem.nodeType === 8 ) { return; } // focus/blur morphs to focusin/out; ensure we're not firing them right now if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { return; } if ( type.indexOf( "." ) > -1 ) { // Namespaced trigger; create a regexp to match event type in handle() namespaces = type.split( "." ); type = namespaces.shift(); namespaces.sort(); } ontype = type.indexOf( ":" ) < 0 && "on" + type; // Caller can pass in a jQuery.Event object, Object, or just an event type string event = event[ jQuery.expando ] ? event : new jQuery.Event( type, typeof event === "object" && event ); // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) event.isTrigger = onlyHandlers ? 2 : 3; event.namespace = namespaces.join( "." ); event.rnamespace = event.namespace ? new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : null; // Clean up the event in case it is being reused event.result = undefined; if ( !event.target ) { event.target = elem; } // Clone any incoming data and prepend the event, creating the handler arg list data = data == null ? 
[ event ] : jQuery.makeArray( data, [ event ] ); // Allow special events to draw outside the lines special = jQuery.event.special[ type ] || {}; if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { return; } // Determine event propagation path in advance, per W3C events spec (#9951) // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { bubbleType = special.delegateType || type; if ( !rfocusMorph.test( bubbleType + type ) ) { cur = cur.parentNode; } for ( ; cur; cur = cur.parentNode ) { eventPath.push( cur ); tmp = cur; } // Only add window if we got to document (e.g., not plain obj or detached DOM) if ( tmp === ( elem.ownerDocument || document ) ) { eventPath.push( tmp.defaultView || tmp.parentWindow || window ); } } // Fire handlers on the event path i = 0; while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { lastElement = cur; event.type = i > 1 ? bubbleType : special.bindType || type; // jQuery handler handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && dataPriv.get( cur, "handle" ); if ( handle ) { handle.apply( cur, data ); } // Native handler handle = ontype && cur[ ontype ]; if ( handle && handle.apply && acceptData( cur ) ) { event.result = handle.apply( cur, data ); if ( event.result === false ) { event.preventDefault(); } } } event.type = type; // If nobody prevented the default action, do it now if ( !onlyHandlers && !event.isDefaultPrevented() ) { if ( ( !special._default || special._default.apply( eventPath.pop(), data ) === false ) && acceptData( elem ) ) { // Call a native DOM method on the target with the same name as the event. // Don't do default actions on window, that's where global variables be (#6170) if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { // Don't re-trigger an onFOO event when we call its FOO() method tmp = elem[ ontype ]; if ( tmp ) { elem[ ontype ] = null; } // Prevent re-triggering of the same event, since we already bubbled it above jQuery.event.triggered = type; if ( event.isPropagationStopped() ) { lastElement.addEventListener( type, stopPropagationCallback ); } elem[ type ](); if ( event.isPropagationStopped() ) { lastElement.removeEventListener( type, stopPropagationCallback ); } jQuery.event.triggered = undefined; if ( tmp ) { elem[ ontype ] = tmp; } } } } return event.result; }, // Piggyback on a donor event to simulate a different one // Used only for `focus(in | out)` events simulate: function( type, elem, event ) { var e = jQuery.extend( new jQuery.Event(), event, { type: type, isSimulated: true } ); jQuery.event.trigger( e, null, elem ); } } ); jQuery.fn.extend( { trigger: function( type, data ) { return this.each( function() { jQuery.event.trigger( type, data, this ); } ); }, triggerHandler: function( type, data ) { var elem = this[ 0 ]; if ( elem ) { return jQuery.event.trigger( type, data, elem, true ); } } } ); // Support: Firefox <=44 // Firefox doesn't have focus(in | out) events // Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 // // Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 // focus(in | out) events fire after focus & blur events, // which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order // Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 if ( !support.focusin ) { jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { // Attach a 
single capturing handler on the document while someone wants focusin/focusout var handler = function( event ) { jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); }; jQuery.event.special[ fix ] = { setup: function() { var doc = this.ownerDocument || this, attaches = dataPriv.access( doc, fix ); if ( !attaches ) { doc.addEventListener( orig, handler, true ); } dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); }, teardown: function() { var doc = this.ownerDocument || this, attaches = dataPriv.access( doc, fix ) - 1; if ( !attaches ) { doc.removeEventListener( orig, handler, true ); dataPriv.remove( doc, fix ); } else { dataPriv.access( doc, fix, attaches ); } } }; } ); } var location = window.location; var nonce = Date.now(); var rquery = ( /\?/ ); // Cross-browser xml parsing jQuery.parseXML = function( data ) { var xml; if ( !data || typeof data !== "string" ) { return null; } // Support: IE 9 - 11 only // IE throws on parseFromString with invalid input. try { xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); } catch ( e ) { xml = undefined; } if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { jQuery.error( "Invalid XML: " + data ); } return xml; }; var rbracket = /\[\]$/, rCRLF = /\r?\n/g, rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, rsubmittable = /^(?:input|select|textarea|keygen)/i; function buildParams( prefix, obj, traditional, add ) { var name; if ( Array.isArray( obj ) ) { // Serialize array item. jQuery.each( obj, function( i, v ) { if ( traditional || rbracket.test( prefix ) ) { // Treat each array item as a scalar. add( prefix, v ); } else { // Item is non-scalar (array or object), encode its numeric index. buildParams( prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", v, traditional, add ); } } ); } else if ( !traditional && toType( obj ) === "object" ) { // Serialize object item. for ( name in obj ) { buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); } } else { // Serialize scalar item. add( prefix, obj ); } } // Serialize an array of form elements or a set of // key/values into a query string jQuery.param = function( a, traditional ) { var prefix, s = [], add = function( key, valueOrFunction ) { // If value is a function, invoke it and use its return value var value = isFunction( valueOrFunction ) ? valueOrFunction() : valueOrFunction; s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value == null ? "" : value ); }; // If an array was passed in, assume that it is an array of form elements. if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { // Serialize the form elements jQuery.each( a, function() { add( this.name, this.value ); } ); } else { // If traditional, encode the "old" way (the way 1.3.2 or older // did it), otherwise encode params recursively. for ( prefix in a ) { buildParams( prefix, a[ prefix ], traditional, add ); } } // Return the resulting serialization return s.join( "&" ); }; jQuery.fn.extend( { serialize: function() { return jQuery.param( this.serializeArray() ); }, serializeArray: function() { return this.map( function() { // Can add propHook for "elements" to filter or add form elements var elements = jQuery.prop( this, "elements" ); return elements ? 
jQuery.makeArray( elements ) : this; } ) .filter( function() { var type = this.type; // Use .is( ":disabled" ) so that fieldset[disabled] works return this.name && !jQuery( this ).is( ":disabled" ) && rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && ( this.checked || !rcheckableType.test( type ) ); } ) .map( function( i, elem ) { var val = jQuery( this ).val(); if ( val == null ) { return null; } if ( Array.isArray( val ) ) { return jQuery.map( val, function( val ) { return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; } ); } return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; } ).get(); } } ); var r20 = /%20/g, rhash = /#.*$/, rantiCache = /([?&])_=[^&]*/, rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, // #7653, #8125, #8152: local protocol detection rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, rnoContent = /^(?:GET|HEAD)$/, rprotocol = /^\/\//, /* Prefilters * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) * 2) These are called: * - BEFORE asking for a transport * - AFTER param serialization (s.data is a string if s.processData is true) * 3) key is the dataType * 4) the catchall symbol "*" can be used * 5) execution will start with transport dataType and THEN continue down to "*" if needed */ prefilters = {}, /* Transports bindings * 1) key is the dataType * 2) the catchall symbol "*" can be used * 3) selection will start with transport dataType and THEN go to "*" if needed */ transports = {}, // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression allTypes = "*/".concat( "*" ), // Anchor tag for parsing the document origin originAnchor = document.createElement( "a" ); originAnchor.href = location.href; // Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport function addToPrefiltersOrTransports( structure ) { // dataTypeExpression is optional and defaults to "*" return function( dataTypeExpression, func ) { if ( typeof dataTypeExpression !== "string" ) { func = dataTypeExpression; dataTypeExpression = "*"; } var dataType, i = 0, dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; if ( isFunction( func ) ) { // For each dataType in the dataTypeExpression while ( ( dataType = dataTypes[ i++ ] ) ) { // Prepend if requested if ( dataType[ 0 ] === "+" ) { dataType = dataType.slice( 1 ) || "*"; ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); // Otherwise append } else { ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); } } } }; } // Base inspection function for prefilters and transports function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { var inspected = {}, seekingTransport = ( structure === transports ); function inspect( dataType ) { var selected; inspected[ dataType ] = true; jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); if ( typeof dataTypeOrTransport === "string" && !seekingTransport && !inspected[ dataTypeOrTransport ] ) { options.dataTypes.unshift( dataTypeOrTransport ); inspect( dataTypeOrTransport ); return false; } else if ( seekingTransport ) { return !( selected = dataTypeOrTransport ); } } ); return selected; } return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); } // A special extend for ajax options // that takes "flat" options (not to be deep extended) // Fixes #9887 function 
ajaxExtend( target, src ) { var key, deep, flatOptions = jQuery.ajaxSettings.flatOptions || {}; for ( key in src ) { if ( src[ key ] !== undefined ) { ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; } } if ( deep ) { jQuery.extend( true, target, deep ); } return target; } /* Handles responses to an ajax request: * - finds the right dataType (mediates between content-type and expected dataType) * - returns the corresponding response */ function ajaxHandleResponses( s, jqXHR, responses ) { var ct, type, finalDataType, firstDataType, contents = s.contents, dataTypes = s.dataTypes; // Remove auto dataType and get content-type in the process while ( dataTypes[ 0 ] === "*" ) { dataTypes.shift(); if ( ct === undefined ) { ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); } } // Check if we're dealing with a known content-type if ( ct ) { for ( type in contents ) { if ( contents[ type ] && contents[ type ].test( ct ) ) { dataTypes.unshift( type ); break; } } } // Check to see if we have a response for the expected dataType if ( dataTypes[ 0 ] in responses ) { finalDataType = dataTypes[ 0 ]; } else { // Try convertible dataTypes for ( type in responses ) { if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { finalDataType = type; break; } if ( !firstDataType ) { firstDataType = type; } } // Or just use first one finalDataType = finalDataType || firstDataType; } // If we found a dataType // We add the dataType to the list if needed // and return the corresponding response if ( finalDataType ) { if ( finalDataType !== dataTypes[ 0 ] ) { dataTypes.unshift( finalDataType ); } return responses[ finalDataType ]; } } /* Chain conversions given the request and the original response * Also sets the responseXXX fields on the jqXHR instance */ function ajaxConvert( s, response, jqXHR, isSuccess ) { var conv2, current, conv, tmp, prev, converters = {}, // Work with a copy of dataTypes in case we need to modify it for conversion dataTypes = s.dataTypes.slice(); // Create converters map with lowercased keys if ( dataTypes[ 1 ] ) { for ( conv in s.converters ) { converters[ conv.toLowerCase() ] = s.converters[ conv ]; } } current = dataTypes.shift(); // Convert to each sequential dataType while ( current ) { if ( s.responseFields[ current ] ) { jqXHR[ s.responseFields[ current ] ] = response; } // Apply the dataFilter if provided if ( !prev && isSuccess && s.dataFilter ) { response = s.dataFilter( response, s.dataType ); } prev = current; current = dataTypes.shift(); if ( current ) { // There's only work to do if current dataType is non-auto if ( current === "*" ) { current = prev; // Convert response if prev dataType is non-auto and differs from current } else if ( prev !== "*" && prev !== current ) { // Seek a direct converter conv = converters[ prev + " " + current ] || converters[ "* " + current ]; // If none found, seek a pair if ( !conv ) { for ( conv2 in converters ) { // If conv2 outputs current tmp = conv2.split( " " ); if ( tmp[ 1 ] === current ) { // If prev can be converted to accepted input conv = converters[ prev + " " + tmp[ 0 ] ] || converters[ "* " + tmp[ 0 ] ]; if ( conv ) { // Condense equivalence converters if ( conv === true ) { conv = converters[ conv2 ]; // Otherwise, insert the intermediate dataType } else if ( converters[ conv2 ] !== true ) { current = tmp[ 0 ]; dataTypes.unshift( tmp[ 1 ] ); } break; } } } } // Apply converter (if not an equivalence) if ( conv !== true ) { // Unless errors are allowed to bubble, catch and 
return them if ( conv && s.throws ) { response = conv( response ); } else { try { response = conv( response ); } catch ( e ) { return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current }; } } } } } } return { state: "success", data: response }; } jQuery.extend( { // Counter for holding the number of active queries active: 0, // Last-Modified header cache for next request lastModified: {}, etag: {}, ajaxSettings: { url: location.href, type: "GET", isLocal: rlocalProtocol.test( location.protocol ), global: true, processData: true, async: true, contentType: "application/x-www-form-urlencoded; charset=UTF-8", /* timeout: 0, data: null, dataType: null, username: null, password: null, cache: null, throws: false, traditional: false, headers: {}, */ accepts: { "*": allTypes, text: "text/plain", html: "text/html", xml: "application/xml, text/xml", json: "application/json, text/javascript" }, contents: { xml: /\bxml\b/, html: /\bhtml/, json: /\bjson\b/ }, responseFields: { xml: "responseXML", text: "responseText", json: "responseJSON" }, // Data converters // Keys separate source (or catchall "*") and destination types with a single space converters: { // Convert anything to text "* text": String, // Text to html (true = no transformation) "text html": true, // Evaluate text as a json expression "text json": JSON.parse, // Parse text as xml "text xml": jQuery.parseXML }, // For options that shouldn't be deep extended: // you can add your own custom options here if // and when you create one that shouldn't be // deep extended (see ajaxExtend) flatOptions: { url: true, context: true } }, // Creates a full fledged settings object into target // with both ajaxSettings and settings fields. // If target is omitted, writes into ajaxSettings. ajaxSetup: function( target, settings ) { return settings ? // Building a settings object ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : // Extending ajaxSettings ajaxExtend( jQuery.ajaxSettings, target ); }, ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), ajaxTransport: addToPrefiltersOrTransports( transports ), // Main method ajax: function( url, options ) { // If url is an object, simulate pre-1.5 signature if ( typeof url === "object" ) { options = url; url = undefined; } // Force options to be an object options = options || {}; var transport, // URL without anti-cache param cacheURL, // Response headers responseHeadersString, responseHeaders, // timeout handle timeoutTimer, // Url cleanup var urlAnchor, // Request state (becomes false upon send and true upon completion) completed, // To know if global events are to be dispatched fireGlobals, // Loop variable i, // uncached part of the url uncached, // Create the final options object s = jQuery.ajaxSetup( {}, options ), // Callbacks context callbackContext = s.context || s, // Context for global events is callbackContext if it is a DOM node or jQuery collection globalEventContext = s.context && ( callbackContext.nodeType || callbackContext.jquery ) ? 
jQuery( callbackContext ) : jQuery.event, // Deferreds deferred = jQuery.Deferred(), completeDeferred = jQuery.Callbacks( "once memory" ), // Status-dependent callbacks statusCode = s.statusCode || {}, // Headers (they are sent all at once) requestHeaders = {}, requestHeadersNames = {}, // Default abort message strAbort = "canceled", // Fake xhr jqXHR = { readyState: 0, // Builds headers hashtable if needed getResponseHeader: function( key ) { var match; if ( completed ) { if ( !responseHeaders ) { responseHeaders = {}; while ( ( match = rheaders.exec( responseHeadersString ) ) ) { responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; } } match = responseHeaders[ key.toLowerCase() ]; } return match == null ? null : match; }, // Raw string getAllResponseHeaders: function() { return completed ? responseHeadersString : null; }, // Caches the header setRequestHeader: function( name, value ) { if ( completed == null ) { name = requestHeadersNames[ name.toLowerCase() ] = requestHeadersNames[ name.toLowerCase() ] || name; requestHeaders[ name ] = value; } return this; }, // Overrides response content-type header overrideMimeType: function( type ) { if ( completed == null ) { s.mimeType = type; } return this; }, // Status-dependent callbacks statusCode: function( map ) { var code; if ( map ) { if ( completed ) { // Execute the appropriate callbacks jqXHR.always( map[ jqXHR.status ] ); } else { // Lazy-add the new callbacks in a way that preserves old ones for ( code in map ) { statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; } } } return this; }, // Cancel the request abort: function( statusText ) { var finalText = statusText || strAbort; if ( transport ) { transport.abort( finalText ); } done( 0, finalText ); return this; } }; // Attach deferreds deferred.promise( jqXHR ); // Add protocol if not provided (prefilters might expect it) // Handle falsy url in the settings object (#10093: consistency with old signature) // We also use the url parameter if available s.url = ( ( url || s.url || location.href ) + "" ) .replace( rprotocol, location.protocol + "//" ); // Alias method option to type as per ticket #12004 s.type = options.method || options.type || s.method || s.type; // Extract dataTypes list s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; // A cross-domain request is in order when the origin doesn't match the current origin. if ( s.crossDomain == null ) { urlAnchor = document.createElement( "a" ); // Support: IE <=8 - 11, Edge 12 - 15 // IE throws exception on accessing the href property if url is malformed, // e.g. 
http://example.com:80x/ try { urlAnchor.href = s.url; // Support: IE <=8 - 11 only // Anchor's host property isn't correctly set when s.url is relative urlAnchor.href = urlAnchor.href; s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== urlAnchor.protocol + "//" + urlAnchor.host; } catch ( e ) { // If there is an error parsing the URL, assume it is crossDomain, // it can be rejected by the transport if it is invalid s.crossDomain = true; } } // Convert data if not already a string if ( s.data && s.processData && typeof s.data !== "string" ) { s.data = jQuery.param( s.data, s.traditional ); } // Apply prefilters inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); // If request was aborted inside a prefilter, stop there if ( completed ) { return jqXHR; } // We can fire global events as of now if asked to // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) fireGlobals = jQuery.event && s.global; // Watch for a new set of requests if ( fireGlobals && jQuery.active++ === 0 ) { jQuery.event.trigger( "ajaxStart" ); } // Uppercase the type s.type = s.type.toUpperCase(); // Determine if request has content s.hasContent = !rnoContent.test( s.type ); // Save the URL in case we're toying with the If-Modified-Since // and/or If-None-Match header later on // Remove hash to simplify url manipulation cacheURL = s.url.replace( rhash, "" ); // More options handling for requests with no content if ( !s.hasContent ) { // Remember the hash so we can put it back uncached = s.url.slice( cacheURL.length ); // If data is available and should be processed, append data to url if ( s.data && ( s.processData || typeof s.data === "string" ) ) { cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; // #9682: remove data so that it's not used in an eventual retry delete s.data; } // Add or update anti-cache param if needed if ( s.cache === false ) { cacheURL = cacheURL.replace( rantiCache, "$1" ); uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; } // Put hash and anti-cache on the URL that will be requested (gh-1732) s.url = cacheURL + uncached; // Change '%20' to '+' if this is encoded form body content (gh-2658) } else if ( s.data && s.processData && ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { s.data = s.data.replace( r20, "+" ); } // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. if ( s.ifModified ) { if ( jQuery.lastModified[ cacheURL ] ) { jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); } if ( jQuery.etag[ cacheURL ] ) { jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); } } // Set the correct header, if data is being sent if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { jqXHR.setRequestHeader( "Content-Type", s.contentType ); } // Set the Accepts header for the server, depending on the dataType jqXHR.setRequestHeader( "Accept", s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? s.accepts[ s.dataTypes[ 0 ] ] + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : s.accepts[ "*" ] ); // Check for headers option for ( i in s.headers ) { jqXHR.setRequestHeader( i, s.headers[ i ] ); } // Allow custom headers/mimetypes and early abort if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { // Abort if not done already and return return jqXHR.abort(); } // Aborting is no longer a cancellation strAbort = "abort"; // Install callbacks on deferreds completeDeferred.add( s.complete ); jqXHR.done( s.success ); jqXHR.fail( s.error ); // Get transport transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); // If no transport, we auto-abort if ( !transport ) { done( -1, "No Transport" ); } else { jqXHR.readyState = 1; // Send global event if ( fireGlobals ) { globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); } // If request was aborted inside ajaxSend, stop there if ( completed ) { return jqXHR; } // Timeout if ( s.async && s.timeout > 0 ) { timeoutTimer = window.setTimeout( function() { jqXHR.abort( "timeout" ); }, s.timeout ); } try { completed = false; transport.send( requestHeaders, done ); } catch ( e ) { // Rethrow post-completion exceptions if ( completed ) { throw e; } // Propagate others as results done( -1, e ); } } // Callback for when everything is done function done( status, nativeStatusText, responses, headers ) { var isSuccess, success, error, response, modified, statusText = nativeStatusText; // Ignore repeat invocations if ( completed ) { return; } completed = true; // Clear timeout if it exists if ( timeoutTimer ) { window.clearTimeout( timeoutTimer ); } // Dereference transport for early garbage collection // (no matter how long the jqXHR object will be used) transport = undefined; // Cache response headers responseHeadersString = headers || ""; // Set readyState jqXHR.readyState = status > 0 ? 4 : 0; // Determine if successful isSuccess = status >= 200 && status < 300 || status === 304; // Get response data if ( responses ) { response = ajaxHandleResponses( s, jqXHR, responses ); } // Convert no matter what (that way responseXXX fields are always set) response = ajaxConvert( s, response, jqXHR, isSuccess ); // If successful, handle type chaining if ( isSuccess ) { // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. if ( s.ifModified ) { modified = jqXHR.getResponseHeader( "Last-Modified" ); if ( modified ) { jQuery.lastModified[ cacheURL ] = modified; } modified = jqXHR.getResponseHeader( "etag" ); if ( modified ) { jQuery.etag[ cacheURL ] = modified; } } // if no content if ( status === 204 || s.type === "HEAD" ) { statusText = "nocontent"; // if not modified } else if ( status === 304 ) { statusText = "notmodified"; // If we have data, let's convert it } else { statusText = response.state; success = response.data; error = response.error; isSuccess = !error; } } else { // Extract error from statusText and normalize for non-aborts error = statusText; if ( status || !statusText ) { statusText = "error"; if ( status < 0 ) { status = 0; } } } // Set data for the fake xhr object jqXHR.status = status; jqXHR.statusText = ( nativeStatusText || statusText ) + ""; // Success/Error if ( isSuccess ) { deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); } else { deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); } // Status-dependent callbacks jqXHR.statusCode( statusCode ); statusCode = undefined; if ( fireGlobals ) { globalEventContext.trigger( isSuccess ? 
"ajaxSuccess" : "ajaxError", [ jqXHR, s, isSuccess ? success : error ] ); } // Complete completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); if ( fireGlobals ) { globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); // Handle the global AJAX counter if ( !( --jQuery.active ) ) { jQuery.event.trigger( "ajaxStop" ); } } } return jqXHR; }, getJSON: function( url, data, callback ) { return jQuery.get( url, data, callback, "json" ); }, getScript: function( url, callback ) { return jQuery.get( url, undefined, callback, "script" ); } } ); jQuery.each( [ "get", "post" ], function( i, method ) { jQuery[ method ] = function( url, data, callback, type ) { // Shift arguments if data argument was omitted if ( isFunction( data ) ) { type = type || callback; callback = data; data = undefined; } // The url can be an options object (which then must have .url) return jQuery.ajax( jQuery.extend( { url: url, type: method, dataType: type, data: data, success: callback }, jQuery.isPlainObject( url ) && url ) ); }; } ); jQuery._evalUrl = function( url ) { return jQuery.ajax( { url: url, // Make this explicit, since user can override this through ajaxSetup (#11264) type: "GET", dataType: "script", cache: true, async: false, global: false, "throws": true } ); }; jQuery.fn.extend( { wrapAll: function( html ) { var wrap; if ( this[ 0 ] ) { if ( isFunction( html ) ) { html = html.call( this[ 0 ] ); } // The elements to wrap the target around wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); if ( this[ 0 ].parentNode ) { wrap.insertBefore( this[ 0 ] ); } wrap.map( function() { var elem = this; while ( elem.firstElementChild ) { elem = elem.firstElementChild; } return elem; } ).append( this ); } return this; }, wrapInner: function( html ) { if ( isFunction( html ) ) { return this.each( function( i ) { jQuery( this ).wrapInner( html.call( this, i ) ); } ); } return this.each( function() { var self = jQuery( this ), contents = self.contents(); if ( contents.length ) { contents.wrapAll( html ); } else { self.append( html ); } } ); }, wrap: function( html ) { var htmlIsFunction = isFunction( html ); return this.each( function( i ) { jQuery( this ).wrapAll( htmlIsFunction ? 
html.call( this, i ) : html ); } ); }, unwrap: function( selector ) { this.parent( selector ).not( "body" ).each( function() { jQuery( this ).replaceWith( this.childNodes ); } ); return this; } } ); jQuery.expr.pseudos.hidden = function( elem ) { return !jQuery.expr.pseudos.visible( elem ); }; jQuery.expr.pseudos.visible = function( elem ) { return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); }; jQuery.ajaxSettings.xhr = function() { try { return new window.XMLHttpRequest(); } catch ( e ) {} }; var xhrSuccessStatus = { // File protocol always yields status code 0, assume 200 0: 200, // Support: IE <=9 only // #1450: sometimes IE returns 1223 when it should be 204 1223: 204 }, xhrSupported = jQuery.ajaxSettings.xhr(); support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); support.ajax = xhrSupported = !!xhrSupported; jQuery.ajaxTransport( function( options ) { var callback, errorCallback; // Cross domain only allowed if supported through XMLHttpRequest if ( support.cors || xhrSupported && !options.crossDomain ) { return { send: function( headers, complete ) { var i, xhr = options.xhr(); xhr.open( options.type, options.url, options.async, options.username, options.password ); // Apply custom fields if provided if ( options.xhrFields ) { for ( i in options.xhrFields ) { xhr[ i ] = options.xhrFields[ i ]; } } // Override mime type if needed if ( options.mimeType && xhr.overrideMimeType ) { xhr.overrideMimeType( options.mimeType ); } // X-Requested-With header // For cross-domain requests, seeing as conditions for a preflight are // akin to a jigsaw puzzle, we simply never set it to be sure. // (it can always be set on a per-request basis or even using ajaxSetup) // For same-domain requests, won't change header if already provided. if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { headers[ "X-Requested-With" ] = "XMLHttpRequest"; } // Set headers for ( i in headers ) { xhr.setRequestHeader( i, headers[ i ] ); } // Callback callback = function( type ) { return function() { if ( callback ) { callback = errorCallback = xhr.onload = xhr.onerror = xhr.onabort = xhr.ontimeout = xhr.onreadystatechange = null; if ( type === "abort" ) { xhr.abort(); } else if ( type === "error" ) { // Support: IE <=9 only // On a manual native abort, IE9 throws // errors on any property access that is not readyState if ( typeof xhr.status !== "number" ) { complete( 0, "error" ); } else { complete( // File: protocol always yields status 0; see #8605, #14207 xhr.status, xhr.statusText ); } } else { complete( xhrSuccessStatus[ xhr.status ] || xhr.status, xhr.statusText, // Support: IE <=9 only // IE9 has no XHR2 but throws on binary (trac-11426) // For XHR2 non-text, let the caller handle it (gh-2498) ( xhr.responseType || "text" ) !== "text" || typeof xhr.responseText !== "string" ? 
{ binary: xhr.response } : { text: xhr.responseText }, xhr.getAllResponseHeaders() ); } } }; }; // Listen to events xhr.onload = callback(); errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); // Support: IE 9 only // Use onreadystatechange to replace onabort // to handle uncaught aborts if ( xhr.onabort !== undefined ) { xhr.onabort = errorCallback; } else { xhr.onreadystatechange = function() { // Check readyState before timeout as it changes if ( xhr.readyState === 4 ) { // Allow onerror to be called first, // but that will not handle a native abort // Also, save errorCallback to a variable // as xhr.onerror cannot be accessed window.setTimeout( function() { if ( callback ) { errorCallback(); } } ); } }; } // Create the abort callback callback = callback( "abort" ); try { // Do send the request (this may raise an exception) xhr.send( options.hasContent && options.data || null ); } catch ( e ) { // #14683: Only rethrow if this hasn't been notified as an error yet if ( callback ) { throw e; } } }, abort: function() { if ( callback ) { callback(); } } }; } } ); // Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) jQuery.ajaxPrefilter( function( s ) { if ( s.crossDomain ) { s.contents.script = false; } } ); // Install script dataType jQuery.ajaxSetup( { accepts: { script: "text/javascript, application/javascript, " + "application/ecmascript, application/x-ecmascript" }, contents: { script: /\b(?:java|ecma)script\b/ }, converters: { "text script": function( text ) { jQuery.globalEval( text ); return text; } } } ); // Handle cache's special case and crossDomain jQuery.ajaxPrefilter( "script", function( s ) { if ( s.cache === undefined ) { s.cache = false; } if ( s.crossDomain ) { s.type = "GET"; } } ); // Bind script tag hack transport jQuery.ajaxTransport( "script", function( s ) { // This transport only deals with cross domain requests if ( s.crossDomain ) { var script, callback; return { send: function( _, complete ) { script = jQuery( "<script>" ).prop( { charset: s.scriptCharset, src: s.url } ).on( "load error", callback = function( evt ) { script.remove(); callback = null; if ( evt ) { complete( evt.type === "error" ? 404 : 200, evt.type ); } } ); // Use native DOM manipulation to avoid our domManip AJAX trickery document.head.appendChild( script[ 0 ] ); }, abort: function() { if ( callback ) { callback(); } } }; } } ); var oldCallbacks = [], rjsonp = /(=)\?(?=&|$)|\?\?/; // Default jsonp settings jQuery.ajaxSetup( { jsonp: "callback", jsonpCallback: function() { var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) ); this[ callback ] = true; return callback; } } ); // Detect, normalize options and install callbacks for jsonp requests jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) { var callbackName, overwritten, responseContainer, jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ? "url" : typeof s.data === "string" && ( s.contentType || "" ) .indexOf( "application/x-www-form-urlencoded" ) === 0 && rjsonp.test( s.data ) && "data" ); // Handle iff the expected data type is "jsonp" or we have a parameter to set if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) { // Get callback name, remembering preexisting value associated with it callbackName = s.jsonpCallback = isFunction( s.jsonpCallback ) ? 
s.jsonpCallback() : s.jsonpCallback; // Insert callback into url or form data if ( jsonProp ) { s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName ); } else if ( s.jsonp !== false ) { s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName; } // Use data converter to retrieve json after script execution s.converters[ "script json" ] = function() { if ( !responseContainer ) { jQuery.error( callbackName + " was not called" ); } return responseContainer[ 0 ]; }; // Force json dataType s.dataTypes[ 0 ] = "json"; // Install callback overwritten = window[ callbackName ]; window[ callbackName ] = function() { responseContainer = arguments; }; // Clean-up function (fires after converters) jqXHR.always( function() { // If previous value didn't exist - remove it if ( overwritten === undefined ) { jQuery( window ).removeProp( callbackName ); // Otherwise restore preexisting value } else { window[ callbackName ] = overwritten; } // Save back as free if ( s[ callbackName ] ) { // Make sure that re-using the options doesn't screw things around s.jsonpCallback = originalSettings.jsonpCallback; // Save the callback name for future use oldCallbacks.push( callbackName ); } // Call if it was a function and we have a response if ( responseContainer && isFunction( overwritten ) ) { overwritten( responseContainer[ 0 ] ); } responseContainer = overwritten = undefined; } ); // Delegate to script return "script"; } } ); // Support: Safari 8 only // In Safari 8 documents created via document.implementation.createHTMLDocument // collapse sibling forms: the second one becomes a child of the first one. // Because of that, this security measure has to be disabled in Safari 8. // https://bugs.webkit.org/show_bug.cgi?id=137337 support.createHTMLDocument = ( function() { var body = document.implementation.createHTMLDocument( "" ).body; body.innerHTML = "<form></form><form></form>"; return body.childNodes.length === 2; } )(); // Argument "data" should be string of html // context (optional): If specified, the fragment will be created in this context, // defaults to document // keepScripts (optional): If true, will include scripts passed in the html string jQuery.parseHTML = function( data, context, keepScripts ) { if ( typeof data !== "string" ) { return []; } if ( typeof context === "boolean" ) { keepScripts = context; context = false; } var base, parsed, scripts; if ( !context ) { // Stop scripts or inline event handlers from being executed immediately // by using document.implementation if ( support.createHTMLDocument ) { context = document.implementation.createHTMLDocument( "" ); // Set the base href for the created document // so any parsed elements with URLs // are based on the document's URL (gh-2965) base = context.createElement( "base" ); base.href = document.location.href; context.head.appendChild( base ); } else { context = document; } } parsed = rsingleTag.exec( data ); scripts = !keepScripts && []; // Single tag if ( parsed ) { return [ context.createElement( parsed[ 1 ] ) ]; } parsed = buildFragment( [ data ], context, scripts ); if ( scripts && scripts.length ) { jQuery( scripts ).remove(); } return jQuery.merge( [], parsed.childNodes ); }; /** * Load a url into a page */ jQuery.fn.load = function( url, params, callback ) { var selector, type, response, self = this, off = url.indexOf( " " ); if ( off > -1 ) { selector = stripAndCollapse( url.slice( off ) ); url = url.slice( 0, off ); } // If it's a function if ( isFunction( params ) ) { // We assume that it's the callback 
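// Illustrative sketch (assumed usage, not part of the library source): the
// shifted-argument form below lets callers write, for example,
//     jQuery( "#result" ).load( "/page.html #content", function() { /* done */ } );
// where the space-separated suffix of the url is treated as a selector to
// extract from the fetched document, and the function slides into the
// callback slot when no params object is given.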
callback = params; params = undefined; // Otherwise, build a param string } else if ( params && typeof params === "object" ) { type = "POST"; } // If we have elements to modify, make the request if ( self.length > 0 ) { jQuery.ajax( { url: url, // If "type" variable is undefined, then "GET" method will be used. // Make value of this field explicit since // user can override it through ajaxSetup method type: type || "GET", dataType: "html", data: params } ).done( function( responseText ) { // Save response for use in complete callback response = arguments; self.html( selector ? // If a selector was specified, locate the right elements in a dummy div // Exclude scripts to avoid IE 'Permission Denied' errors jQuery( "<div>" ).append( jQuery.parseHTML( responseText ) ).find( selector ) : // Otherwise use the full result responseText ); // If the request succeeds, this function gets "data", "status", "jqXHR" // but they are ignored because response was set above. // If it fails, this function gets "jqXHR", "status", "error" } ).always( callback && function( jqXHR, status ) { self.each( function() { callback.apply( this, response || [ jqXHR.responseText, status, jqXHR ] ); } ); } ); } return this; }; // Attach a bunch of functions for handling common AJAX events jQuery.each( [ "ajaxStart", "ajaxStop", "ajaxComplete", "ajaxError", "ajaxSuccess", "ajaxSend" ], function( i, type ) { jQuery.fn[ type ] = function( fn ) { return this.on( type, fn ); }; } ); jQuery.expr.pseudos.animated = function( elem ) { return jQuery.grep( jQuery.timers, function( fn ) { return elem === fn.elem; } ).length; }; jQuery.offset = { setOffset: function( elem, options, i ) { var curPosition, curLeft, curCSSTop, curTop, curOffset, curCSSLeft, calculatePosition, position = jQuery.css( elem, "position" ), curElem = jQuery( elem ), props = {}; // Set position first, in-case top/left are set even on static elem if ( position === "static" ) { elem.style.position = "relative"; } curOffset = curElem.offset(); curCSSTop = jQuery.css( elem, "top" ); curCSSLeft = jQuery.css( elem, "left" ); calculatePosition = ( position === "absolute" || position === "fixed" ) && ( curCSSTop + curCSSLeft ).indexOf( "auto" ) > -1; // Need to be able to calculate position if either // top or left is auto and position is either absolute or fixed if ( calculatePosition ) { curPosition = curElem.position(); curTop = curPosition.top; curLeft = curPosition.left; } else { curTop = parseFloat( curCSSTop ) || 0; curLeft = parseFloat( curCSSLeft ) || 0; } if ( isFunction( options ) ) { // Use jQuery.extend here to allow modification of coordinates argument (gh-1848) options = options.call( elem, i, jQuery.extend( {}, curOffset ) ); } if ( options.top != null ) { props.top = ( options.top - curOffset.top ) + curTop; } if ( options.left != null ) { props.left = ( options.left - curOffset.left ) + curLeft; } if ( "using" in options ) { options.using.call( elem, props ); } else { curElem.css( props ); } } }; jQuery.fn.extend( { // offset() relates an element's border box to the document origin offset: function( options ) { // Preserve chaining for setter if ( arguments.length ) { return options === undefined ? 
this : this.each( function( i ) { jQuery.offset.setOffset( this, options, i ); } ); } var rect, win, elem = this[ 0 ]; if ( !elem ) { return; } // Return zeros for disconnected and hidden (display: none) elements (gh-2310) // Support: IE <=11 only // Running getBoundingClientRect on a // disconnected node in IE throws an error if ( !elem.getClientRects().length ) { return { top: 0, left: 0 }; } // Get document-relative position by adding viewport scroll to viewport-relative gBCR rect = elem.getBoundingClientRect(); win = elem.ownerDocument.defaultView; return { top: rect.top + win.pageYOffset, left: rect.left + win.pageXOffset }; }, // position() relates an element's margin box to its offset parent's padding box // This corresponds to the behavior of CSS absolute positioning position: function() { if ( !this[ 0 ] ) { return; } var offsetParent, offset, doc, elem = this[ 0 ], parentOffset = { top: 0, left: 0 }; // position:fixed elements are offset from the viewport, which itself always has zero offset if ( jQuery.css( elem, "position" ) === "fixed" ) { // Assume position:fixed implies availability of getBoundingClientRect offset = elem.getBoundingClientRect(); } else { offset = this.offset(); // Account for the *real* offset parent, which can be the document or its root element // when a statically positioned element is identified doc = elem.ownerDocument; offsetParent = elem.offsetParent || doc.documentElement; while ( offsetParent && ( offsetParent === doc.body || offsetParent === doc.documentElement ) && jQuery.css( offsetParent, "position" ) === "static" ) { offsetParent = offsetParent.parentNode; } if ( offsetParent && offsetParent !== elem && offsetParent.nodeType === 1 ) { // Incorporate borders into its offset, since they are outside its content origin parentOffset = jQuery( offsetParent ).offset(); parentOffset.top += jQuery.css( offsetParent, "borderTopWidth", true ); parentOffset.left += jQuery.css( offsetParent, "borderLeftWidth", true ); } } // Subtract parent offsets and element margins return { top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ), left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true ) }; }, // This method will return documentElement in the following cases: // 1) For the element inside the iframe without offsetParent, this method will return // documentElement of the parent window // 2) For the hidden or detached element // 3) For body or html element, i.e. in case of the html node - it will return itself // // but those exceptions were never presented as a real life use-cases // and might be considered as more preferable results. // // This logic, however, is not guaranteed and can change at any point in the future offsetParent: function() { return this.map( function() { var offsetParent = this.offsetParent; while ( offsetParent && jQuery.css( offsetParent, "position" ) === "static" ) { offsetParent = offsetParent.offsetParent; } return offsetParent || documentElement; } ); } } ); // Create scrollLeft and scrollTop methods jQuery.each( { scrollLeft: "pageXOffset", scrollTop: "pageYOffset" }, function( method, prop ) { var top = "pageYOffset" === prop; jQuery.fn[ method ] = function( val ) { return access( this, function( elem, method, val ) { // Coalesce documents and windows var win; if ( isWindow( elem ) ) { win = elem; } else if ( elem.nodeType === 9 ) { win = elem.defaultView; } if ( val === undefined ) { return win ? win[ prop ] : elem[ method ]; } if ( win ) { win.scrollTo( !top ? 
val : win.pageXOffset, top ? val : win.pageYOffset ); } else { elem[ method ] = val; } }, method, val, arguments.length ); }; } ); // Support: Safari <=7 - 9.1, Chrome <=37 - 49 // Add the top/left cssHooks using jQuery.fn.position // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084 // Blink bug: https://bugs.chromium.org/p/chromium/issues/detail?id=589347 // getComputedStyle returns percent when specified for top/left/bottom/right; // rather than make the css module depend on the offset module, just check for it here jQuery.each( [ "top", "left" ], function( i, prop ) { jQuery.cssHooks[ prop ] = addGetHookIf( support.pixelPosition, function( elem, computed ) { if ( computed ) { computed = curCSS( elem, prop ); // If curCSS returns percentage, fallback to offset return rnumnonpx.test( computed ) ? jQuery( elem ).position()[ prop ] + "px" : computed; } } ); } ); // Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods jQuery.each( { Height: "height", Width: "width" }, function( name, type ) { jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) { // Margin is only for outerHeight, outerWidth jQuery.fn[ funcName ] = function( margin, value ) { var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ), extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" ); return access( this, function( elem, type, value ) { var doc; if ( isWindow( elem ) ) { // $( window ).outerWidth/Height return w/h including scrollbars (gh-1729) return funcName.indexOf( "outer" ) === 0 ? elem[ "inner" + name ] : elem.document.documentElement[ "client" + name ]; } // Get document width or height if ( elem.nodeType === 9 ) { doc = elem.documentElement; // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], // whichever is greatest return Math.max( elem.body[ "scroll" + name ], doc[ "scroll" + name ], elem.body[ "offset" + name ], doc[ "offset" + name ], doc[ "client" + name ] ); } return value === undefined ? // Get width or height on the element, requesting but not forcing parseFloat jQuery.css( elem, type, extra ) : // Set width or height on the element jQuery.style( elem, type, value, extra ); }, type, chainable ? margin : undefined, chainable ); }; } ); } ); jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + "change select submit keydown keypress keyup contextmenu" ).split( " " ), function( i, name ) { // Handle event binding jQuery.fn[ name ] = function( data, fn ) { return arguments.length > 0 ? this.on( name, null, data, fn ) : this.trigger( name ); }; } ); jQuery.fn.extend( { hover: function( fnOver, fnOut ) { return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); } } ); jQuery.fn.extend( { bind: function( types, data, fn ) { return this.on( types, null, data, fn ); }, unbind: function( types, fn ) { return this.off( types, null, fn ); }, delegate: function( selector, types, data, fn ) { return this.on( types, selector, data, fn ); }, undelegate: function( selector, types, fn ) { // ( namespace ) or ( selector, types [, fn] ) return arguments.length === 1 ? this.off( selector, "**" ) : this.off( types, selector || "**", fn ); } } ); // Bind a function to a context, optionally partially applying any // arguments. 
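// Illustrative example of the jQuery.proxy defined below (assumed usage,
// not from the source):
//     var counter = { count: 0, step: function() { this.count += 1; } };
//     var bound = jQuery.proxy( counter.step, counter );
//     bound(); // increments counter.count regardless of call site,
//              // mirroring Function.prototype.bind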
// jQuery.proxy is deprecated to promote standards (specifically Function#bind) // However, it is not slated for removal any time soon jQuery.proxy = function( fn, context ) { var tmp, args, proxy; if ( typeof context === "string" ) { tmp = fn[ context ]; context = fn; fn = tmp; } // Quick check to determine if target is callable, in the spec // this throws a TypeError, but we will just return undefined. if ( !isFunction( fn ) ) { return undefined; } // Simulated bind args = slice.call( arguments, 2 ); proxy = function() { return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); }; // Set the guid of unique handler to the same of original handler, so it can be removed proxy.guid = fn.guid = fn.guid || jQuery.guid++; return proxy; }; jQuery.holdReady = function( hold ) { if ( hold ) { jQuery.readyWait++; } else { jQuery.ready( true ); } }; jQuery.isArray = Array.isArray; jQuery.parseJSON = JSON.parse; jQuery.nodeName = nodeName; jQuery.isFunction = isFunction; jQuery.isWindow = isWindow; jQuery.camelCase = camelCase; jQuery.type = toType; jQuery.now = Date.now; jQuery.isNumeric = function( obj ) { // As of jQuery 3.0, isNumeric is limited to // strings and numbers (primitives or objects) // that can be coerced to finite numbers (gh-2662) var type = jQuery.type( obj ); return ( type === "number" || type === "string" ) && // parseFloat NaNs numeric-cast false positives ("") // ...but misinterprets leading-number strings, particularly hex literals ("0x...") // subtraction forces infinities to NaN !isNaN( obj - parseFloat( obj ) ); }; // Register as a named AMD module, since jQuery can be concatenated with other // files that may use define, but not via a proper concatenation script that // understands anonymous AMD modules. A named AMD is safest and most robust // way to register. Lowercase jquery is used because AMD module names are // derived from file names, and jQuery is normally delivered in a lowercase // file name. Do this after creating the global so that if an AMD module wants // to call noConflict to hide this version of jQuery, it will work. // Note that for maximum portability, libraries that are not jQuery should // declare themselves as anonymous modules, and avoid setting a global if an // AMD loader is present. jQuery is a special case. For more information, see // https://github.com/jrburke/requirejs/wiki/Updating-existing-libraries#wiki-anon if ( true ) { !(__WEBPACK_AMD_DEFINE_ARRAY__ = [], __WEBPACK_AMD_DEFINE_RESULT__ = (function() { return jQuery; }).apply(exports, __WEBPACK_AMD_DEFINE_ARRAY__), __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__)); } var // Map over jQuery in case of overwrite _jQuery = window.jQuery, // Map over the $ in case of overwrite _$ = window.$; jQuery.noConflict = function( deep ) { if ( window.$ === jQuery ) { window.$ = _$; } if ( deep && window.jQuery === jQuery ) { window.jQuery = _jQuery; } return jQuery; }; // Expose jQuery and $ identifiers, even in AMD // (#7102#comment:10, https://github.com/jquery/jquery/pull/557) // and CommonJS for browser emulators (#13566) if ( !noGlobal ) { window.jQuery = window.$ = jQuery; } return jQuery; } ); /***/ }), /* 1 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; Object.defineProperty(__webpack_exports__, "__esModule", { value: true }); /* WEBPACK VAR INJECTION */(function(global) {/**! * @fileOverview Kickass library to create and place poppers near their reference elements. 
 * @version 1.14.4
 * @license
 * Copyright (c) 2016 Federico Zivolo and contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
var isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined';

var longerTimeoutBrowsers = ['Edge', 'Trident', 'Firefox'];
var timeoutDuration = 0;
for (var i = 0; i < longerTimeoutBrowsers.length; i += 1) {
  if (isBrowser && navigator.userAgent.indexOf(longerTimeoutBrowsers[i]) >= 0) {
    timeoutDuration = 1;
    break;
  }
}

function microtaskDebounce(fn) {
  var called = false;
  return function () {
    if (called) {
      return;
    }
    called = true;
    window.Promise.resolve().then(function () {
      called = false;
      fn();
    });
  };
}

function taskDebounce(fn) {
  var scheduled = false;
  return function () {
    if (!scheduled) {
      scheduled = true;
      setTimeout(function () {
        scheduled = false;
        fn();
      }, timeoutDuration);
    }
  };
}

var supportsMicroTasks = isBrowser && window.Promise;

/**
 * Create a debounced version of a method, that's asynchronously deferred
 * but called in the minimum time possible.
 *
 * @method
 * @memberof Popper.Utils
 * @argument {Function} fn
 * @returns {Function}
 */
var debounce = supportsMicroTasks ? microtaskDebounce : taskDebounce;

/**
 * Check if the given variable is a function
 * @method
 * @memberof Popper.Utils
 * @argument {Any} functionToCheck - variable to check
 * @returns {Boolean} answer to: is a function?
 */
function isFunction(functionToCheck) {
  var getType = {};
  return functionToCheck && getType.toString.call(functionToCheck) === '[object Function]';
}

/**
 * Get CSS computed property of the given element
 * @method
 * @memberof Popper.Utils
 * @argument {Element} element
 * @argument {String} property
 */
function getStyleComputedProperty(element, property) {
  if (element.nodeType !== 1) {
    return [];
  }
  // NOTE: 1 DOM access here
  var css = getComputedStyle(element, null);
  return property ?
css[property] : css; } /** * Returns the parentNode or the host of the element * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} parent */ function getParentNode(element) { if (element.nodeName === 'HTML') { return element; } return element.parentNode || element.host; } /** * Returns the scrolling parent of the given element * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} scroll parent */ function getScrollParent(element) { // Return body, `getScroll` will take care to get the correct `scrollTop` from it if (!element) { return document.body; } switch (element.nodeName) { case 'HTML': case 'BODY': return element.ownerDocument.body; case '#document': return element.body; } // Firefox want us to check `-x` and `-y` variations as well var _getStyleComputedProp = getStyleComputedProperty(element), overflow = _getStyleComputedProp.overflow, overflowX = _getStyleComputedProp.overflowX, overflowY = _getStyleComputedProp.overflowY; if (/(auto|scroll|overlay)/.test(overflow + overflowY + overflowX)) { return element; } return getScrollParent(getParentNode(element)); } var isIE11 = isBrowser && !!(window.MSInputMethodContext && document.documentMode); var isIE10 = isBrowser && /MSIE 10/.test(navigator.userAgent); /** * Determines if the browser is Internet Explorer * @method * @memberof Popper.Utils * @param {Number} version to check * @returns {Boolean} isIE */ function isIE(version) { if (version === 11) { return isIE11; } if (version === 10) { return isIE10; } return isIE11 || isIE10; } /** * Returns the offset parent of the given element * @method * @memberof Popper.Utils * @argument {Element} element * @returns {Element} offset parent */ function getOffsetParent(element) { if (!element) { return document.documentElement; } var noOffsetParent = isIE(10) ? document.body : null; // NOTE: 1 DOM access here var offsetParent = element.offsetParent; // Skip hidden elements which don't have an offsetParent while (offsetParent === noOffsetParent && element.nextElementSibling) { offsetParent = (element = element.nextElementSibling).offsetParent; } var nodeName = offsetParent && offsetParent.nodeName; if (!nodeName || nodeName === 'BODY' || nodeName === 'HTML') { return element ? element.ownerDocument.documentElement : document.documentElement; } // .offsetParent will return the closest TD or TABLE in case // no offsetParent is present, I hate this job... 
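  // (e.g. a popper inside a statically positioned <td> would otherwise get the
  // cell as offsetParent, so we keep walking up to the next real offset parent)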
if (['TD', 'TABLE'].indexOf(offsetParent.nodeName) !== -1 && getStyleComputedProperty(offsetParent, 'position') === 'static') {
    return getOffsetParent(offsetParent);
  }

  return offsetParent;
}

function isOffsetContainer(element) {
  var nodeName = element.nodeName;

  if (nodeName === 'BODY') {
    return false;
  }
  return nodeName === 'HTML' || getOffsetParent(element.firstElementChild) === element;
}

/**
 * Finds the root node (document, shadowDOM root) of the given element
 * @method
 * @memberof Popper.Utils
 * @argument {Element} node
 * @returns {Element} root node
 */
function getRoot(node) {
  if (node.parentNode !== null) {
    return getRoot(node.parentNode);
  }

  return node;
}

/**
 * Finds the offset parent common to the two provided nodes
 * @method
 * @memberof Popper.Utils
 * @argument {Element} element1
 * @argument {Element} element2
 * @returns {Element} common offset parent
 */
function findCommonOffsetParent(element1, element2) {
  // This check is needed to avoid errors in case one of the elements isn't defined for any reason
  if (!element1 || !element1.nodeType || !element2 || !element2.nodeType) {
    return document.documentElement;
  }

  // Here we make sure to give as "start" the element that comes first in the DOM
  var order = element1.compareDocumentPosition(element2) & Node.DOCUMENT_POSITION_FOLLOWING;
  var start = order ? element1 : element2;
  var end = order ? element2 : element1;

  // Get common ancestor container
  var range = document.createRange();
  range.setStart(start, 0);
  range.setEnd(end, 0);
  var commonAncestorContainer = range.commonAncestorContainer;

  // Both nodes are inside #document

  if (element1 !== commonAncestorContainer && element2 !== commonAncestorContainer || start.contains(end)) {
    if (isOffsetContainer(commonAncestorContainer)) {
      return commonAncestorContainer;
    }

    return getOffsetParent(commonAncestorContainer);
  }

  // one of the nodes is inside shadowDOM, find which one
  var element1root = getRoot(element1);
  if (element1root.host) {
    return findCommonOffsetParent(element1root.host, element2);
  } else {
    return findCommonOffsetParent(element1, getRoot(element2).host);
  }
}

/**
 * Gets the scroll value of the given element on the given side (`top` or `left`)
 * @method
 * @memberof Popper.Utils
 * @argument {Element} element
 * @argument {String} side `top` or `left`
 * @returns {number} amount of scrolled pixels
 */
function getScroll(element) {
  var side = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'top';

  var upperSide = side === 'top' ? 'scrollTop' : 'scrollLeft';
  var nodeName = element.nodeName;

  if (nodeName === 'BODY' || nodeName === 'HTML') {
    var html = element.ownerDocument.documentElement;
    var scrollingElement = element.ownerDocument.scrollingElement || html;
    return scrollingElement[upperSide];
  }

  return element[upperSide];
}

/*
 * Sum or subtract the element scroll values (left and top) from a given rect object
 * @method
 * @memberof Popper.Utils
 * @param {Object} rect - Rect object you want to change
 * @param {HTMLElement} element - The element from which the function reads the scroll values
 * @param {Boolean} subtract - set to true if you want to subtract the scroll values
 * @return {Object} rect - The modified rect object
 */
function includeScroll(rect, element) {
  var subtract = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false;

  var scrollTop = getScroll(element, 'top');
  var scrollLeft = getScroll(element, 'left');
  var modifier = subtract ?
-1 : 1; rect.top += scrollTop * modifier; rect.bottom += scrollTop * modifier; rect.left += scrollLeft * modifier; rect.right += scrollLeft * modifier; return rect; } /* * Helper to detect borders of a given element * @method * @memberof Popper.Utils * @param {CSSStyleDeclaration} styles * Result of `getStyleComputedProperty` on the given element * @param {String} axis - `x` or `y` * @return {number} borders - The borders size of the given axis */ function getBordersSize(styles, axis) { var sideA = axis === 'x' ? 'Left' : 'Top'; var sideB = sideA === 'Left' ? 'Right' : 'Bottom'; return parseFloat(styles['border' + sideA + 'Width'], 10) + parseFloat(styles['border' + sideB + 'Width'], 10); } function getSize(axis, body, html, computedStyle) { return Math.max(body['offset' + axis], body['scroll' + axis], html['client' + axis], html['offset' + axis], html['scroll' + axis], isIE(10) ? parseInt(html['offset' + axis]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Top' : 'Left')]) + parseInt(computedStyle['margin' + (axis === 'Height' ? 'Bottom' : 'Right')]) : 0); } function getWindowSizes(document) { var body = document.body; var html = document.documentElement; var computedStyle = isIE(10) && getComputedStyle(html); return { height: getSize('Height', body, html, computedStyle), width: getSize('Width', body, html, computedStyle) }; } var classCallCheck = function (instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }; var createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); var defineProperty = function (obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }; var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; /** * Given element offsets, generate an output similar to getBoundingClientRect * @method * @memberof Popper.Utils * @argument {Object} offsets * @returns {Object} ClientRect like output */ function getClientRect(offsets) { return _extends({}, offsets, { right: offsets.left + offsets.width, bottom: offsets.top + offsets.height }); } /** * Get bounding client rect of given element * @method * @memberof Popper.Utils * @param {HTMLElement} element * @return {Object} client rect */ function getBoundingClientRect(element) { var rect = {}; // IE10 10 FIX: Please, don't ask, the element isn't // considered in DOM in some circumstances... 
// This isn't reproducible in IE10 compatibility mode of IE11 try { if (isIE(10)) { rect = element.getBoundingClientRect(); var scrollTop = getScroll(element, 'top'); var scrollLeft = getScroll(element, 'left'); rect.top += scrollTop; rect.left += scrollLeft; rect.bottom += scrollTop; rect.right += scrollLeft; } else { rect = element.getBoundingClientRect(); } } catch (e) {} var result = { left: rect.left, top: rect.top, width: rect.right - rect.left, height: rect.bottom - rect.top }; // subtract scrollbar size from sizes var sizes = element.nodeName === 'HTML' ? getWindowSizes(element.ownerDocument) : {}; var width = sizes.width || element.clientWidth || result.right - result.left; var height = sizes.height || element.clientHeight || result.bottom - result.top; var horizScrollbar = element.offsetWidth - width; var vertScrollbar = element.offsetHeight - height; // if an hypothetical scrollbar is detected, we must be sure it's not a `border` // we make this check conditional for performance reasons if (horizScrollbar || vertScrollbar) { var styles = getStyleComputedProperty(element); horizScrollbar -= getBordersSize(styles, 'x'); vertScrollbar -= getBordersSize(styles, 'y'); result.width -= horizScrollbar; result.height -= vertScrollbar; } return getClientRect(result); } function getOffsetRectRelativeToArbitraryNode(children, parent) { var fixedPosition = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : false; var isIE10 = isIE(10); var isHTML = parent.nodeName === 'HTML'; var childrenRect = getBoundingClientRect(children); var parentRect = getBoundingClientRect(parent); var scrollParent = getScrollParent(children); var styles = getStyleComputedProperty(parent); var borderTopWidth = parseFloat(styles.borderTopWidth, 10); var borderLeftWidth = parseFloat(styles.borderLeftWidth, 10); // In cases where the parent is fixed, we must ignore negative scroll in offset calc if (fixedPosition && isHTML) { parentRect.top = Math.max(parentRect.top, 0); parentRect.left = Math.max(parentRect.left, 0); } var offsets = getClientRect({ top: childrenRect.top - parentRect.top - borderTopWidth, left: childrenRect.left - parentRect.left - borderLeftWidth, width: childrenRect.width, height: childrenRect.height }); offsets.marginTop = 0; offsets.marginLeft = 0; // Subtract margins of documentElement in case it's being used as parent // we do this only on HTML because it's the only element that behaves // differently when margins are applied to it. The margins are included in // the box of the documentElement, in the other cases not. if (!isIE10 && isHTML) { var marginTop = parseFloat(styles.marginTop, 10); var marginLeft = parseFloat(styles.marginLeft, 10); offsets.top -= borderTopWidth - marginTop; offsets.bottom -= borderTopWidth - marginTop; offsets.left -= borderLeftWidth - marginLeft; offsets.right -= borderLeftWidth - marginLeft; // Attach marginTop and marginLeft because in some circumstances we may need them offsets.marginTop = marginTop; offsets.marginLeft = marginLeft; } if (isIE10 && !fixedPosition ? parent.contains(scrollParent) : parent === scrollParent && scrollParent.nodeName !== 'BODY') { offsets = includeScroll(offsets, parent); } return offsets; } function getViewportOffsetRectRelativeToArtbitraryNode(element) { var excludeScroll = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : false;

  var html = element.ownerDocument.documentElement;
  var relativeOffset = getOffsetRectRelativeToArbitraryNode(element, html);
  var width = Math.max(html.clientWidth, window.innerWidth || 0);
  var height = Math.max(html.clientHeight, window.innerHeight || 0);

  var scrollTop = !excludeScroll ? getScroll(html) : 0;
  var scrollLeft = !excludeScroll ? getScroll(html, 'left') : 0;

  var offset = {
    top: scrollTop - relativeOffset.top + relativeOffset.marginTop,
    left: scrollLeft - relativeOffset.left + relativeOffset.marginLeft,
    width: width,
    height: height
  };

  return getClientRect(offset);
}

/**
 * Check if the given element is fixed or is inside a fixed parent
 * @method
 * @memberof Popper.Utils
 * @argument {Element} element
 * @argument {Element} customContainer
 * @returns {Boolean} answer to "isFixed?"
 */
function isFixed(element) {
  var nodeName = element.nodeName;
  if (nodeName === 'BODY' || nodeName === 'HTML') {
    return false;
  }
  if (getStyleComputedProperty(element, 'position') === 'fixed') {
    return true;
  }
  return isFixed(getParentNode(element));
}

/**
 * Finds the first parent of an element that has a transformed property defined
 * @method
 * @memberof Popper.Utils
 * @argument {Element} element
 * @returns {Element} first transformed parent or documentElement
 */
function getFixedPositionOffsetParent(element) {
  // This check is needed to avoid errors in case one of the elements isn't defined for any reason
  if (!element || !element.parentElement || isIE()) {
    return document.documentElement;
  }
  var el = element.parentElement;
  while (el && getStyleComputedProperty(el, 'transform') === 'none') {
    el = el.parentElement;
  }
  return el || document.documentElement;
}

/**
 * Computes the boundaries limits and returns them
 * @method
 * @memberof Popper.Utils
 * @param {HTMLElement} popper
 * @param {HTMLElement} reference
 * @param {number} padding
 * @param {HTMLElement} boundariesElement - Element used to define the boundaries
 * @param {Boolean} fixedPosition - Is in fixed position mode
 * @returns {Object} Coordinates of the boundaries
 */
function getBoundaries(popper, reference, padding, boundariesElement) {
  var fixedPosition = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : false;

  // NOTE: 1 DOM access here

  var boundaries = { top: 0, left: 0 };
  var offsetParent = fixedPosition ?
getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, reference); // Handle viewport case if (boundariesElement === 'viewport') { boundaries = getViewportOffsetRectRelativeToArtbitraryNode(offsetParent, fixedPosition); } else { // Handle other cases based on DOM element used as boundaries var boundariesNode = void 0; if (boundariesElement === 'scrollParent') { boundariesNode = getScrollParent(getParentNode(reference)); if (boundariesNode.nodeName === 'BODY') { boundariesNode = popper.ownerDocument.documentElement; } } else if (boundariesElement === 'window') { boundariesNode = popper.ownerDocument.documentElement; } else { boundariesNode = boundariesElement; } var offsets = getOffsetRectRelativeToArbitraryNode(boundariesNode, offsetParent, fixedPosition); // In case of HTML, we need a different computation if (boundariesNode.nodeName === 'HTML' && !isFixed(offsetParent)) { var _getWindowSizes = getWindowSizes(popper.ownerDocument), height = _getWindowSizes.height, width = _getWindowSizes.width; boundaries.top += offsets.top - offsets.marginTop; boundaries.bottom = height + offsets.top; boundaries.left += offsets.left - offsets.marginLeft; boundaries.right = width + offsets.left; } else { // for all the other DOM elements, this one is good boundaries = offsets; } } // Add paddings padding = padding || 0; var isPaddingNumber = typeof padding === 'number'; boundaries.left += isPaddingNumber ? padding : padding.left || 0; boundaries.top += isPaddingNumber ? padding : padding.top || 0; boundaries.right -= isPaddingNumber ? padding : padding.right || 0; boundaries.bottom -= isPaddingNumber ? padding : padding.bottom || 0; return boundaries; } function getArea(_ref) { var width = _ref.width, height = _ref.height; return width * height; } /** * Utility used to transform the `auto` placement to the placement with more * available space. * @method * @memberof Popper.Utils * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function computeAutoPlacement(placement, refRect, popper, reference, boundariesElement) { var padding = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : 0; if (placement.indexOf('auto') === -1) { return placement; } var boundaries = getBoundaries(popper, reference, padding, boundariesElement); var rects = { top: { width: boundaries.width, height: refRect.top - boundaries.top }, right: { width: boundaries.right - refRect.right, height: boundaries.height }, bottom: { width: boundaries.width, height: boundaries.bottom - refRect.bottom }, left: { width: refRect.left - boundaries.left, height: boundaries.height } }; var sortedAreas = Object.keys(rects).map(function (key) { return _extends({ key: key }, rects[key], { area: getArea(rects[key]) }); }).sort(function (a, b) { return b.area - a.area; }); var filteredAreas = sortedAreas.filter(function (_ref2) { var width = _ref2.width, height = _ref2.height; return width >= popper.clientWidth && height >= popper.clientHeight; }); var computedPlacement = filteredAreas.length > 0 ? filteredAreas[0].key : sortedAreas[0].key; var variation = placement.split('-')[1]; return computedPlacement + (variation ? 
'-' + variation : '');
}

/**
 * Get offsets to the reference element
 * @method
 * @memberof Popper.Utils
 * @param {Object} state
 * @param {Element} popper - the popper element
 * @param {Element} reference - the reference element (the popper will be relative to this)
 * @param {Element} fixedPosition - is in fixed position mode
 * @returns {Object} An object containing the offsets which will be applied to the popper
 */
function getReferenceOffsets(state, popper, reference) {
  var fixedPosition = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : null;

  var commonOffsetParent = fixedPosition ? getFixedPositionOffsetParent(popper) : findCommonOffsetParent(popper, reference);
  return getOffsetRectRelativeToArbitraryNode(reference, commonOffsetParent, fixedPosition);
}

/**
 * Get the outer sizes of the given element (offset size + margins)
 * @method
 * @memberof Popper.Utils
 * @argument {Element} element
 * @returns {Object} object containing width and height properties
 */
function getOuterSizes(element) {
  var styles = getComputedStyle(element);
  var x = parseFloat(styles.marginTop) + parseFloat(styles.marginBottom);
  var y = parseFloat(styles.marginLeft) + parseFloat(styles.marginRight);
  var result = {
    width: element.offsetWidth + y,
    height: element.offsetHeight + x
  };
  return result;
}

/**
 * Get the opposite placement of the given one
 * @method
 * @memberof Popper.Utils
 * @argument {String} placement
 * @returns {String} flipped placement
 */
function getOppositePlacement(placement) {
  var hash = { left: 'right', right: 'left', bottom: 'top', top: 'bottom' };
  return placement.replace(/left|right|bottom|top/g, function (matched) {
    return hash[matched];
  });
}

/**
 * Get offsets to the popper
 * @method
 * @memberof Popper.Utils
 * @param {Object} position - CSS position the Popper will get applied
 * @param {HTMLElement} popper - the popper element
 * @param {Object} referenceOffsets - the reference offsets (the popper will be relative to this)
 * @param {String} placement - one of the valid placement options
 * @returns {Object} popperOffsets - An object containing the offsets which will be applied to the popper
 */
function getPopperOffsets(popper, referenceOffsets, placement) {
  placement = placement.split('-')[0];

  // Get popper node sizes
  var popperRect = getOuterSizes(popper);

  // Add position, width and height to our offsets object
  var popperOffsets = {
    width: popperRect.width,
    height: popperRect.height
  };

  // depending on the popper placement we have to compute its offsets slightly differently
  var isHoriz = ['right', 'left'].indexOf(placement) !== -1;
  var mainSide = isHoriz ? 'top' : 'left';
  var secondarySide = isHoriz ? 'left' : 'top';
  var measurement = isHoriz ? 'height' : 'width';
  var secondaryMeasurement = !isHoriz ?
'height' : 'width';

  popperOffsets[mainSide] = referenceOffsets[mainSide] + referenceOffsets[measurement] / 2 - popperRect[measurement] / 2;
  if (placement === secondarySide) {
    popperOffsets[secondarySide] = referenceOffsets[secondarySide] - popperRect[secondaryMeasurement];
  } else {
    popperOffsets[secondarySide] = referenceOffsets[getOppositePlacement(secondarySide)];
  }

  return popperOffsets;
}

/**
 * Mimics the `find` method of Array
 * @method
 * @memberof Popper.Utils
 * @argument {Array} arr
 * @argument {Function} check - predicate run against each entry
 * @returns the first matching entry, or undefined
 */
function find(arr, check) {
  // use native find if supported
  if (Array.prototype.find) {
    return arr.find(check);
  }

  // use `filter` to obtain the same behavior of `find`
  return arr.filter(check)[0];
}

/**
 * Return the index of the matching object
 * @method
 * @memberof Popper.Utils
 * @argument {Array} arr
 * @argument prop
 * @argument value
 * @returns index or -1
 */
function findIndex(arr, prop, value) {
  // use native findIndex if supported
  if (Array.prototype.findIndex) {
    return arr.findIndex(function (cur) {
      return cur[prop] === value;
    });
  }

  // use `find` + `indexOf` if `findIndex` isn't supported
  var match = find(arr, function (obj) {
    return obj[prop] === value;
  });
  return arr.indexOf(match);
}

/**
 * Loop through the list of modifiers and run them in order,
 * each of them will then edit the data object.
 * @method
 * @memberof Popper.Utils
 * @param {dataObject} data
 * @param {Array} modifiers
 * @param {String} ends - Optional modifier name used as stopper
 * @returns {dataObject}
 */
function runModifiers(modifiers, data, ends) {
  var modifiersToRun = ends === undefined ? modifiers : modifiers.slice(0, findIndex(modifiers, 'name', ends));

  modifiersToRun.forEach(function (modifier) {
    if (modifier['function']) { // eslint-disable-line dot-notation
      console.warn('`modifier.function` is deprecated, use `modifier.fn`!');
    }
    var fn = modifier['function'] || modifier.fn; // eslint-disable-line dot-notation
    if (modifier.enabled && isFunction(fn)) {
      // Add properties to offsets to make them a complete clientRect object
      // we do this before each modifier to make sure the previous one doesn't
      // mess with these values
      data.offsets.popper = getClientRect(data.offsets.popper);
      data.offsets.reference = getClientRect(data.offsets.reference);

      data = fn(data, modifier);
    }
  });

  return data;
}

/**
 * Updates the position of the popper, computing the new offsets and applying
 * the new style.<br />
 * Prefer `scheduleUpdate` over `update` because of performance reasons.
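 *
 * Usage sketch (illustrative; `popperInstance` stands in for an instance
 * created elsewhere with `new Popper(...)`):
 *
 *     popperInstance.update();         // synchronous recomputation
 *     popperInstance.scheduleUpdate(); // debounced, preferred in hot paths
 *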
 * @method
 * @memberof Popper
 */
function update() {
  // if popper is destroyed, don't perform any further update
  if (this.state.isDestroyed) {
    return;
  }

  var data = {
    instance: this,
    styles: {},
    arrowStyles: {},
    attributes: {},
    flipped: false,
    offsets: {}
  };

  // compute reference element offsets
  data.offsets.reference = getReferenceOffsets(this.state, this.popper, this.reference, this.options.positionFixed);

  // compute auto placement, store placement inside the data object,
  // modifiers will be able to edit `placement` if needed
  // and refer to originalPlacement to know the original value
  data.placement = computeAutoPlacement(this.options.placement, data.offsets.reference, this.popper, this.reference, this.options.modifiers.flip.boundariesElement, this.options.modifiers.flip.padding);

  // store the computed placement inside `originalPlacement`
  data.originalPlacement = data.placement;

  data.positionFixed = this.options.positionFixed;

  // compute the popper offsets
  data.offsets.popper = getPopperOffsets(this.popper, data.offsets.reference, data.placement);

  data.offsets.popper.position = this.options.positionFixed ? 'fixed' : 'absolute';

  // run the modifiers
  data = runModifiers(this.modifiers, data);

  // the first `update` will call `onCreate` callback
  // the other ones will call `onUpdate` callback
  if (!this.state.isCreated) {
    this.state.isCreated = true;
    this.options.onCreate(data);
  } else {
    this.options.onUpdate(data);
  }
}

/**
 * Helper used to know if the given modifier is enabled.
 * @method
 * @memberof Popper.Utils
 * @returns {Boolean}
 */
function isModifierEnabled(modifiers, modifierName) {
  return modifiers.some(function (_ref) {
    var name = _ref.name,
        enabled = _ref.enabled;
    return enabled && name === modifierName;
  });
}

/**
 * Get the prefixed supported property name
 * @method
 * @memberof Popper.Utils
 * @argument {String} property (camelCase)
 * @returns {String} prefixed property (camelCase or PascalCase, depending on the vendor prefix)
 */
function getSupportedPropertyName(property) {
  var prefixes = [false, 'ms', 'Webkit', 'Moz', 'O'];
  var upperProp = property.charAt(0).toUpperCase() + property.slice(1);

  for (var i = 0; i < prefixes.length; i++) {
    var prefix = prefixes[i];
    var toCheck = prefix ? '' + prefix + upperProp : property;
    if (typeof document.body.style[toCheck] !== 'undefined') {
      return toCheck;
    }
  }
  return null;
}

/**
 * Destroys the popper.
 * @method
 * @memberof Popper
 */
function destroy() {
  this.state.isDestroyed = true;

  // touch DOM only if `applyStyle` modifier is enabled
  if (isModifierEnabled(this.modifiers, 'applyStyle')) {
    this.popper.removeAttribute('x-placement');
    this.popper.style.position = '';
    this.popper.style.top = '';
    this.popper.style.left = '';
    this.popper.style.right = '';
    this.popper.style.bottom = '';
    this.popper.style.willChange = '';
    this.popper.style[getSupportedPropertyName('transform')] = '';
  }

  this.disableEventListeners();

  // remove the popper if the user explicitly asked for its deletion on destroy
  // do not use `remove` because IE11 doesn't support it
  if (this.options.removeOnDestroy) {
    this.popper.parentNode.removeChild(this.popper);
  }
  return this;
}

/**
 * Get the window associated with the element
 * @argument {Element} element
 * @returns {Window}
 */
function getWindow(element) {
  var ownerDocument = element.ownerDocument;
  return ownerDocument ? ownerDocument.defaultView : window;
}

function attachToScrollParents(scrollParent, event, callback, scrollParents) {
  var isBody = scrollParent.nodeName === 'BODY';
  var target = isBody ?
scrollParent.ownerDocument.defaultView : scrollParent; target.addEventListener(event, callback, { passive: true }); if (!isBody) { attachToScrollParents(getScrollParent(target.parentNode), event, callback, scrollParents); } scrollParents.push(target); } /** * Setup needed event listeners used to update the popper position * @method * @memberof Popper.Utils * @private */ function setupEventListeners(reference, options, state, updateBound) { // Resize event listener on window state.updateBound = updateBound; getWindow(reference).addEventListener('resize', state.updateBound, { passive: true }); // Scroll event listener on scroll parents var scrollElement = getScrollParent(reference); attachToScrollParents(scrollElement, 'scroll', state.updateBound, state.scrollParents); state.scrollElement = scrollElement; state.eventsEnabled = true; return state; } /** * It will add resize/scroll events and start recalculating * position of the popper element when they are triggered. * @method * @memberof Popper */ function enableEventListeners() { if (!this.state.eventsEnabled) { this.state = setupEventListeners(this.reference, this.options, this.state, this.scheduleUpdate); } } /** * Remove event listeners used to update the popper position * @method * @memberof Popper.Utils * @private */ function removeEventListeners(reference, state) { // Remove resize event listener on window getWindow(reference).removeEventListener('resize', state.updateBound); // Remove scroll event listener on scroll parents state.scrollParents.forEach(function (target) { target.removeEventListener('scroll', state.updateBound); }); // Reset state state.updateBound = null; state.scrollParents = []; state.scrollElement = null; state.eventsEnabled = false; return state; } /** * It will remove resize/scroll events and won't recalculate popper position * when they are triggered. It also won't trigger `onUpdate` callback anymore, * unless you call `update` method manually. 
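 *
 * Sketch of the enable/disable cycle (illustrative; `popperInstance` is
 * hypothetical):
 *
 *     popperInstance.disableEventListeners(); // stop tracking scroll/resize
 *     // ... reposition things manually ...
 *     popperInstance.update();                // one-off manual reposition
 *     popperInstance.enableEventListeners();  // resume automatic tracking
 *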
* @method * @memberof Popper */ function disableEventListeners() { if (this.state.eventsEnabled) { cancelAnimationFrame(this.scheduleUpdate); this.state = removeEventListeners(this.reference, this.state); } } /** * Tells if a given input is a number * @method * @memberof Popper.Utils * @param {*} input to check * @return {Boolean} */ function isNumeric(n) { return n !== '' && !isNaN(parseFloat(n)) && isFinite(n); } /** * Set the style to the given popper * @method * @memberof Popper.Utils * @argument {Element} element - Element to apply the style to * @argument {Object} styles * Object with a list of properties and values which will be applied to the element */ function setStyles(element, styles) { Object.keys(styles).forEach(function (prop) { var unit = ''; // add unit if the value is numeric and is one of the following if (['width', 'height', 'top', 'right', 'bottom', 'left'].indexOf(prop) !== -1 && isNumeric(styles[prop])) { unit = 'px'; } element.style[prop] = styles[prop] + unit; }); } /** * Set the attributes to the given popper * @method * @memberof Popper.Utils * @argument {Element} element - Element to apply the attributes to * @argument {Object} styles * Object with a list of properties and values which will be applied to the element */ function setAttributes(element, attributes) { Object.keys(attributes).forEach(function (prop) { var value = attributes[prop]; if (value !== false) { element.setAttribute(prop, attributes[prop]); } else { element.removeAttribute(prop); } }); } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} data.styles - List of style properties - values to apply to popper element * @argument {Object} data.attributes - List of attribute properties - values to apply to popper element * @argument {Object} options - Modifiers configuration and options * @returns {Object} The same data object */ function applyStyle(data) { // any property present in `data.styles` will be applied to the popper, // in this way we can make the 3rd party modifiers add custom styles to it // Be aware, modifiers could override the properties defined in the previous // lines of this modifier! setStyles(data.instance.popper, data.styles); // any property present in `data.attributes` will be applied to the popper, // they will be set as HTML attributes of the element setAttributes(data.instance.popper, data.attributes); // if arrowElement is defined and arrowStyles has some properties if (data.arrowElement && Object.keys(data.arrowStyles).length) { setStyles(data.arrowElement, data.arrowStyles); } return data; } /** * Set the x-placement attribute before everything else because it could be used * to add margins to the popper margins needs to be calculated to get the * correct popper offsets. 
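 *
 * For context, the `x-placement` attribute written here is what stylesheets
 * key their margins on, e.g. (hypothetical CSS, not part of this bundle):
 *
 *     .my-popper[x-placement^="top"] { margin-bottom: 5px; }
 *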
* @method * @memberof Popper.modifiers * @param {HTMLElement} reference - The reference element used to position the popper * @param {HTMLElement} popper - The HTML element used as popper * @param {Object} options - Popper.js options */ function applyStyleOnLoad(reference, popper, options, modifierOptions, state) { // compute reference element offsets var referenceOffsets = getReferenceOffsets(state, popper, reference, options.positionFixed); // compute auto placement, store placement inside the data object, // modifiers will be able to edit `placement` if needed // and refer to originalPlacement to know the original value var placement = computeAutoPlacement(options.placement, referenceOffsets, popper, reference, options.modifiers.flip.boundariesElement, options.modifiers.flip.padding); popper.setAttribute('x-placement', placement); // Apply `position` to popper before anything else because // without the position applied we can't guarantee correct computations setStyles(popper, { position: options.positionFixed ? 'fixed' : 'absolute' }); return options; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function computeStyle(data, options) { var x = options.x, y = options.y; var popper = data.offsets.popper; // Remove this legacy support in Popper.js v2 var legacyGpuAccelerationOption = find(data.instance.modifiers, function (modifier) { return modifier.name === 'applyStyle'; }).gpuAcceleration; if (legacyGpuAccelerationOption !== undefined) { console.warn('WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!'); } var gpuAcceleration = legacyGpuAccelerationOption !== undefined ? legacyGpuAccelerationOption : options.gpuAcceleration; var offsetParent = getOffsetParent(data.instance.popper); var offsetParentRect = getBoundingClientRect(offsetParent); // Styles var styles = { position: popper.position }; // Avoid blurry text by using full pixel integers. // For pixel-perfect positioning, top/bottom prefers rounded // values, while left/right prefers floored values. var offsets = { left: Math.floor(popper.left), top: Math.round(popper.top), bottom: Math.round(popper.bottom), right: Math.floor(popper.right) }; var sideA = x === 'bottom' ? 'top' : 'bottom'; var sideB = y === 'right' ? 'left' : 'right'; // if gpuAcceleration is set to `true` and transform is supported, // we use `translate3d` to apply the position to the popper we // automatically use the supported prefixed version if needed var prefixedProperty = getSupportedPropertyName('transform'); // now, let's make a step back and look at this code closely (wtf?) // If the content of the popper grows once it's been positioned, it // may happen that the popper gets misplaced because of the new content // overflowing its reference element // To avoid this problem, we provide two options (x and y), which allow // the consumer to define the offset origin. // If we position a popper on top of a reference element, we can set // `x` to `top` to make the popper grow towards its top instead of // its bottom. 
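  // (e.g. a hypothetical { x: 'top' } in this modifier's options flips the
  // anchoring to `bottom`, so content added later grows the popper upwards)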
var left = void 0,
      top = void 0;
  if (sideA === 'bottom') {
    // when offsetParent is <html> the positioning is relative to the bottom of the screen (excluding the scrollbar)
    // and not the bottom of the html element
    if (offsetParent.nodeName === 'HTML') {
      top = -offsetParent.clientHeight + offsets.bottom;
    } else {
      top = -offsetParentRect.height + offsets.bottom;
    }
  } else {
    top = offsets.top;
  }
  if (sideB === 'right') {
    if (offsetParent.nodeName === 'HTML') {
      left = -offsetParent.clientWidth + offsets.right;
    } else {
      left = -offsetParentRect.width + offsets.right;
    }
  } else {
    left = offsets.left;
  }
  if (gpuAcceleration && prefixedProperty) {
    styles[prefixedProperty] = 'translate3d(' + left + 'px, ' + top + 'px, 0)';
    styles[sideA] = 0;
    styles[sideB] = 0;
    styles.willChange = 'transform';
  } else {
    // otherwise, we use the standard `top`, `left`, `bottom` and `right` properties
    var invertTop = sideA === 'bottom' ? -1 : 1;
    var invertLeft = sideB === 'right' ? -1 : 1;
    styles[sideA] = top * invertTop;
    styles[sideB] = left * invertLeft;
    styles.willChange = sideA + ', ' + sideB;
  }

  // Attributes
  var attributes = {
    'x-placement': data.placement
  };

  // Update `data` attributes, styles and arrowStyles
  data.attributes = _extends({}, attributes, data.attributes);
  data.styles = _extends({}, styles, data.styles);
  data.arrowStyles = _extends({}, data.offsets.arrow, data.arrowStyles);

  return data;
}

/**
 * Helper used to know if the given modifier depends on another one.<br />
 * It checks if the needed modifier is listed and enabled.
 * @method
 * @memberof Popper.Utils
 * @param {Array} modifiers - list of modifiers
 * @param {String} requestingName - name of requesting modifier
 * @param {String} requestedName - name of requested modifier
 * @returns {Boolean}
 */
function isModifierRequired(modifiers, requestingName, requestedName) {
  var requesting = find(modifiers, function (_ref) {
    var name = _ref.name;
    return name === requestingName;
  });

  var isRequired = !!requesting && modifiers.some(function (modifier) {
    return modifier.name === requestedName && modifier.enabled && modifier.order < requesting.order;
  });

  if (!isRequired) {
    var _requesting = '`' + requestingName + '`';
    var requested = '`' + requestedName + '`';
    console.warn(requested + ' modifier is required by ' + _requesting + ' modifier in order to work, be sure to include it before ' + _requesting + '!');
  }
  return isRequired;
}

/**
 * @function
 * @memberof Modifiers
 * @argument {Object} data - The data object generated by update method
 * @argument {Object} options - Modifiers configuration and options
 * @returns {Object} The data object, properly modified
 */
function arrow(data, options) {
  var _data$offsets$arrow;

  // arrow depends on keepTogether in order to work
  if (!isModifierRequired(data.instance.modifiers, 'arrow', 'keepTogether')) {
    return data;
  }

  var arrowElement = options.element;

  // if arrowElement is a string, suppose it's a CSS selector
  if (typeof arrowElement === 'string') {
    arrowElement = data.instance.popper.querySelector(arrowElement);

    // if arrowElement is not found, don't run the modifier
    if (!arrowElement) {
      return data;
    }
  } else {
    // if the arrowElement isn't a query selector we must check that the
    // provided DOM node is child of its popper node
    if (!data.instance.popper.contains(arrowElement)) {
      console.warn('WARNING: `arrow.element` must be child of its popper element!');
      return data;
    }
  }

  var placement = data.placement.split('-')[0];
  var _data$offsets = data.offsets,
      popper = _data$offsets.popper,
      reference = _data$offsets.reference;

  var
isVertical = ['left', 'right'].indexOf(placement) !== -1; var len = isVertical ? 'height' : 'width'; var sideCapitalized = isVertical ? 'Top' : 'Left'; var side = sideCapitalized.toLowerCase(); var altSide = isVertical ? 'left' : 'top'; var opSide = isVertical ? 'bottom' : 'right'; var arrowElementSize = getOuterSizes(arrowElement)[len]; // // extends keepTogether behavior making sure the popper and its // reference have enough pixels in conjunction // // top/left side if (reference[opSide] - arrowElementSize < popper[side]) { data.offsets.popper[side] -= popper[side] - (reference[opSide] - arrowElementSize); } // bottom/right side if (reference[side] + arrowElementSize > popper[opSide]) { data.offsets.popper[side] += reference[side] + arrowElementSize - popper[opSide]; } data.offsets.popper = getClientRect(data.offsets.popper); // compute center of the popper var center = reference[side] + reference[len] / 2 - arrowElementSize / 2; // Compute the sideValue using the updated popper offsets // take popper margin in account because we don't have this info available var css = getStyleComputedProperty(data.instance.popper); var popperMarginSide = parseFloat(css['margin' + sideCapitalized], 10); var popperBorderSide = parseFloat(css['border' + sideCapitalized + 'Width'], 10); var sideValue = center - data.offsets.popper[side] - popperMarginSide - popperBorderSide; // prevent arrowElement from being placed not contiguously to its popper sideValue = Math.max(Math.min(popper[len] - arrowElementSize, sideValue), 0); data.arrowElement = arrowElement; data.offsets.arrow = (_data$offsets$arrow = {}, defineProperty(_data$offsets$arrow, side, Math.round(sideValue)), defineProperty(_data$offsets$arrow, altSide, ''), _data$offsets$arrow); return data; } /** * Get the opposite placement variation of the given one * @method * @memberof Popper.Utils * @argument {String} placement variation * @returns {String} flipped placement variation */ function getOppositeVariation(variation) { if (variation === 'end') { return 'start'; } else if (variation === 'start') { return 'end'; } return variation; } /** * List of accepted placements to use as values of the `placement` option.<br /> * Valid placements are: * - `auto` * - `top` * - `right` * - `bottom` * - `left` * * Each placement can have a variation from this list: * - `-start` * - `-end` * * Variations are interpreted easily if you think of them as the left to right * written languages. Horizontally (`top` and `bottom`), `start` is left and `end` * is right.<br /> * Vertically (`left` and `right`), `start` is top and `end` is bottom. * * Some valid examples are: * - `top-end` (on top of reference, right aligned) * - `right-start` (on right of reference, top aligned) * - `bottom` (on bottom, centered) * - `auto-end` (on the side with more space available, alignment depends by placement) * * @static * @type {Array} * @enum {String} * @readonly * @method placements * @memberof Popper */ var placements = ['auto-start', 'auto', 'auto-end', 'top-start', 'top', 'top-end', 'right-start', 'right', 'right-end', 'bottom-end', 'bottom', 'bottom-start', 'left-end', 'left', 'left-start']; // Get rid of `auto` `auto-start` and `auto-end` var validPlacements = placements.slice(3); /** * Given an initial placement, returns all the subsequent placements * clockwise (or counter-clockwise). 
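 *
 * For example, given `validPlacements` above:
 *
 *     clockwise('top')
 *     // => ['top-end', 'right-start', 'right', 'right-end', 'bottom-end',
 *     //     'bottom', 'bottom-start', 'left-end', 'left', 'left-start',
 *     //     'top-start']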
* * @method * @memberof Popper.Utils * @argument {String} placement - A valid placement (it accepts variations) * @argument {Boolean} counter - Set to true to walk the placements counterclockwise * @returns {Array} placements including their variations */ function clockwise(placement) { var counter = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; var index = validPlacements.indexOf(placement); var arr = validPlacements.slice(index + 1).concat(validPlacements.slice(0, index)); return counter ? arr.reverse() : arr; } var BEHAVIORS = { FLIP: 'flip', CLOCKWISE: 'clockwise', COUNTERCLOCKWISE: 'counterclockwise' }; /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function flip(data, options) { // if `inner` modifier is enabled, we can't use the `flip` modifier if (isModifierEnabled(data.instance.modifiers, 'inner')) { return data; } if (data.flipped && data.placement === data.originalPlacement) { // seems like flip is trying to loop, probably there's not enough space on any of the flippable sides return data; } var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, options.boundariesElement, data.positionFixed); var placement = data.placement.split('-')[0]; var placementOpposite = getOppositePlacement(placement); var variation = data.placement.split('-')[1] || ''; var flipOrder = []; switch (options.behavior) { case BEHAVIORS.FLIP: flipOrder = [placement, placementOpposite]; break; case BEHAVIORS.CLOCKWISE: flipOrder = clockwise(placement); break; case BEHAVIORS.COUNTERCLOCKWISE: flipOrder = clockwise(placement, true); break; default: flipOrder = options.behavior; } flipOrder.forEach(function (step, index) { if (placement !== step || flipOrder.length === index + 1) { return data; } placement = data.placement.split('-')[0]; placementOpposite = getOppositePlacement(placement); var popperOffsets = data.offsets.popper; var refOffsets = data.offsets.reference; // using floor because the reference offsets may contain decimals we are not going to consider here var floor = Math.floor; var overlapsRef = placement === 'left' && floor(popperOffsets.right) > floor(refOffsets.left) || placement === 'right' && floor(popperOffsets.left) < floor(refOffsets.right) || placement === 'top' && floor(popperOffsets.bottom) > floor(refOffsets.top) || placement === 'bottom' && floor(popperOffsets.top) < floor(refOffsets.bottom); var overflowsLeft = floor(popperOffsets.left) < floor(boundaries.left); var overflowsRight = floor(popperOffsets.right) > floor(boundaries.right); var overflowsTop = floor(popperOffsets.top) < floor(boundaries.top); var overflowsBottom = floor(popperOffsets.bottom) > floor(boundaries.bottom); var overflowsBoundaries = placement === 'left' && overflowsLeft || placement === 'right' && overflowsRight || placement === 'top' && overflowsTop || placement === 'bottom' && overflowsBottom; // flip the variation if required var isVertical = ['top', 'bottom'].indexOf(placement) !== -1; var flippedVariation = !!options.flipVariations && (isVertical && variation === 'start' && overflowsLeft || isVertical && variation === 'end' && overflowsRight || !isVertical && variation === 'start' && overflowsTop || !isVertical && variation === 'end' && overflowsBottom); if (overlapsRef || overflowsBoundaries || flippedVariation) { // this boolean to detect any flip loop 
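      // (`flipped` is read back at the top of this modifier: a later pass that
      // lands on the original placement again bails out instead of looping)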
data.flipped = true; if (overlapsRef || overflowsBoundaries) { placement = flipOrder[index + 1]; } if (flippedVariation) { variation = getOppositeVariation(variation); } data.placement = placement + (variation ? '-' + variation : ''); // this object contains `position`, we want to preserve it along with // any additional property we may add in the future data.offsets.popper = _extends({}, data.offsets.popper, getPopperOffsets(data.instance.popper, data.offsets.reference, data.placement)); data = runModifiers(data.instance.modifiers, data, 'flip'); } }); return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function keepTogether(data) { var _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var placement = data.placement.split('-')[0]; var floor = Math.floor; var isVertical = ['top', 'bottom'].indexOf(placement) !== -1; var side = isVertical ? 'right' : 'bottom'; var opSide = isVertical ? 'left' : 'top'; var measurement = isVertical ? 'width' : 'height'; if (popper[side] < floor(reference[opSide])) { data.offsets.popper[opSide] = floor(reference[opSide]) - popper[measurement]; } if (popper[opSide] > floor(reference[side])) { data.offsets.popper[opSide] = floor(reference[side]); } return data; } /** * Converts a string containing value + unit into a px value number * @function * @memberof {modifiers~offset} * @private * @argument {String} str - Value + unit string * @argument {String} measurement - `height` or `width` * @argument {Object} popperOffsets * @argument {Object} referenceOffsets * @returns {Number|String} * Value in pixels, or original string if no values were extracted */ function toValue(str, measurement, popperOffsets, referenceOffsets) { // separate value from unit var split = str.match(/((?:\-|\+)?\d*\.?\d*)(.*)/); var value = +split[1]; var unit = split[2]; // If it's not a number it's an operator, I guess if (!value) { return str; } if (unit.indexOf('%') === 0) { var element = void 0; switch (unit) { case '%p': element = popperOffsets; break; case '%': case '%r': default: element = referenceOffsets; } var rect = getClientRect(element); return rect[measurement] / 100 * value; } else if (unit === 'vh' || unit === 'vw') { // if is a vh or vw, we calculate the size based on the viewport var size = void 0; if (unit === 'vh') { size = Math.max(document.documentElement.clientHeight, window.innerHeight || 0); } else { size = Math.max(document.documentElement.clientWidth, window.innerWidth || 0); } return size / 100 * value; } else { // if is an explicit pixel unit, we get rid of the unit and keep the value // if is an implicit unit, it's px, and we return just the value return value; } } /** * Parse an `offset` string to extrapolate `x` and `y` numeric offsets. 
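 *
 * For example (illustrative; the offsets objects are whatever the current
 * update cycle computed):
 *
 *     parseOffset('10, -20%', popperOffsets, referenceOffsets, 'top')
 *     // => [10, -0.2 * referenceOffsets.height], both resolved to pixels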
 * @function
 * @memberof {modifiers~offset}
 * @private
 * @argument {String} offset
 * @argument {Object} popperOffsets
 * @argument {Object} referenceOffsets
 * @argument {String} basePlacement
 * @returns {Array} a two cells array with x and y offsets in numbers
 */
function parseOffset(offset, popperOffsets, referenceOffsets, basePlacement) {
  var offsets = [0, 0];

  // Use height if placement is left or right and index is 0 otherwise use width
  // in this way the first offset will use an axis and the second one
  // will use the other one
  var useHeight = ['right', 'left'].indexOf(basePlacement) !== -1;

  // Split the offset string to obtain a list of values and operands
  // The regex addresses values with the plus or minus sign in front (+10, -20, etc)
  var fragments = offset.split(/(\+|\-)/).map(function (frag) {
    return frag.trim();
  });

  // Detect if the offset string contains a pair of values or a single one
  // they could be separated by comma or space
  var divider = fragments.indexOf(find(fragments, function (frag) {
    return frag.search(/,|\s/) !== -1;
  }));

  if (fragments[divider] && fragments[divider].indexOf(',') === -1) {
    console.warn('Offsets separated by white space(s) are deprecated, use a comma (,) instead.');
  }

  // If divider is found, we divide the list of values and operands to divide
  // them by offset X and Y.
  var splitRegex = /\s*,\s*|\s+/;
  var ops = divider !== -1 ? [fragments.slice(0, divider).concat([fragments[divider].split(splitRegex)[0]]), [fragments[divider].split(splitRegex)[1]].concat(fragments.slice(divider + 1))] : [fragments];

  // Convert the values with units to absolute pixels to allow our computations
  ops = ops.map(function (op, index) {
    // Most of the units rely on the orientation of the popper
    var measurement = (index === 1 ? !useHeight : useHeight) ? 'height' : 'width';
    var mergeWithPrevious = false;
    return op
    // This aggregates any `+` or `-` sign that aren't considered operators
    // e.g.: 10 + +5 => [10, +, +5]
    .reduce(function (a, b) {
      if (a[a.length - 1] === '' && ['+', '-'].indexOf(b) !== -1) {
        a[a.length - 1] = b;
        mergeWithPrevious = true;
        return a;
      } else if (mergeWithPrevious) {
        a[a.length - 1] += b;
        mergeWithPrevious = false;
        return a;
      } else {
        return a.concat(b);
      }
    }, [])
    // Here we convert the string values into number values (in px)
    .map(function (str) {
      return toValue(str, measurement, popperOffsets, referenceOffsets);
    });
  });

  // Loop through the offsets arrays and execute the operations
  ops.forEach(function (op, index) {
    op.forEach(function (frag, index2) {
      if (isNumeric(frag)) {
        offsets[index] += frag * (op[index2 - 1] === '-' ?
-1 : 1); } }); }); return offsets; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @argument {Number|String} options.offset=0 * The offset value as described in the modifier description * @returns {Object} The data object, properly modified */ function offset(data, _ref) { var offset = _ref.offset; var placement = data.placement, _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var basePlacement = placement.split('-')[0]; var offsets = void 0; if (isNumeric(+offset)) { offsets = [+offset, 0]; } else { offsets = parseOffset(offset, popper, reference, basePlacement); } if (basePlacement === 'left') { popper.top += offsets[0]; popper.left -= offsets[1]; } else if (basePlacement === 'right') { popper.top += offsets[0]; popper.left += offsets[1]; } else if (basePlacement === 'top') { popper.left += offsets[0]; popper.top -= offsets[1]; } else if (basePlacement === 'bottom') { popper.left += offsets[0]; popper.top += offsets[1]; } data.popper = popper; return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function preventOverflow(data, options) { var boundariesElement = options.boundariesElement || getOffsetParent(data.instance.popper); // If offsetParent is the reference element, we really want to // go one step up and use the next offsetParent as reference to // avoid to make this modifier completely useless and look like broken if (data.instance.reference === boundariesElement) { boundariesElement = getOffsetParent(boundariesElement); } // NOTE: DOM access here // resets the popper's position so that the document size can be calculated excluding // the size of the popper element itself var transformProp = getSupportedPropertyName('transform'); var popperStyles = data.instance.popper.style; // assignment to help minification var top = popperStyles.top, left = popperStyles.left, transform = popperStyles[transformProp]; popperStyles.top = ''; popperStyles.left = ''; popperStyles[transformProp] = ''; var boundaries = getBoundaries(data.instance.popper, data.instance.reference, options.padding, boundariesElement, data.positionFixed); // NOTE: DOM access here // restores the original style properties after the offsets have been computed popperStyles.top = top; popperStyles.left = left; popperStyles[transformProp] = transform; options.boundaries = boundaries; var order = options.priority; var popper = data.offsets.popper; var check = { primary: function primary(placement) { var value = popper[placement]; if (popper[placement] < boundaries[placement] && !options.escapeWithReference) { value = Math.max(popper[placement], boundaries[placement]); } return defineProperty({}, placement, value); }, secondary: function secondary(placement) { var mainSide = placement === 'right' ? 'left' : 'top'; var value = popper[mainSide]; if (popper[placement] > boundaries[placement] && !options.escapeWithReference) { value = Math.min(popper[mainSide], boundaries[placement] - (placement === 'right' ? popper.width : popper.height)); } return defineProperty({}, mainSide, value); } }; order.forEach(function (placement) { var side = ['left', 'top'].indexOf(placement) !== -1 ? 
'primary' : 'secondary'; popper = _extends({}, popper, check[side](placement)); }); data.offsets.popper = popper; return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function shift(data) { var placement = data.placement; var basePlacement = placement.split('-')[0]; var shiftvariation = placement.split('-')[1]; // if shift shiftvariation is specified, run the modifier if (shiftvariation) { var _data$offsets = data.offsets, reference = _data$offsets.reference, popper = _data$offsets.popper; var isVertical = ['bottom', 'top'].indexOf(basePlacement) !== -1; var side = isVertical ? 'left' : 'top'; var measurement = isVertical ? 'width' : 'height'; var shiftOffsets = { start: defineProperty({}, side, reference[side]), end: defineProperty({}, side, reference[side] + reference[measurement] - popper[measurement]) }; data.offsets.popper = _extends({}, popper, shiftOffsets[shiftvariation]); } return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by update method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function hide(data) { if (!isModifierRequired(data.instance.modifiers, 'hide', 'preventOverflow')) { return data; } var refRect = data.offsets.reference; var bound = find(data.instance.modifiers, function (modifier) { return modifier.name === 'preventOverflow'; }).boundaries; if (refRect.bottom < bound.top || refRect.left > bound.right || refRect.top > bound.bottom || refRect.right < bound.left) { // Avoid unnecessary DOM access if visibility hasn't changed if (data.hide === true) { return data; } data.hide = true; data.attributes['x-out-of-boundaries'] = ''; } else { // Avoid unnecessary DOM access if visibility hasn't changed if (data.hide === false) { return data; } data.hide = false; data.attributes['x-out-of-boundaries'] = false; } return data; } /** * @function * @memberof Modifiers * @argument {Object} data - The data object generated by `update` method * @argument {Object} options - Modifiers configuration and options * @returns {Object} The data object, properly modified */ function inner(data) { var placement = data.placement; var basePlacement = placement.split('-')[0]; var _data$offsets = data.offsets, popper = _data$offsets.popper, reference = _data$offsets.reference; var isHoriz = ['left', 'right'].indexOf(basePlacement) !== -1; var subtractLength = ['top', 'left'].indexOf(basePlacement) === -1; popper[isHoriz ? 'left' : 'top'] = reference[basePlacement] - (subtractLength ? popper[isHoriz ? 'width' : 'height'] : 0); data.placement = getOppositePlacement(placement); data.offsets.popper = getClientRect(popper); return data; } /** * Modifier function, each modifier can have a function of this type assigned * to its `fn` property.<br /> * These functions will be called on each update, this means that you must * make sure they are performant enough to avoid performance bottlenecks. 
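 *
 * A minimal custom modifier sketch (illustrative; `logPlacement` is a
 * hypothetical name, not part of this bundle):
 *
 *     var logPlacement = {
 *       order: 850,
 *       enabled: true,
 *       fn: function (data) {
 *         console.log(data.placement);
 *         return data; // every ModifierFn must return the data object
 *       }
 *     };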
 *
 * @function ModifierFn
 * @argument {dataObject} data - The data object generated by `update` method
 * @argument {Object} options - Modifiers configuration and options
 * @returns {dataObject} The data object, properly modified
 */

/**
 * Modifiers are plugins used to alter the behavior of your poppers.<br />
 * Popper.js uses a set of 9 modifiers to provide all the basic functionalities
 * needed by the library.
 *
 * Usually you don't want to override the `order`, `fn` and `onLoad` props.
 * All the other properties are configurations that could be tweaked.
 * @namespace modifiers
 */
var modifiers = {
  /**
   * Modifier used to shift the popper on the start or end of its reference
   * element.<br />
   * It will read the variation of the `placement` property.<br />
   * It can be either `-end` or `-start`.
   * @memberof modifiers
   * @inner
   */
  shift: {
    /** @prop {number} order=100 - Index used to define the order of execution */
    order: 100,
    /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
    enabled: true,
    /** @prop {ModifierFn} */
    fn: shift
  },

  /**
   * The `offset` modifier can shift your popper on both its axis.
   *
   * It accepts the following units:
   * - `px` or unit-less, interpreted as pixels
   * - `%` or `%r`, percentage relative to the length of the reference element
   * - `%p`, percentage relative to the length of the popper element
   * - `vw`, CSS viewport width unit
   * - `vh`, CSS viewport height unit
   *
   * Here, "length" refers to the main axis relative to the placement of the popper.<br />
   * This means that if the placement is `top` or `bottom`, the length will be the
   * `width`. In case of `left` or `right`, it will be the `height`.
   *
   * You can provide a single value (as `Number` or `String`), or a pair of values
   * as `String` separated by a comma or one (or more) white spaces.<br />
   * The latter is a deprecated method because it leads to confusion and will be
   * removed in v2.<br />
   * Additionally, it accepts additions and subtractions between different units.
   * Note that multiplications and divisions aren't supported.
   *
   * Valid examples are:
   * ```
   * 10
   * '10%'
   * '10, 10'
   * '10%, 10'
   * '10 + 10%'
   * '10 - 5vh + 3%'
   * '-10px + 5vh, 5px - 6%'
   * ```
   * > **NB**: If you desire to apply offsets to your poppers in a way that may make them overlap
   * > with their reference element, unfortunately, you will have to disable the `flip` modifier.
   * > You can read more on this at this [issue](https://github.com/FezVrasta/popper.js/issues/373).
   *
   * @memberof modifiers
   * @inner
   */
  offset: {
    /** @prop {number} order=200 - Index used to define the order of execution */
    order: 200,
    /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
    enabled: true,
    /** @prop {ModifierFn} */
    fn: offset,
    /** @prop {Number|String} offset=0
     * The offset value as described in the modifier description
     */
    offset: 0
  },

  /**
   * Modifier used to prevent the popper from being positioned outside the boundary.
   *
   * A scenario exists where the reference itself is not within the boundaries.<br />
   * We can say it has "escaped the boundaries" — or just "escaped".<br />
   * In this case we need to decide whether the popper should either:
   *
   * - detach from the reference and remain "trapped" in the boundaries, or
   * - ignore the boundary and "escape with its reference"
   *
   * When `escapeWithReference` is set to `true` and the reference is completely
   * outside its boundaries, the popper will overflow (or completely leave)
   * the boundaries in order to remain attached to the edge of the reference.
* * @memberof modifiers * @inner */ preventOverflow: { /** @prop {number} order=300 - Index used to define the order of execution */ order: 300, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: preventOverflow, /** * @prop {Array} [priority=['left','right','top','bottom']] * Popper will try to prevent overflow following these priorities by default, * then, it could overflow on the left and on top of the `boundariesElement` */ priority: ['left', 'right', 'top', 'bottom'], /** * @prop {number} padding=5 * Amount of pixel used to define a minimum distance between the boundaries * and the popper. This makes sure the popper always has a little padding * between the edges of its container */ padding: 5, /** * @prop {String|HTMLElement} boundariesElement='scrollParent' * Boundaries used by the modifier. Can be `scrollParent`, `window`, * `viewport` or any DOM element. */ boundariesElement: 'scrollParent' }, /** * Modifier used to make sure the reference and its popper stay near each other * without leaving any gap between the two. Especially useful when the arrow is * enabled and you want to ensure that it points to its reference element. * It cares only about the first axis. You can still have poppers with margin * between the popper and its reference element. * @memberof modifiers * @inner */ keepTogether: { /** @prop {number} order=400 - Index used to define the order of execution */ order: 400, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: keepTogether }, /** * This modifier is used to move the `arrowElement` of the popper to make * sure it is positioned between the reference element and its popper element. * It will read the outer size of the `arrowElement` node to detect how many * pixels of conjunction are needed. * * It has no effect if no `arrowElement` is provided. * @memberof modifiers * @inner */ arrow: { /** @prop {number} order=500 - Index used to define the order of execution */ order: 500, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: arrow, /** @prop {String|HTMLElement} element='[x-arrow]' - Selector or node used as arrow */ element: '[x-arrow]' }, /** * Modifier used to flip the popper's placement when it starts to overlap its * reference element. * * Requires the `preventOverflow` modifier before it in order to work. * * **NOTE:** this modifier will interrupt the current update cycle and will * restart it if it detects the need to flip the placement. * @memberof modifiers * @inner */ flip: { /** @prop {number} order=600 - Index used to define the order of execution */ order: 600, /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */ enabled: true, /** @prop {ModifierFn} */ fn: flip, /** * @prop {String|Array} behavior='flip' * The behavior used to change the popper's placement. It can be one of * `flip`, `clockwise`, `counterclockwise` or an array with a list of valid * placements (with optional variations) */ behavior: 'flip', /** * @prop {number} padding=5 * The popper will flip if it hits the edges of the `boundariesElement` */ padding: 5, /** * @prop {String|HTMLElement} boundariesElement='viewport' * The element which will define the boundaries of the popper position. 
   * The popper will never be placed outside of the defined boundaries
   * (except if `keepTogether` is enabled)
   */
  boundariesElement: 'viewport'
  },

  /**
   * Modifier used to make the popper flow toward the inside of the reference element.
   * By default, when this modifier is disabled, the popper will be placed outside
   * the reference element.
   * @memberof modifiers
   * @inner
   */
  inner: {
    /** @prop {number} order=700 - Index used to define the order of execution */
    order: 700,
    /** @prop {Boolean} enabled=false - Whether the modifier is enabled or not */
    enabled: false,
    /** @prop {ModifierFn} */
    fn: inner
  },

  /**
   * Modifier used to hide the popper when its reference element is outside of the
   * popper boundaries. It will set a `x-out-of-boundaries` attribute which can
   * be used to hide the popper with a CSS selector when its reference is
   * out of boundaries.
   *
   * Requires the `preventOverflow` modifier before it in order to work.
   * @memberof modifiers
   * @inner
   */
  hide: {
    /** @prop {number} order=800 - Index used to define the order of execution */
    order: 800,
    /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
    enabled: true,
    /** @prop {ModifierFn} */
    fn: hide
  },

  /**
   * Computes the style that will be applied to the popper element to get it
   * properly positioned.
   *
   * Note that this modifier will not touch the DOM, it just prepares the styles
   * so that the `applyStyle` modifier can apply it. This separation is useful
   * in case you need to replace `applyStyle` with a custom implementation.
   *
   * This modifier has `850` as `order` value to maintain backward compatibility
   * with previous versions of Popper.js. Expect the modifiers ordering method
   * to change in future major versions of the library.
   *
   * @memberof modifiers
   * @inner
   */
  computeStyle: {
    /** @prop {number} order=850 - Index used to define the order of execution */
    order: 850,
    /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
    enabled: true,
    /** @prop {ModifierFn} */
    fn: computeStyle,
    /**
     * @prop {Boolean} gpuAcceleration=true
     * If true, it uses the CSS 3D transformation to position the popper.
     * Otherwise, it will use the `top` and `left` properties
     */
    gpuAcceleration: true,
    /**
     * @prop {string} [x='bottom']
     * Where to anchor the X axis (`bottom` or `top`). AKA X offset origin.
     * Change this if your popper should grow in a direction different from `bottom`
     */
    x: 'bottom',
    /**
     * @prop {string} [y='right']
     * Where to anchor the Y axis (`left` or `right`). AKA Y offset origin.
     * Change this if your popper should grow in a direction different from `right`
     */
    y: 'right'
  },

  /**
   * Applies the computed styles to the popper element.
   *
   * All the DOM manipulations are limited to this modifier. This is useful in case
   * you want to integrate Popper.js inside a framework or view library and you
   * want to delegate all the DOM manipulations to it.
   *
   * Note that if you disable this modifier, you must make sure the popper element
   * has its position set to `absolute` before Popper.js can do its work!
   *
   * Just disable this modifier and define your own to achieve the desired effect.
   *
   * @memberof modifiers
   * @inner
   */
  applyStyle: {
    /** @prop {number} order=900 - Index used to define the order of execution */
    order: 900,
    /** @prop {Boolean} enabled=true - Whether the modifier is enabled or not */
    enabled: true,
    /** @prop {ModifierFn} */
    fn: applyStyle,
    /** @prop {Function} */
    onLoad: applyStyleOnLoad,
    /**
     * @deprecated since version 1.10.0, the property moved to `computeStyle` modifier
     * @prop {Boolean} gpuAcceleration=true
     * If true, it uses the CSS 3D transformation to position the popper.
     * Otherwise, it will use the `top` and `left` properties
     */
    gpuAcceleration: undefined
  }
};

/**
 * The `dataObject` is an object containing all the information used by Popper.js.
 * This object is passed to modifiers and to the `onCreate` and `onUpdate` callbacks.
 * @name dataObject
 * @property {Object} data.instance The Popper.js instance
 * @property {String} data.placement Placement applied to popper
 * @property {String} data.originalPlacement Placement originally defined on init
 * @property {Boolean} data.flipped True if popper has been flipped by flip modifier
 * @property {Boolean} data.hide True if the reference element is out of boundaries, useful to know when to hide the popper
 * @property {HTMLElement} data.arrowElement Node used as arrow by arrow modifier
 * @property {Object} data.styles Any CSS property defined here will be applied to the popper. It expects the JavaScript nomenclature (eg. `marginBottom`)
 * @property {Object} data.arrowStyles Any CSS property defined here will be applied to the popper arrow. It expects the JavaScript nomenclature (eg. `marginBottom`)
 * @property {Object} data.boundaries Offsets of the popper boundaries
 * @property {Object} data.offsets The measurements of popper, reference and arrow elements
 * @property {Object} data.offsets.popper `top`, `left`, `width`, `height` values
 * @property {Object} data.offsets.reference `top`, `left`, `width`, `height` values
 * @property {Object} data.offsets.arrow `top` and `left` offsets, only one of them will be different from 0
 */

/**
 * Default options provided to Popper.js constructor.<br />
 * These can be overridden using the `options` argument of Popper.js.<br />
 * To override an option, simply pass an object with the same
 * structure as the `options` object, as the 3rd argument. For example:
 * ```
 * new Popper(ref, pop, {
 *   modifiers: {
 *     preventOverflow: { enabled: false }
 *   }
 * })
 * ```
 * @type {Object}
 * @static
 * @memberof Popper
 */
var Defaults = {
  /**
   * Popper's placement.
   * @prop {Popper.placements} placement='bottom'
   */
  placement: 'bottom',

  /**
   * Set this to true if you want popper to position itself in 'fixed' mode
   * @prop {Boolean} positionFixed=false
   */
  positionFixed: false,

  /**
   * Whether events (resize, scroll) are initially enabled.
   * @prop {Boolean} eventsEnabled=true
   */
  eventsEnabled: true,

  /**
   * Set to true if you want to automatically remove the popper when
   * you call the `destroy` method.
   * @prop {Boolean} removeOnDestroy=false
   */
  removeOnDestroy: false,

  /**
   * Callback called when the popper is created.<br />
   * By default, it is set to no-op.<br />
   * Access Popper.js instance with `data.instance`.
   * @prop {onCreate}
   */
  onCreate: function onCreate() {},

  /**
   * Callback called when the popper is updated. This callback is not called
   * on the initialization/creation of the popper, but only on subsequent
   * updates.<br />
   * By default, it is set to no-op.<br />
   * Access Popper.js instance with `data.instance`.
   * @prop {onUpdate}
   */
  onUpdate: function onUpdate() {},

  /**
   * List of modifiers used to modify the offsets before they are applied to the popper.
   * They provide most of the functionalities of Popper.js.
   * @prop {modifiers}
   */
  modifiers: modifiers
};

/**
 * @callback onCreate
 * @param {dataObject} data
 */

/**
 * @callback onUpdate
 * @param {dataObject} data
 */

// Utils
// Methods
var Popper = function () {
  /**
   * Creates a new Popper.js instance.
   * @class Popper
   * @param {HTMLElement|referenceObject} reference - The reference element used to position the popper
   * @param {HTMLElement} popper - The HTML element used as the popper
   * @param {Object} options - Your custom options to override the ones defined in [Defaults](#defaults)
   * @return {Object} instance - The generated Popper.js instance
   */
  function Popper(reference, popper) {
    var _this = this;

    var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
    classCallCheck(this, Popper);

    this.scheduleUpdate = function () {
      return requestAnimationFrame(_this.update);
    };

    // make update() debounced, so that it only runs at most once-per-tick
    this.update = debounce(this.update.bind(this));

    // with {} we create a new object with the options inside it
    this.options = _extends({}, Popper.Defaults, options);

    // init state
    this.state = {
      isDestroyed: false,
      isCreated: false,
      scrollParents: []
    };

    // get reference and popper elements (allow jQuery wrappers)
    this.reference = reference && reference.jquery ? reference[0] : reference;
    this.popper = popper && popper.jquery ? popper[0] : popper;

    // Deep merge modifiers options
    this.options.modifiers = {};
    Object.keys(_extends({}, Popper.Defaults.modifiers, options.modifiers)).forEach(function (name) {
      _this.options.modifiers[name] = _extends({}, Popper.Defaults.modifiers[name] || {}, options.modifiers ? options.modifiers[name] : {});
    });

    // Refactoring modifiers' list (Object => Array)
    this.modifiers = Object.keys(this.options.modifiers).map(function (name) {
      return _extends({
        name: name
      }, _this.options.modifiers[name]);
    })
    // sort the modifiers by order
    .sort(function (a, b) {
      return a.order - b.order;
    });

    // modifiers have the ability to execute arbitrary code when Popper.js gets initialized
    // such code is executed in the same order as its modifiers
    // they could add new properties to their options configuration
    // BE AWARE: don't add options to `options.modifiers.name` but to `modifierOptions`!
    this.modifiers.forEach(function (modifierOptions) {
      if (modifierOptions.enabled && isFunction(modifierOptions.onLoad)) {
        modifierOptions.onLoad(_this.reference, _this.popper, _this.options, modifierOptions, _this.state);
      }
    });

    // fire the first update to position the popper in the right place
    this.update();

    var eventsEnabled = this.options.eventsEnabled;
    if (eventsEnabled) {
      // setup event listeners, they will take care of updating the position in specific situations
      this.enableEventListeners();
    }

    this.state.eventsEnabled = eventsEnabled;
  }

  // We can't use class properties because they don't get listed in the
  // class prototype and break stuff like Sinon stubs

  createClass(Popper, [{
    key: 'update',
    value: function update$$1() {
      return update.call(this);
    }
  }, {
    key: 'destroy',
    value: function destroy$$1() {
      return destroy.call(this);
    }
  }, {
    key: 'enableEventListeners',
    value: function enableEventListeners$$1() {
      return enableEventListeners.call(this);
    }
  }, {
    key: 'disableEventListeners',
    value: function disableEventListeners$$1() {
      return disableEventListeners.call(this);
    }

    /**
     * Schedules an update.
     * It will run on the next UI update available.
     * @method scheduleUpdate
     * @memberof Popper
     */

    /**
     * Collection of utilities useful when writing custom modifiers.
     * Starting from version 1.7, this method is available only if you
     * include `popper-utils.js` before `popper.js`.
     *
     * **DEPRECATION**: This way to access PopperUtils is deprecated
     * and will be removed in v2! Use the PopperUtils module directly instead.
     * Due to the high instability of the methods contained in Utils, we can't
     * guarantee them to follow semver. Use them at your own risk!
     * @static
     * @private
     * @type {Object}
     * @deprecated since version 1.8
     * @member Utils
     * @memberof Popper
     */

  }]);
  return Popper;
}();

/**
 * The `referenceObject` is an object that provides an interface compatible with Popper.js
 * and lets you use it as a replacement for a real DOM node.<br />
 * You can use this method to position a popper relative to a set of coordinates
 * in case you don't have a DOM node to use as reference.
 *
 * ```
 * new Popper(referenceObject, popperNode);
 * ```
 *
 * NB: This feature isn't supported in Internet Explorer 10.
 * @name referenceObject
 * @property {Function} data.getBoundingClientRect
 * A function that returns a set of coordinates compatible with the native `getBoundingClientRect` method.
 * @property {number} data.clientWidth
 * An ES6 getter that will return the width of the virtual reference element.
 * @property {number} data.clientHeight
 * An ES6 getter that will return the height of the virtual reference element.
 */

Popper.Utils = (typeof window !== 'undefined' ? window : global).PopperUtils;
Popper.placements = placements;
Popper.Defaults = Defaults;

/* harmony default export */ __webpack_exports__["default"] = (Popper);
//# sourceMappingURL=popper.js.map

/* WEBPACK VAR INJECTION */}.call(__webpack_exports__, __webpack_require__(4)))

/***/ }),
/* 2 */
/***/ (function(module, exports, __webpack_require__) {

__webpack_require__(3);
module.exports = __webpack_require__(10);

/***/ }),
/* 3 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {

"use strict";
Object.defineProperty(__webpack_exports__, "__esModule", { value: true });
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_jquery__ = __webpack_require__(0);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_jquery___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_0_jquery__);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1_popper_js__ = __webpack_require__(1);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2_bootstrap__ = __webpack_require__(5);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2_bootstrap___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_2_bootstrap__);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3_waypoints_lib_jquery_waypoints_min_js__ = __webpack_require__(6);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3_waypoints_lib_jquery_waypoints_min_js___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_3_waypoints_lib_jquery_waypoints_min_js__);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4_waypoints_lib_shortcuts_inview_min_js__ = __webpack_require__(7);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4_waypoints_lib_shortcuts_inview_min_js___default = __webpack_require__.n(__WEBPACK_IMPORTED_MODULE_4_waypoints_lib_shortcuts_inview_min_js__);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5_jssocials_dist_jssocials_min_js__ = __webpack_require__(8);
/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5_jssocials_dist_jssocials_min_js___default =
__webpack_require__.n(__WEBPACK_IMPORTED_MODULE_5_jssocials_dist_jssocials_min_js__); /* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__functions_js__ = __webpack_require__(9); // Dependencies // JQuery plugins // Functions // Google Maps Api /** * new_map * * This function will render a Google Map onto the selected jQuery element * * @type function * @date 8/11/2013 * @since 4.3.0 * * @param $el (jQuery element) * @return n/a */ function new_map($el) { // var var $markers = $el.find('.marker'); // vars var args = { zoom: 5, center: new google.maps.LatLng(0, 0), zoomControlOptions: { style: google.maps.ZoomControlStyle.SMALL }, streetViewControl: false, styles: [{ featureType: "poi", elementType: "labels", stylers: [{ visibility: "off" }] }], mapTypeControl: false, mapTypeId: google.maps.MapTypeId.ROADMAP }; // create map var map = new google.maps.Map($el[0], args); // add a markers reference map.markers = []; // add markers $markers.each(function () { add_marker(__WEBPACK_IMPORTED_MODULE_0_jquery___default()(this), map); }); // center map center_map(map); // return return map; } /** * add_marker * * This function will add a marker to the selected Google Map * * @type function * @date 8/11/2013 * @since 4.3.0 * * @param $marker (jQuery element) * @param map (Google Map object) * @return n/a */ function add_marker($marker, map) { // var var latlng = new google.maps.LatLng($marker.attr('data-lat'), $marker.attr('data-lng')); // create marker var marker = new google.maps.Marker({ position: latlng, map: map }); // add to array map.markers.push(marker); // if marker contains HTML, add it to an infoWindow if ($marker.html()) { // create info window var infowindow = new google.maps.InfoWindow({ content: $marker.html() }); // show info window when marker is clicked google.maps.event.addListener(marker, 'click', function () { infowindow.open(map, marker); }); } } /** * center_map * * This function will center the map, showing all markers attached to this map * * @type function * @date 8/11/2013 * @since 4.3.0 * * @param map (Google Map object) * @return n/a */ function center_map(map) { // vars var bounds = new google.maps.LatLngBounds(); // loop through all markers and create bounds __WEBPACK_IMPORTED_MODULE_0_jquery___default.a.each(map.markers, function (i, marker) { var latlng = new google.maps.LatLng(marker.position.lat(), marker.position.lng()); bounds.extend(latlng); }); // only 1 marker? 
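// fitBounds() on a single-point bounds would zoom the map in to its maximum
// zoom level, so center manually and use a fixed zoom instead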
if (map.markers.length === 1) {
  // set center of map
  map.setCenter(bounds.getCenter());
  map.setZoom(8);
} else {
  // fit to bounds
  map.fitBounds(bounds);
}
}

var map = null;

/**
 * Ready event
 */
jQuery(document).ready(function ($) {

  /**
   * Convert a WP sub-menu to a Bootstrap dropdown-menu
   */
  var $navbar = $('nav.navbar');
  var $dropdowns = $navbar.find('.menu-item-has-children');
  // Iterate over $dropdowns
  $dropdowns.each(function () {
    // Get <a> child
    var $dropdownToggle = $(this).children('a');
    // Add class and attributes to <a>
    $dropdownToggle.attr({ "class": "dropdown-toggle", "href": "", "id": "navbarDropdown", "role": "button", "data-toggle": "dropdown", "aria-haspopup": "true", "aria-expanded": "false" });
    // Get sub-menu and its nested <a> elements
    var $subMenu = $(this).find('.sub-menu');
    var $dropdownItems = $subMenu.find('a');
    // Create div.dropdown-menu
    $(this).append('<div class="dropdown-menu" aria-labelledby="navbarDropdown"></div>');
    var $dropdownMenu = $(this).find('.dropdown-menu');
    // Iterate over $dropdownItems
    $dropdownItems.each(function () {
      // Add .dropdown-item class
      $(this).addClass('dropdown-item');
      // Add to $dropdownMenu
      $dropdownMenu.append(this);
    });
    // Remove sub-menu
    $subMenu.remove();
  });

  /**
   * Prevent the default event on submit inputs and button elements
   */
  var $submits = $('input[type=submit], button');
  $submits.each(function () {
    $(this).click(function (event) {
      event.preventDefault();
    });
  });

  /**
   * Make a hidden element visible when it is in the viewport
   */
  // Get .hidden elements from the DOM
  var $hidden = $(".hidden");
  // Collection of waypoints
  var waypoints = {};
  if ($hidden.length > 0) {
    // Iterate over each .hidden element and create a waypoint object
    $hidden.each(function (index) {
      // Create waypoint object
      waypoints[index] = new Waypoint.Inview({
        element: $hidden[index],
        enter: function enter(direction) {
          // Make visible
          $($hidden[index]).css('opacity', '1');
        }
        // exited: function() {
        //   // Make invisible
        //   // $($hidden[index]).css('opacity', '0');
        // }
      });
    }); // end .each()
  }

  /**
   * The following code is for the functionality of the join section
   */
  // Get the buttons from the .join section
  var $buttonSiblings = $('.join .join__button');
  var $buttonStarted = $('.join .join__button--started');
  var $buttonConnected = $('.join .join__button--connected');
  var $buttonInvolved = $('.join .join__button--involved');
  // Get the .get elements from the .join section
  var $getSiblings = $('.join .get');
  var $getStarted = $('.join .get--started');
  var $getConnected = $('.join .get--connected');
  var $getInvolved = $('.join .get--involved');
  var activate = 'active';
  // Activate the first section in view
  $($buttonSiblings[0]).addClass(activate);
  $($getSiblings[0]).addClass(activate);
  $buttonStarted.click(function () {
    Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["a" /* addClassRemoveItFromSiblings */])($(this), $buttonSiblings, activate);
    Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["a" /* addClassRemoveItFromSiblings */])($getStarted, $getSiblings, activate);
  });
  $buttonConnected.click(function () {
    Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["a" /* addClassRemoveItFromSiblings */])($(this), $buttonSiblings, activate);
    Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["a" /* addClassRemoveItFromSiblings */])($getConnected, $getSiblings, activate);
  });
  $buttonInvolved.click(function () {
    Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["a" /* addClassRemoveItFromSiblings */])($(this), $buttonSiblings, activate);
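    // ...then reveal the matching .get--involved panel the same way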
    Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["a" /* addClassRemoveItFromSiblings */])($getInvolved, $getSiblings, activate);
  });

  /**
   * On click, buttons scroll smoothly to their section on the Get Involved page
   */
  var $btnDevelopers = $('.btn--developers');
  var $btnNondevelopers = $('.btn--nondevelopers');
  var $btnSuggestions = $('.btn--suggestions');
  var $btnSupport = $('.btn--support');
  // Check if exists
  if ($btnDevelopers.length > 0) {
    var $sectionDevelopers = $('.developers');
    $btnDevelopers.click(function () {
      Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["b" /* scrollSmoothlyTo */])($sectionDevelopers);
    });
  }
  if ($btnNondevelopers.length > 0) {
    var $sectionNondevelopers = $('.nondevelopers');
    $btnNondevelopers.click(function () {
      Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["b" /* scrollSmoothlyTo */])($sectionNondevelopers);
    });
  }
  if ($btnSuggestions.length > 0) {
    var $sectionSuggestions = $('.suggestions');
    $btnSuggestions.click(function () {
      Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["b" /* scrollSmoothlyTo */])($sectionSuggestions);
    });
  }
  if ($btnSupport.length > 0) {
    var $sectionSupport = $('.support');
    $btnSupport.click(function () {
      Object(__WEBPACK_IMPORTED_MODULE_6__functions_js__["b" /* scrollSmoothlyTo */])($sectionSupport);
    });
  }

  /**
   * Render Google Maps map
   */
  var $map = $(".acf-map");
  if ($map.length > 0) {
    $map.each(function () {
      // create map
      map = new_map($(this));
    });
  }

  /**
   * Add share links
   */
  {
    var $post = $(".post__share");
    if ($post.length > 0) {
      var postUrl = window.location.href;
      // Shorten the url with the Rebrandly API
      $.ajax({
        url: "https://api.rebrandly.com/v1/links",
        type: "post",
        data: JSON.stringify({ "destination": postUrl, "domain": { "fullName": "rebrand.ly" } }),
        headers: { "Content-Type": "application/json", "apikey": "7f5c117299f24f8a88ba759bf4d6ccf9" },
        dataType: "json",
        success: function success(link) {
          var newUrl = link.shortUrl;
          // Set up the jsSocials jQuery plugin
          $post.jsSocials({
            url: newUrl,
            showLabel: false,
            showCount: false,
            shareIn: "popup",
            shares: [{ share: "facebook", logo: "fab fa-facebook" }, { share: "googleplus", logo: "fab fa-google-plus" }, { share: "twitter", logo: "fab fa-twitter" }, { share: "pinterest", logo: "fab fa-pinterest" }, { share: "linkedin", logo: "fab fa-linkedin" }]
          });
        } // end success
      }); // end ajax
    }
  }
});

/***/ }),
/* 4 */
/***/ (function(module, exports) {

var g;

// This works in non-strict mode
g = (function() {
  return this;
})();

try {
  // This works if eval is allowed (see CSP)
  g = g || Function("return this")() || (1,eval)("this");
} catch(e) {
  // This works if the window reference is available
  if(typeof window === "object") g = window;
}

// g can still be undefined, but nothing to do about it...
// We return undefined, instead of nothing here, so it's
// easier to handle this case.
module.exports = g;

/***/ }),
/* 5 */
/***/ (function(module, exports, __webpack_require__) {

/*!
 * Bootstrap v4.1.3 (https://getbootstrap.com/)
 * Copyright 2011-2018 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 */
(function (global, factory) {
  true ? factory(exports, __webpack_require__(0), __webpack_require__(1)) :
  typeof define === 'function' && define.amd ? define(['exports', 'jquery', 'popper.js'], factory) :
  (factory((global.bootstrap = {}),global.jQuery,global.Popper));
}(this, (function (exports,$,Popper) { 'use strict';

$ = $ && $.hasOwnProperty('default') ?
$['default'] : $; Popper = Popper && Popper.hasOwnProperty('default') ? Popper['default'] : Popper; function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; var ownKeys = Object.keys(source); if (typeof Object.getOwnPropertySymbols === 'function') { ownKeys = ownKeys.concat(Object.getOwnPropertySymbols(source).filter(function (sym) { return Object.getOwnPropertyDescriptor(source, sym).enumerable; })); } ownKeys.forEach(function (key) { _defineProperty(target, key, source[key]); }); } return target; } function _inheritsLoose(subClass, superClass) { subClass.prototype = Object.create(superClass.prototype); subClass.prototype.constructor = subClass; subClass.__proto__ = superClass; } /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): util.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Util = function ($$$1) { /** * ------------------------------------------------------------------------ * Private TransitionEnd Helpers * ------------------------------------------------------------------------ */ var TRANSITION_END = 'transitionend'; var MAX_UID = 1000000; var MILLISECONDS_MULTIPLIER = 1000; // Shoutout AngusCroll (https://goo.gl/pxwQGp) function toType(obj) { return {}.toString.call(obj).match(/\s([a-z]+)/i)[1].toLowerCase(); } function getSpecialTransitionEndEvent() { return { bindType: TRANSITION_END, delegateType: TRANSITION_END, handle: function handle(event) { if ($$$1(event.target).is(this)) { return event.handleObj.handler.apply(this, arguments); // eslint-disable-line prefer-rest-params } return undefined; // eslint-disable-line no-undefined } }; } function transitionEndEmulator(duration) { var _this = this; var called = false; $$$1(this).one(Util.TRANSITION_END, function () { called = true; }); setTimeout(function () { if (!called) { Util.triggerTransitionEnd(_this); } }, duration); return this; } function setTransitionEndSupport() { $$$1.fn.emulateTransitionEnd = transitionEndEmulator; $$$1.event.special[Util.TRANSITION_END] = getSpecialTransitionEndEvent(); } /** * -------------------------------------------------------------------------- * Public Util Api * -------------------------------------------------------------------------- */ var Util = { TRANSITION_END: 'bsTransitionEnd', getUID: function getUID(prefix) { do { // eslint-disable-next-line no-bitwise prefix += ~~(Math.random() * MAX_UID); // "~~" acts like a faster Math.floor() here } while (document.getElementById(prefix)); return prefix; }, getSelectorFromElement: function getSelectorFromElement(element) { var selector = 
element.getAttribute('data-target'); if (!selector || selector === '#') { selector = element.getAttribute('href') || ''; } try { return document.querySelector(selector) ? selector : null; } catch (err) { return null; } }, getTransitionDurationFromElement: function getTransitionDurationFromElement(element) { if (!element) { return 0; } // Get transition-duration of the element var transitionDuration = $$$1(element).css('transition-duration'); var floatTransitionDuration = parseFloat(transitionDuration); // Return 0 if element or transition duration is not found if (!floatTransitionDuration) { return 0; } // If multiple durations are defined, take the first transitionDuration = transitionDuration.split(',')[0]; return parseFloat(transitionDuration) * MILLISECONDS_MULTIPLIER; }, reflow: function reflow(element) { return element.offsetHeight; }, triggerTransitionEnd: function triggerTransitionEnd(element) { $$$1(element).trigger(TRANSITION_END); }, // TODO: Remove in v5 supportsTransitionEnd: function supportsTransitionEnd() { return Boolean(TRANSITION_END); }, isElement: function isElement(obj) { return (obj[0] || obj).nodeType; }, typeCheckConfig: function typeCheckConfig(componentName, config, configTypes) { for (var property in configTypes) { if (Object.prototype.hasOwnProperty.call(configTypes, property)) { var expectedTypes = configTypes[property]; var value = config[property]; var valueType = value && Util.isElement(value) ? 'element' : toType(value); if (!new RegExp(expectedTypes).test(valueType)) { throw new Error(componentName.toUpperCase() + ": " + ("Option \"" + property + "\" provided type \"" + valueType + "\" ") + ("but expected type \"" + expectedTypes + "\".")); } } } } }; setTransitionEndSupport(); return Util; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): alert.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Alert = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'alert'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.alert'; var EVENT_KEY = "." 
+ DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var Selector = { DISMISS: '[data-dismiss="alert"]' }; var Event = { CLOSE: "close" + EVENT_KEY, CLOSED: "closed" + EVENT_KEY, CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY }; var ClassName = { ALERT: 'alert', FADE: 'fade', SHOW: 'show' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Alert = /*#__PURE__*/ function () { function Alert(element) { this._element = element; } // Getters var _proto = Alert.prototype; // Public _proto.close = function close(element) { var rootElement = this._element; if (element) { rootElement = this._getRootElement(element); } var customEvent = this._triggerCloseEvent(rootElement); if (customEvent.isDefaultPrevented()) { return; } this._removeElement(rootElement); }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); this._element = null; }; // Private _proto._getRootElement = function _getRootElement(element) { var selector = Util.getSelectorFromElement(element); var parent = false; if (selector) { parent = document.querySelector(selector); } if (!parent) { parent = $$$1(element).closest("." + ClassName.ALERT)[0]; } return parent; }; _proto._triggerCloseEvent = function _triggerCloseEvent(element) { var closeEvent = $$$1.Event(Event.CLOSE); $$$1(element).trigger(closeEvent); return closeEvent; }; _proto._removeElement = function _removeElement(element) { var _this = this; $$$1(element).removeClass(ClassName.SHOW); if (!$$$1(element).hasClass(ClassName.FADE)) { this._destroyElement(element); return; } var transitionDuration = Util.getTransitionDurationFromElement(element); $$$1(element).one(Util.TRANSITION_END, function (event) { return _this._destroyElement(element, event); }).emulateTransitionEnd(transitionDuration); }; _proto._destroyElement = function _destroyElement(element) { $$$1(element).detach().trigger(Event.CLOSED).remove(); }; // Static Alert._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var $element = $$$1(this); var data = $element.data(DATA_KEY); if (!data) { data = new Alert(this); $element.data(DATA_KEY, data); } if (config === 'close') { data[config](this); } }); }; Alert._handleDismiss = function _handleDismiss(alertInstance) { return function (event) { if (event) { event.preventDefault(); } alertInstance.close(this); }; }; _createClass(Alert, null, [{ key: "VERSION", get: function get() { return VERSION; } }]); return Alert; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.CLICK_DATA_API, Selector.DISMISS, Alert._handleDismiss(new Alert())); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Alert._jQueryInterface; $$$1.fn[NAME].Constructor = Alert; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Alert._jQueryInterface; }; return Alert; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): button.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Button = function 
($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'button'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.button'; var EVENT_KEY = "." + DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var ClassName = { ACTIVE: 'active', BUTTON: 'btn', FOCUS: 'focus' }; var Selector = { DATA_TOGGLE_CARROT: '[data-toggle^="button"]', DATA_TOGGLE: '[data-toggle="buttons"]', INPUT: 'input', ACTIVE: '.active', BUTTON: '.btn' }; var Event = { CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY, FOCUS_BLUR_DATA_API: "focus" + EVENT_KEY + DATA_API_KEY + " " + ("blur" + EVENT_KEY + DATA_API_KEY) /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Button = /*#__PURE__*/ function () { function Button(element) { this._element = element; } // Getters var _proto = Button.prototype; // Public _proto.toggle = function toggle() { var triggerChangeEvent = true; var addAriaPressed = true; var rootElement = $$$1(this._element).closest(Selector.DATA_TOGGLE)[0]; if (rootElement) { var input = this._element.querySelector(Selector.INPUT); if (input) { if (input.type === 'radio') { if (input.checked && this._element.classList.contains(ClassName.ACTIVE)) { triggerChangeEvent = false; } else { var activeElement = rootElement.querySelector(Selector.ACTIVE); if (activeElement) { $$$1(activeElement).removeClass(ClassName.ACTIVE); } } } if (triggerChangeEvent) { if (input.hasAttribute('disabled') || rootElement.hasAttribute('disabled') || input.classList.contains('disabled') || rootElement.classList.contains('disabled')) { return; } input.checked = !this._element.classList.contains(ClassName.ACTIVE); $$$1(input).trigger('change'); } input.focus(); addAriaPressed = false; } } if (addAriaPressed) { this._element.setAttribute('aria-pressed', !this._element.classList.contains(ClassName.ACTIVE)); } if (triggerChangeEvent) { $$$1(this._element).toggleClass(ClassName.ACTIVE); } }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); this._element = null; }; // Static Button._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); if (!data) { data = new Button(this); $$$1(this).data(DATA_KEY, data); } if (config === 'toggle') { data[config](); } }); }; _createClass(Button, null, [{ key: "VERSION", get: function get() { return VERSION; } }]); return Button; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.CLICK_DATA_API, Selector.DATA_TOGGLE_CARROT, function (event) { event.preventDefault(); var button = event.target; if (!$$$1(button).hasClass(ClassName.BUTTON)) { button = $$$1(button).closest(Selector.BUTTON); } Button._jQueryInterface.call($$$1(button), 'toggle'); }).on(Event.FOCUS_BLUR_DATA_API, Selector.DATA_TOGGLE_CARROT, function (event) { var button = $$$1(event.target).closest(Selector.BUTTON)[0]; $$$1(button).toggleClass(ClassName.FOCUS, /^focus(in)?$/.test(event.type)); }); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Button._jQueryInterface; 
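// the plugin is now callable through jQuery, e.g. $('.btn').button('toggle');
// noConflict() below restores whatever $.fn.button pointed to before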
$$$1.fn[NAME].Constructor = Button; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Button._jQueryInterface; }; return Button; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): carousel.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Carousel = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'carousel'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.carousel'; var EVENT_KEY = "." + DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var ARROW_LEFT_KEYCODE = 37; // KeyboardEvent.which value for left arrow key var ARROW_RIGHT_KEYCODE = 39; // KeyboardEvent.which value for right arrow key var TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch var Default = { interval: 5000, keyboard: true, slide: false, pause: 'hover', wrap: true }; var DefaultType = { interval: '(number|boolean)', keyboard: 'boolean', slide: '(boolean|string)', pause: '(string|boolean)', wrap: 'boolean' }; var Direction = { NEXT: 'next', PREV: 'prev', LEFT: 'left', RIGHT: 'right' }; var Event = { SLIDE: "slide" + EVENT_KEY, SLID: "slid" + EVENT_KEY, KEYDOWN: "keydown" + EVENT_KEY, MOUSEENTER: "mouseenter" + EVENT_KEY, MOUSELEAVE: "mouseleave" + EVENT_KEY, TOUCHEND: "touchend" + EVENT_KEY, LOAD_DATA_API: "load" + EVENT_KEY + DATA_API_KEY, CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY }; var ClassName = { CAROUSEL: 'carousel', ACTIVE: 'active', SLIDE: 'slide', RIGHT: 'carousel-item-right', LEFT: 'carousel-item-left', NEXT: 'carousel-item-next', PREV: 'carousel-item-prev', ITEM: 'carousel-item' }; var Selector = { ACTIVE: '.active', ACTIVE_ITEM: '.active.carousel-item', ITEM: '.carousel-item', NEXT_PREV: '.carousel-item-next, .carousel-item-prev', INDICATORS: '.carousel-indicators', DATA_SLIDE: '[data-slide], [data-slide-to]', DATA_RIDE: '[data-ride="carousel"]' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Carousel = /*#__PURE__*/ function () { function Carousel(element, config) { this._items = null; this._interval = null; this._activeElement = null; this._isPaused = false; this._isSliding = false; this.touchTimeout = null; this._config = this._getConfig(config); this._element = $$$1(element)[0]; this._indicatorsElement = this._element.querySelector(Selector.INDICATORS); this._addEventListeners(); } // Getters var _proto = Carousel.prototype; // Public _proto.next = function next() { if (!this._isSliding) { this._slide(Direction.NEXT); } }; _proto.nextWhenVisible = function nextWhenVisible() { // Don't call next when the page isn't visible // or the carousel or its parent isn't visible if (!document.hidden && $$$1(this._element).is(':visible') && $$$1(this._element).css('visibility') !== 'hidden') { this.next(); } }; _proto.prev = function prev() { if (!this._isSliding) { this._slide(Direction.PREV); } }; _proto.pause = function pause(event) { if (!event) { this._isPaused = true; } if (this._element.querySelector(Selector.NEXT_PREV)) { Util.triggerTransitionEnd(this._element); this.cycle(true); } clearInterval(this._interval); this._interval = null; }; _proto.cycle = function 
cycle(event) { if (!event) { this._isPaused = false; } if (this._interval) { clearInterval(this._interval); this._interval = null; } if (this._config.interval && !this._isPaused) { this._interval = setInterval((document.visibilityState ? this.nextWhenVisible : this.next).bind(this), this._config.interval); } }; _proto.to = function to(index) { var _this = this; this._activeElement = this._element.querySelector(Selector.ACTIVE_ITEM); var activeIndex = this._getItemIndex(this._activeElement); if (index > this._items.length - 1 || index < 0) { return; } if (this._isSliding) { $$$1(this._element).one(Event.SLID, function () { return _this.to(index); }); return; } if (activeIndex === index) { this.pause(); this.cycle(); return; } var direction = index > activeIndex ? Direction.NEXT : Direction.PREV; this._slide(direction, this._items[index]); }; _proto.dispose = function dispose() { $$$1(this._element).off(EVENT_KEY); $$$1.removeData(this._element, DATA_KEY); this._items = null; this._config = null; this._element = null; this._interval = null; this._isPaused = null; this._isSliding = null; this._activeElement = null; this._indicatorsElement = null; }; // Private _proto._getConfig = function _getConfig(config) { config = _objectSpread({}, Default, config); Util.typeCheckConfig(NAME, config, DefaultType); return config; }; _proto._addEventListeners = function _addEventListeners() { var _this2 = this; if (this._config.keyboard) { $$$1(this._element).on(Event.KEYDOWN, function (event) { return _this2._keydown(event); }); } if (this._config.pause === 'hover') { $$$1(this._element).on(Event.MOUSEENTER, function (event) { return _this2.pause(event); }).on(Event.MOUSELEAVE, function (event) { return _this2.cycle(event); }); if ('ontouchstart' in document.documentElement) { // If it's a touch-enabled device, mouseenter/leave are fired as // part of the mouse compatibility events on first tap - the carousel // would stop cycling until user tapped out of it; // here, we listen for touchend, explicitly pause the carousel // (as if it's the second time we tap on it, mouseenter compat event // is NOT fired) and after a timeout (to allow for mouse compatibility // events to fire) we explicitly restart cycling $$$1(this._element).on(Event.TOUCHEND, function () { _this2.pause(); if (_this2.touchTimeout) { clearTimeout(_this2.touchTimeout); } _this2.touchTimeout = setTimeout(function (event) { return _this2.cycle(event); }, TOUCHEVENT_COMPAT_WAIT + _this2._config.interval); }); } } }; _proto._keydown = function _keydown(event) { if (/input|textarea/i.test(event.target.tagName)) { return; } switch (event.which) { case ARROW_LEFT_KEYCODE: event.preventDefault(); this.prev(); break; case ARROW_RIGHT_KEYCODE: event.preventDefault(); this.next(); break; default: } }; _proto._getItemIndex = function _getItemIndex(element) { this._items = element && element.parentNode ? [].slice.call(element.parentNode.querySelectorAll(Selector.ITEM)) : []; return this._items.indexOf(element); }; _proto._getItemByDirection = function _getItemByDirection(direction, activeElement) { var isNextDirection = direction === Direction.NEXT; var isPrevDirection = direction === Direction.PREV; var activeIndex = this._getItemIndex(activeElement); var lastItemIndex = this._items.length - 1; var isGoingToWrap = isPrevDirection && activeIndex === 0 || isNextDirection && activeIndex === lastItemIndex; if (isGoingToWrap && !this._config.wrap) { return activeElement; } var delta = direction === Direction.PREV ? 
-1 : 1; var itemIndex = (activeIndex + delta) % this._items.length; return itemIndex === -1 ? this._items[this._items.length - 1] : this._items[itemIndex]; }; _proto._triggerSlideEvent = function _triggerSlideEvent(relatedTarget, eventDirectionName) { var targetIndex = this._getItemIndex(relatedTarget); var fromIndex = this._getItemIndex(this._element.querySelector(Selector.ACTIVE_ITEM)); var slideEvent = $$$1.Event(Event.SLIDE, { relatedTarget: relatedTarget, direction: eventDirectionName, from: fromIndex, to: targetIndex }); $$$1(this._element).trigger(slideEvent); return slideEvent; }; _proto._setActiveIndicatorElement = function _setActiveIndicatorElement(element) { if (this._indicatorsElement) { var indicators = [].slice.call(this._indicatorsElement.querySelectorAll(Selector.ACTIVE)); $$$1(indicators).removeClass(ClassName.ACTIVE); var nextIndicator = this._indicatorsElement.children[this._getItemIndex(element)]; if (nextIndicator) { $$$1(nextIndicator).addClass(ClassName.ACTIVE); } } }; _proto._slide = function _slide(direction, element) { var _this3 = this; var activeElement = this._element.querySelector(Selector.ACTIVE_ITEM); var activeElementIndex = this._getItemIndex(activeElement); var nextElement = element || activeElement && this._getItemByDirection(direction, activeElement); var nextElementIndex = this._getItemIndex(nextElement); var isCycling = Boolean(this._interval); var directionalClassName; var orderClassName; var eventDirectionName; if (direction === Direction.NEXT) { directionalClassName = ClassName.LEFT; orderClassName = ClassName.NEXT; eventDirectionName = Direction.LEFT; } else { directionalClassName = ClassName.RIGHT; orderClassName = ClassName.PREV; eventDirectionName = Direction.RIGHT; } if (nextElement && $$$1(nextElement).hasClass(ClassName.ACTIVE)) { this._isSliding = false; return; } var slideEvent = this._triggerSlideEvent(nextElement, eventDirectionName); if (slideEvent.isDefaultPrevented()) { return; } if (!activeElement || !nextElement) { // Some weirdness is happening, so we bail return; } this._isSliding = true; if (isCycling) { this.pause(); } this._setActiveIndicatorElement(nextElement); var slidEvent = $$$1.Event(Event.SLID, { relatedTarget: nextElement, direction: eventDirectionName, from: activeElementIndex, to: nextElementIndex }); if ($$$1(this._element).hasClass(ClassName.SLIDE)) { $$$1(nextElement).addClass(orderClassName); Util.reflow(nextElement); $$$1(activeElement).addClass(directionalClassName); $$$1(nextElement).addClass(directionalClassName); var transitionDuration = Util.getTransitionDurationFromElement(activeElement); $$$1(activeElement).one(Util.TRANSITION_END, function () { $$$1(nextElement).removeClass(directionalClassName + " " + orderClassName).addClass(ClassName.ACTIVE); $$$1(activeElement).removeClass(ClassName.ACTIVE + " " + orderClassName + " " + directionalClassName); _this3._isSliding = false; setTimeout(function () { return $$$1(_this3._element).trigger(slidEvent); }, 0); }).emulateTransitionEnd(transitionDuration); } else { $$$1(activeElement).removeClass(ClassName.ACTIVE); $$$1(nextElement).addClass(ClassName.ACTIVE); this._isSliding = false; $$$1(this._element).trigger(slidEvent); } if (isCycling) { this.cycle(); } }; // Static Carousel._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); var _config = _objectSpread({}, Default, $$$1(this).data()); if (typeof config === 'object') { _config = _objectSpread({}, _config, config); } var action = typeof 
config === 'string' ? config : _config.slide; if (!data) { data = new Carousel(this, _config); $$$1(this).data(DATA_KEY, data); } if (typeof config === 'number') { data.to(config); } else if (typeof action === 'string') { if (typeof data[action] === 'undefined') { throw new TypeError("No method named \"" + action + "\""); } data[action](); } else if (_config.interval) { data.pause(); data.cycle(); } }); }; Carousel._dataApiClickHandler = function _dataApiClickHandler(event) { var selector = Util.getSelectorFromElement(this); if (!selector) { return; } var target = $$$1(selector)[0]; if (!target || !$$$1(target).hasClass(ClassName.CAROUSEL)) { return; } var config = _objectSpread({}, $$$1(target).data(), $$$1(this).data()); var slideIndex = this.getAttribute('data-slide-to'); if (slideIndex) { config.interval = false; } Carousel._jQueryInterface.call($$$1(target), config); if (slideIndex) { $$$1(target).data(DATA_KEY).to(slideIndex); } event.preventDefault(); }; _createClass(Carousel, null, [{ key: "VERSION", get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }]); return Carousel; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.CLICK_DATA_API, Selector.DATA_SLIDE, Carousel._dataApiClickHandler); $$$1(window).on(Event.LOAD_DATA_API, function () { var carousels = [].slice.call(document.querySelectorAll(Selector.DATA_RIDE)); for (var i = 0, len = carousels.length; i < len; i++) { var $carousel = $$$1(carousels[i]); Carousel._jQueryInterface.call($carousel, $carousel.data()); } }); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Carousel._jQueryInterface; $$$1.fn[NAME].Constructor = Carousel; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Carousel._jQueryInterface; }; return Carousel; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): collapse.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Collapse = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'collapse'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.collapse'; var EVENT_KEY = "." 
+ DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var Default = { toggle: true, parent: '' }; var DefaultType = { toggle: 'boolean', parent: '(string|element)' }; var Event = { SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY, HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY }; var ClassName = { SHOW: 'show', COLLAPSE: 'collapse', COLLAPSING: 'collapsing', COLLAPSED: 'collapsed' }; var Dimension = { WIDTH: 'width', HEIGHT: 'height' }; var Selector = { ACTIVES: '.show, .collapsing', DATA_TOGGLE: '[data-toggle="collapse"]' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Collapse = /*#__PURE__*/ function () { function Collapse(element, config) { this._isTransitioning = false; this._element = element; this._config = this._getConfig(config); this._triggerArray = $$$1.makeArray(document.querySelectorAll("[data-toggle=\"collapse\"][href=\"#" + element.id + "\"]," + ("[data-toggle=\"collapse\"][data-target=\"#" + element.id + "\"]"))); var toggleList = [].slice.call(document.querySelectorAll(Selector.DATA_TOGGLE)); for (var i = 0, len = toggleList.length; i < len; i++) { var elem = toggleList[i]; var selector = Util.getSelectorFromElement(elem); var filterElement = [].slice.call(document.querySelectorAll(selector)).filter(function (foundElem) { return foundElem === element; }); if (selector !== null && filterElement.length > 0) { this._selector = selector; this._triggerArray.push(elem); } } this._parent = this._config.parent ? this._getParent() : null; if (!this._config.parent) { this._addAriaAndCollapsedClass(this._element, this._triggerArray); } if (this._config.toggle) { this.toggle(); } } // Getters var _proto = Collapse.prototype; // Public _proto.toggle = function toggle() { if ($$$1(this._element).hasClass(ClassName.SHOW)) { this.hide(); } else { this.show(); } }; _proto.show = function show() { var _this = this; if (this._isTransitioning || $$$1(this._element).hasClass(ClassName.SHOW)) { return; } var actives; var activesData; if (this._parent) { actives = [].slice.call(this._parent.querySelectorAll(Selector.ACTIVES)).filter(function (elem) { return elem.getAttribute('data-parent') === _this._config.parent; }); if (actives.length === 0) { actives = null; } } if (actives) { activesData = $$$1(actives).not(this._selector).data(DATA_KEY); if (activesData && activesData._isTransitioning) { return; } } var startEvent = $$$1.Event(Event.SHOW); $$$1(this._element).trigger(startEvent); if (startEvent.isDefaultPrevented()) { return; } if (actives) { Collapse._jQueryInterface.call($$$1(actives).not(this._selector), 'hide'); if (!activesData) { $$$1(actives).data(DATA_KEY, null); } } var dimension = this._getDimension(); $$$1(this._element).removeClass(ClassName.COLLAPSE).addClass(ClassName.COLLAPSING); this._element.style[dimension] = 0; if (this._triggerArray.length) { $$$1(this._triggerArray).removeClass(ClassName.COLLAPSED).attr('aria-expanded', true); } this.setTransitioning(true); var complete = function complete() { $$$1(_this._element).removeClass(ClassName.COLLAPSING).addClass(ClassName.COLLAPSE).addClass(ClassName.SHOW); _this._element.style[dimension] = ''; _this.setTransitioning(false); $$$1(_this._element).trigger(Event.SHOWN); }; var capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1); var scrollSize = "scroll" + capitalizedDimension; var 
transitionDuration = Util.getTransitionDurationFromElement(this._element); $$$1(this._element).one(Util.TRANSITION_END, complete).emulateTransitionEnd(transitionDuration); this._element.style[dimension] = this._element[scrollSize] + "px"; }; _proto.hide = function hide() { var _this2 = this; if (this._isTransitioning || !$$$1(this._element).hasClass(ClassName.SHOW)) { return; } var startEvent = $$$1.Event(Event.HIDE); $$$1(this._element).trigger(startEvent); if (startEvent.isDefaultPrevented()) { return; } var dimension = this._getDimension(); this._element.style[dimension] = this._element.getBoundingClientRect()[dimension] + "px"; Util.reflow(this._element); $$$1(this._element).addClass(ClassName.COLLAPSING).removeClass(ClassName.COLLAPSE).removeClass(ClassName.SHOW); var triggerArrayLength = this._triggerArray.length; if (triggerArrayLength > 0) { for (var i = 0; i < triggerArrayLength; i++) { var trigger = this._triggerArray[i]; var selector = Util.getSelectorFromElement(trigger); if (selector !== null) { var $elem = $$$1([].slice.call(document.querySelectorAll(selector))); if (!$elem.hasClass(ClassName.SHOW)) { $$$1(trigger).addClass(ClassName.COLLAPSED).attr('aria-expanded', false); } } } } this.setTransitioning(true); var complete = function complete() { _this2.setTransitioning(false); $$$1(_this2._element).removeClass(ClassName.COLLAPSING).addClass(ClassName.COLLAPSE).trigger(Event.HIDDEN); }; this._element.style[dimension] = ''; var transitionDuration = Util.getTransitionDurationFromElement(this._element); $$$1(this._element).one(Util.TRANSITION_END, complete).emulateTransitionEnd(transitionDuration); }; _proto.setTransitioning = function setTransitioning(isTransitioning) { this._isTransitioning = isTransitioning; }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); this._config = null; this._parent = null; this._element = null; this._triggerArray = null; this._isTransitioning = null; }; // Private _proto._getConfig = function _getConfig(config) { config = _objectSpread({}, Default, config); config.toggle = Boolean(config.toggle); // Coerce string values Util.typeCheckConfig(NAME, config, DefaultType); return config; }; _proto._getDimension = function _getDimension() { var hasWidth = $$$1(this._element).hasClass(Dimension.WIDTH); return hasWidth ? Dimension.WIDTH : Dimension.HEIGHT; }; _proto._getParent = function _getParent() { var _this3 = this; var parent = null; if (Util.isElement(this._config.parent)) { parent = this._config.parent; // It's a jQuery object if (typeof this._config.parent.jquery !== 'undefined') { parent = this._config.parent[0]; } } else { parent = document.querySelector(this._config.parent); } var selector = "[data-toggle=\"collapse\"][data-parent=\"" + this._config.parent + "\"]"; var children = [].slice.call(parent.querySelectorAll(selector)); $$$1(children).each(function (i, element) { _this3._addAriaAndCollapsedClass(Collapse._getTargetFromElement(element), [element]); }); return parent; }; _proto._addAriaAndCollapsedClass = function _addAriaAndCollapsedClass(element, triggerArray) { if (element) { var isOpen = $$$1(element).hasClass(ClassName.SHOW); if (triggerArray.length) { $$$1(triggerArray).toggleClass(ClassName.COLLAPSED, !isOpen).attr('aria-expanded', isOpen); } } }; // Static Collapse._getTargetFromElement = function _getTargetFromElement(element) { var selector = Util.getSelectorFromElement(element); return selector ? 
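/*
 * This helper (_getTargetFromElement) resolves a trigger's data-target or
 * href selector to the collapsible pane it controls. A minimal markup sketch
 * it assumes (ids are hypothetical):
 *
 *   <a data-toggle="collapse" href="#paneOne">Toggle</a>
 *   <div id="paneOne" class="collapse">pane body</div>
 */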
document.querySelector(selector) : null; }; Collapse._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var $this = $$$1(this); var data = $this.data(DATA_KEY); var _config = _objectSpread({}, Default, $this.data(), typeof config === 'object' && config ? config : {}); if (!data && _config.toggle && /show|hide/.test(config)) { _config.toggle = false; } if (!data) { data = new Collapse(this, _config); $this.data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](); } }); }; _createClass(Collapse, null, [{ key: "VERSION", get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }]); return Collapse; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.CLICK_DATA_API, Selector.DATA_TOGGLE, function (event) { // preventDefault only for <a> elements (which change the URL) not inside the collapsible element if (event.currentTarget.tagName === 'A') { event.preventDefault(); } var $trigger = $$$1(this); var selector = Util.getSelectorFromElement(this); var selectors = [].slice.call(document.querySelectorAll(selector)); $$$1(selectors).each(function () { var $target = $$$1(this); var data = $target.data(DATA_KEY); var config = data ? 'toggle' : $trigger.data(); Collapse._jQueryInterface.call($target, config); }); }); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Collapse._jQueryInterface; $$$1.fn[NAME].Constructor = Collapse; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Collapse._jQueryInterface; }; return Collapse; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): dropdown.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Dropdown = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'dropdown'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.dropdown'; var EVENT_KEY = "." 
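/*
 * The Collapse plugin wired above can be driven from data attributes or
 * programmatically through the jQuery interface it just registered. A minimal
 * programmatic sketch (the "#accordion" selector is hypothetical):
 *
 *   $('#accordion .collapse').collapse({ toggle: false }); // init without toggling
 *   $('#accordion .collapse').first().collapse('show');
 */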
+ DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var ESCAPE_KEYCODE = 27; // KeyboardEvent.which value for Escape (Esc) key var SPACE_KEYCODE = 32; // KeyboardEvent.which value for space key var TAB_KEYCODE = 9; // KeyboardEvent.which value for tab key var ARROW_UP_KEYCODE = 38; // KeyboardEvent.which value for up arrow key var ARROW_DOWN_KEYCODE = 40; // KeyboardEvent.which value for down arrow key var RIGHT_MOUSE_BUTTON_WHICH = 3; // MouseEvent.which value for the right button (assuming a right-handed mouse) var REGEXP_KEYDOWN = new RegExp(ARROW_UP_KEYCODE + "|" + ARROW_DOWN_KEYCODE + "|" + ESCAPE_KEYCODE); var Event = { HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY, CLICK: "click" + EVENT_KEY, CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY, KEYDOWN_DATA_API: "keydown" + EVENT_KEY + DATA_API_KEY, KEYUP_DATA_API: "keyup" + EVENT_KEY + DATA_API_KEY }; var ClassName = { DISABLED: 'disabled', SHOW: 'show', DROPUP: 'dropup', DROPRIGHT: 'dropright', DROPLEFT: 'dropleft', MENURIGHT: 'dropdown-menu-right', MENULEFT: 'dropdown-menu-left', POSITION_STATIC: 'position-static' }; var Selector = { DATA_TOGGLE: '[data-toggle="dropdown"]', FORM_CHILD: '.dropdown form', MENU: '.dropdown-menu', NAVBAR_NAV: '.navbar-nav', VISIBLE_ITEMS: '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)' }; var AttachmentMap = { TOP: 'top-start', TOPEND: 'top-end', BOTTOM: 'bottom-start', BOTTOMEND: 'bottom-end', RIGHT: 'right-start', RIGHTEND: 'right-end', LEFT: 'left-start', LEFTEND: 'left-end' }; var Default = { offset: 0, flip: true, boundary: 'scrollParent', reference: 'toggle', display: 'dynamic' }; var DefaultType = { offset: '(number|string|function)', flip: 'boolean', boundary: '(string|element)', reference: '(string|element)', display: 'string' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Dropdown = /*#__PURE__*/ function () { function Dropdown(element, config) { this._element = element; this._popper = null; this._config = this._getConfig(config); this._menu = this._getMenuElement(); this._inNavbar = this._detectNavbar(); this._addEventListeners(); } // Getters var _proto = Dropdown.prototype; // Public _proto.toggle = function toggle() { if (this._element.disabled || $$$1(this._element).hasClass(ClassName.DISABLED)) { return; } var parent = Dropdown._getParentFromElement(this._element); var isActive = $$$1(this._menu).hasClass(ClassName.SHOW); Dropdown._clearMenus(); if (isActive) { return; } var relatedTarget = { relatedTarget: this._element }; var showEvent = $$$1.Event(Event.SHOW, relatedTarget); $$$1(parent).trigger(showEvent); if (showEvent.isDefaultPrevented()) { return; } // Totally disable Popper.js for Dropdown in Navbar if (!this._inNavbar) { /** * Check for Popper dependency * Popper - https://popper.js.org */ if (typeof Popper === 'undefined') { throw new TypeError('Bootstrap dropdowns require Popper.js (https://popper.js.org)'); } var referenceElement = this._element; if (this._config.reference === 'parent') { referenceElement = parent; } else if (Util.isElement(this._config.reference)) { referenceElement = this._config.reference; // Check if it's a jQuery element if (typeof this._config.reference.jquery !== 'undefined') { referenceElement = this._config.reference[0]; } } // If boundary is not `scrollParent`, then set position to `static` // to allow the 
menu to "escape" the scroll parent's boundaries // https://github.com/twbs/bootstrap/issues/24251 if (this._config.boundary !== 'scrollParent') { $$$1(parent).addClass(ClassName.POSITION_STATIC); } this._popper = new Popper(referenceElement, this._menu, this._getPopperConfig()); } // If this is a touch-enabled device we add extra // empty mouseover listeners to the body's immediate children; // only needed because of broken event delegation on iOS // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html if ('ontouchstart' in document.documentElement && $$$1(parent).closest(Selector.NAVBAR_NAV).length === 0) { $$$1(document.body).children().on('mouseover', null, $$$1.noop); } this._element.focus(); this._element.setAttribute('aria-expanded', true); $$$1(this._menu).toggleClass(ClassName.SHOW); $$$1(parent).toggleClass(ClassName.SHOW).trigger($$$1.Event(Event.SHOWN, relatedTarget)); }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); $$$1(this._element).off(EVENT_KEY); this._element = null; this._menu = null; if (this._popper !== null) { this._popper.destroy(); this._popper = null; } }; _proto.update = function update() { this._inNavbar = this._detectNavbar(); if (this._popper !== null) { this._popper.scheduleUpdate(); } }; // Private _proto._addEventListeners = function _addEventListeners() { var _this = this; $$$1(this._element).on(Event.CLICK, function (event) { event.preventDefault(); event.stopPropagation(); _this.toggle(); }); }; _proto._getConfig = function _getConfig(config) { config = _objectSpread({}, this.constructor.Default, $$$1(this._element).data(), config); Util.typeCheckConfig(NAME, config, this.constructor.DefaultType); return config; }; _proto._getMenuElement = function _getMenuElement() { if (!this._menu) { var parent = Dropdown._getParentFromElement(this._element); if (parent) { this._menu = parent.querySelector(Selector.MENU); } } return this._menu; }; _proto._getPlacement = function _getPlacement() { var $parentDropdown = $$$1(this._element.parentNode); var placement = AttachmentMap.BOTTOM; // Handle dropup if ($parentDropdown.hasClass(ClassName.DROPUP)) { placement = AttachmentMap.TOP; if ($$$1(this._menu).hasClass(ClassName.MENURIGHT)) { placement = AttachmentMap.TOPEND; } } else if ($parentDropdown.hasClass(ClassName.DROPRIGHT)) { placement = AttachmentMap.RIGHT; } else if ($parentDropdown.hasClass(ClassName.DROPLEFT)) { placement = AttachmentMap.LEFT; } else if ($$$1(this._menu).hasClass(ClassName.MENURIGHT)) { placement = AttachmentMap.BOTTOMEND; } return placement; }; _proto._detectNavbar = function _detectNavbar() { return $$$1(this._element).closest('.navbar').length > 0; }; _proto._getPopperConfig = function _getPopperConfig() { var _this2 = this; var offsetConf = {}; if (typeof this._config.offset === 'function') { offsetConf.fn = function (data) { data.offsets = _objectSpread({}, data.offsets, _this2._config.offset(data.offsets) || {}); return data; }; } else { offsetConf.offset = this._config.offset; } var popperConfig = { placement: this._getPlacement(), modifiers: { offset: offsetConf, flip: { enabled: this._config.flip }, preventOverflow: { boundariesElement: this._config.boundary } } // Disable Popper.js if we have a static display }; if (this._config.display === 'static') { popperConfig.modifiers.applyStyle = { enabled: false }; } return popperConfig; }; // Static Dropdown._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); var _config 
= typeof config === 'object' ? config : null; if (!data) { data = new Dropdown(this, _config); $$$1(this).data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](); } }); }; Dropdown._clearMenus = function _clearMenus(event) { if (event && (event.which === RIGHT_MOUSE_BUTTON_WHICH || event.type === 'keyup' && event.which !== TAB_KEYCODE)) { return; } var toggles = [].slice.call(document.querySelectorAll(Selector.DATA_TOGGLE)); for (var i = 0, len = toggles.length; i < len; i++) { var parent = Dropdown._getParentFromElement(toggles[i]); var context = $$$1(toggles[i]).data(DATA_KEY); var relatedTarget = { relatedTarget: toggles[i] }; if (event && event.type === 'click') { relatedTarget.clickEvent = event; } if (!context) { continue; } var dropdownMenu = context._menu; if (!$$$1(parent).hasClass(ClassName.SHOW)) { continue; } if (event && (event.type === 'click' && /input|textarea/i.test(event.target.tagName) || event.type === 'keyup' && event.which === TAB_KEYCODE) && $$$1.contains(parent, event.target)) { continue; } var hideEvent = $$$1.Event(Event.HIDE, relatedTarget); $$$1(parent).trigger(hideEvent); if (hideEvent.isDefaultPrevented()) { continue; } // If this is a touch-enabled device we remove the extra // empty mouseover listeners we added for iOS support if ('ontouchstart' in document.documentElement) { $$$1(document.body).children().off('mouseover', null, $$$1.noop); } toggles[i].setAttribute('aria-expanded', 'false'); $$$1(dropdownMenu).removeClass(ClassName.SHOW); $$$1(parent).removeClass(ClassName.SHOW).trigger($$$1.Event(Event.HIDDEN, relatedTarget)); } }; Dropdown._getParentFromElement = function _getParentFromElement(element) { var parent; var selector = Util.getSelectorFromElement(element); if (selector) { parent = document.querySelector(selector); } return parent || element.parentNode; }; // eslint-disable-next-line complexity Dropdown._dataApiKeydownHandler = function _dataApiKeydownHandler(event) { // If not input/textarea: // - And not a key in REGEXP_KEYDOWN => not a dropdown command // If input/textarea: // - If space key => not a dropdown command // - If key is other than escape // - If key is not up or down => not a dropdown command // - If trigger inside the menu => not a dropdown command if (/input|textarea/i.test(event.target.tagName) ? 
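/*
 * The ternary just below implements the decision table above: keys typed in
 * an input/textarea never drive the dropdown, except Escape (close) and,
 * while focus is outside the menu, ArrowUp/ArrowDown (move into the items).
 */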
event.which === SPACE_KEYCODE || event.which !== ESCAPE_KEYCODE && (event.which !== ARROW_DOWN_KEYCODE && event.which !== ARROW_UP_KEYCODE || $$$1(event.target).closest(Selector.MENU).length) : !REGEXP_KEYDOWN.test(event.which)) { return; } event.preventDefault(); event.stopPropagation(); if (this.disabled || $$$1(this).hasClass(ClassName.DISABLED)) { return; } var parent = Dropdown._getParentFromElement(this); var isActive = $$$1(parent).hasClass(ClassName.SHOW); if (!isActive && (event.which !== ESCAPE_KEYCODE || event.which !== SPACE_KEYCODE) || isActive && (event.which === ESCAPE_KEYCODE || event.which === SPACE_KEYCODE)) { if (event.which === ESCAPE_KEYCODE) { var toggle = parent.querySelector(Selector.DATA_TOGGLE); $$$1(toggle).trigger('focus'); } $$$1(this).trigger('click'); return; } var items = [].slice.call(parent.querySelectorAll(Selector.VISIBLE_ITEMS)); if (items.length === 0) { return; } var index = items.indexOf(event.target); if (event.which === ARROW_UP_KEYCODE && index > 0) { // Up index--; } if (event.which === ARROW_DOWN_KEYCODE && index < items.length - 1) { // Down index++; } if (index < 0) { index = 0; } items[index].focus(); }; _createClass(Dropdown, null, [{ key: "VERSION", get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }, { key: "DefaultType", get: function get() { return DefaultType; } }]); return Dropdown; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.KEYDOWN_DATA_API, Selector.DATA_TOGGLE, Dropdown._dataApiKeydownHandler).on(Event.KEYDOWN_DATA_API, Selector.MENU, Dropdown._dataApiKeydownHandler).on(Event.CLICK_DATA_API + " " + Event.KEYUP_DATA_API, Dropdown._clearMenus).on(Event.CLICK_DATA_API, Selector.DATA_TOGGLE, function (event) { event.preventDefault(); event.stopPropagation(); Dropdown._jQueryInterface.call($$$1(this), 'toggle'); }).on(Event.CLICK_DATA_API, Selector.FORM_CHILD, function (e) { e.stopPropagation(); }); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Dropdown._jQueryInterface; $$$1.fn[NAME].Constructor = Dropdown; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Dropdown._jQueryInterface; }; return Dropdown; }($, Popper); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): modal.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Modal = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'modal'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.modal'; var EVENT_KEY = "." 
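/*
 * The Dropdown plugin registered above exposes "toggle", "update" and
 * "dispose" through its jQuery interface. A minimal sketch (the
 * "#menuToggle" id is hypothetical):
 *
 *   $('#menuToggle').dropdown('toggle'); // open or close the menu
 *   $('#menuToggle').dropdown('update'); // reposition via Popper.js
 */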
+ DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var ESCAPE_KEYCODE = 27; // KeyboardEvent.which value for Escape (Esc) key var Default = { backdrop: true, keyboard: true, focus: true, show: true }; var DefaultType = { backdrop: '(boolean|string)', keyboard: 'boolean', focus: 'boolean', show: 'boolean' }; var Event = { HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY, FOCUSIN: "focusin" + EVENT_KEY, RESIZE: "resize" + EVENT_KEY, CLICK_DISMISS: "click.dismiss" + EVENT_KEY, KEYDOWN_DISMISS: "keydown.dismiss" + EVENT_KEY, MOUSEUP_DISMISS: "mouseup.dismiss" + EVENT_KEY, MOUSEDOWN_DISMISS: "mousedown.dismiss" + EVENT_KEY, CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY }; var ClassName = { SCROLLBAR_MEASURER: 'modal-scrollbar-measure', BACKDROP: 'modal-backdrop', OPEN: 'modal-open', FADE: 'fade', SHOW: 'show' }; var Selector = { DIALOG: '.modal-dialog', DATA_TOGGLE: '[data-toggle="modal"]', DATA_DISMISS: '[data-dismiss="modal"]', FIXED_CONTENT: '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top', STICKY_CONTENT: '.sticky-top' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Modal = /*#__PURE__*/ function () { function Modal(element, config) { this._config = this._getConfig(config); this._element = element; this._dialog = element.querySelector(Selector.DIALOG); this._backdrop = null; this._isShown = false; this._isBodyOverflowing = false; this._ignoreBackdropClick = false; this._scrollbarWidth = 0; } // Getters var _proto = Modal.prototype; // Public _proto.toggle = function toggle(relatedTarget) { return this._isShown ? this.hide() : this.show(relatedTarget); }; _proto.show = function show(relatedTarget) { var _this = this; if (this._isTransitioning || this._isShown) { return; } if ($$$1(this._element).hasClass(ClassName.FADE)) { this._isTransitioning = true; } var showEvent = $$$1.Event(Event.SHOW, { relatedTarget: relatedTarget }); $$$1(this._element).trigger(showEvent); if (this._isShown || showEvent.isDefaultPrevented()) { return; } this._isShown = true; this._checkScrollbar(); this._setScrollbar(); this._adjustDialog(); $$$1(document.body).addClass(ClassName.OPEN); this._setEscapeEvent(); this._setResizeEvent(); $$$1(this._element).on(Event.CLICK_DISMISS, Selector.DATA_DISMISS, function (event) { return _this.hide(event); }); $$$1(this._dialog).on(Event.MOUSEDOWN_DISMISS, function () { $$$1(_this._element).one(Event.MOUSEUP_DISMISS, function (event) { if ($$$1(event.target).is(_this._element)) { _this._ignoreBackdropClick = true; } }); }); this._showBackdrop(function () { return _this._showElement(relatedTarget); }); }; _proto.hide = function hide(event) { var _this2 = this; if (event) { event.preventDefault(); } if (this._isTransitioning || !this._isShown) { return; } var hideEvent = $$$1.Event(Event.HIDE); $$$1(this._element).trigger(hideEvent); if (!this._isShown || hideEvent.isDefaultPrevented()) { return; } this._isShown = false; var transition = $$$1(this._element).hasClass(ClassName.FADE); if (transition) { this._isTransitioning = true; } this._setEscapeEvent(); this._setResizeEvent(); $$$1(document).off(Event.FOCUSIN); $$$1(this._element).removeClass(ClassName.SHOW); $$$1(this._element).off(Event.CLICK_DISMISS); $$$1(this._dialog).off(Event.MOUSEDOWN_DISMISS); if (transition) { var transitionDuration = 
Util.getTransitionDurationFromElement(this._element); $$$1(this._element).one(Util.TRANSITION_END, function (event) { return _this2._hideModal(event); }).emulateTransitionEnd(transitionDuration); } else { this._hideModal(); } }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); $$$1(window, document, this._element, this._backdrop).off(EVENT_KEY); this._config = null; this._element = null; this._dialog = null; this._backdrop = null; this._isShown = null; this._isBodyOverflowing = null; this._ignoreBackdropClick = null; this._scrollbarWidth = null; }; _proto.handleUpdate = function handleUpdate() { this._adjustDialog(); }; // Private _proto._getConfig = function _getConfig(config) { config = _objectSpread({}, Default, config); Util.typeCheckConfig(NAME, config, DefaultType); return config; }; _proto._showElement = function _showElement(relatedTarget) { var _this3 = this; var transition = $$$1(this._element).hasClass(ClassName.FADE); if (!this._element.parentNode || this._element.parentNode.nodeType !== Node.ELEMENT_NODE) { // Don't move modal's DOM position document.body.appendChild(this._element); } this._element.style.display = 'block'; this._element.removeAttribute('aria-hidden'); this._element.scrollTop = 0; if (transition) { Util.reflow(this._element); } $$$1(this._element).addClass(ClassName.SHOW); if (this._config.focus) { this._enforceFocus(); } var shownEvent = $$$1.Event(Event.SHOWN, { relatedTarget: relatedTarget }); var transitionComplete = function transitionComplete() { if (_this3._config.focus) { _this3._element.focus(); } _this3._isTransitioning = false; $$$1(_this3._element).trigger(shownEvent); }; if (transition) { var transitionDuration = Util.getTransitionDurationFromElement(this._element); $$$1(this._dialog).one(Util.TRANSITION_END, transitionComplete).emulateTransitionEnd(transitionDuration); } else { transitionComplete(); } }; _proto._enforceFocus = function _enforceFocus() { var _this4 = this; $$$1(document).off(Event.FOCUSIN) // Guard against infinite focus loop .on(Event.FOCUSIN, function (event) { if (document !== event.target && _this4._element !== event.target && $$$1(_this4._element).has(event.target).length === 0) { _this4._element.focus(); } }); }; _proto._setEscapeEvent = function _setEscapeEvent() { var _this5 = this; if (this._isShown && this._config.keyboard) { $$$1(this._element).on(Event.KEYDOWN_DISMISS, function (event) { if (event.which === ESCAPE_KEYCODE) { event.preventDefault(); _this5.hide(); } }); } else if (!this._isShown) { $$$1(this._element).off(Event.KEYDOWN_DISMISS); } }; _proto._setResizeEvent = function _setResizeEvent() { var _this6 = this; if (this._isShown) { $$$1(window).on(Event.RESIZE, function (event) { return _this6.handleUpdate(event); }); } else { $$$1(window).off(Event.RESIZE); } }; _proto._hideModal = function _hideModal() { var _this7 = this; this._element.style.display = 'none'; this._element.setAttribute('aria-hidden', true); this._isTransitioning = false; this._showBackdrop(function () { $$$1(document.body).removeClass(ClassName.OPEN); _this7._resetAdjustments(); _this7._resetScrollbar(); $$$1(_this7._element).trigger(Event.HIDDEN); }); }; _proto._removeBackdrop = function _removeBackdrop() { if (this._backdrop) { $$$1(this._backdrop).remove(); this._backdrop = null; } }; _proto._showBackdrop = function _showBackdrop(callback) { var _this8 = this; var animate = $$$1(this._element).hasClass(ClassName.FADE) ? 
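/*
 * _showBackdrop honours the `backdrop` option: true inserts a
 * click-to-dismiss backdrop, 'static' inserts one that refocuses the modal
 * instead of closing it, and false skips the backdrop. A minimal sketch
 * (the "#myModal" id is hypothetical):
 *
 *   $('#myModal').modal({ backdrop: 'static', keyboard: false });
 */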
ClassName.FADE : ''; if (this._isShown && this._config.backdrop) { this._backdrop = document.createElement('div'); this._backdrop.className = ClassName.BACKDROP; if (animate) { this._backdrop.classList.add(animate); } $$$1(this._backdrop).appendTo(document.body); $$$1(this._element).on(Event.CLICK_DISMISS, function (event) { if (_this8._ignoreBackdropClick) { _this8._ignoreBackdropClick = false; return; } if (event.target !== event.currentTarget) { return; } if (_this8._config.backdrop === 'static') { _this8._element.focus(); } else { _this8.hide(); } }); if (animate) { Util.reflow(this._backdrop); } $$$1(this._backdrop).addClass(ClassName.SHOW); if (!callback) { return; } if (!animate) { callback(); return; } var backdropTransitionDuration = Util.getTransitionDurationFromElement(this._backdrop); $$$1(this._backdrop).one(Util.TRANSITION_END, callback).emulateTransitionEnd(backdropTransitionDuration); } else if (!this._isShown && this._backdrop) { $$$1(this._backdrop).removeClass(ClassName.SHOW); var callbackRemove = function callbackRemove() { _this8._removeBackdrop(); if (callback) { callback(); } }; if ($$$1(this._element).hasClass(ClassName.FADE)) { var _backdropTransitionDuration = Util.getTransitionDurationFromElement(this._backdrop); $$$1(this._backdrop).one(Util.TRANSITION_END, callbackRemove).emulateTransitionEnd(_backdropTransitionDuration); } else { callbackRemove(); } } else if (callback) { callback(); } }; // ---------------------------------------------------------------------- // the following methods are used to handle overflowing modals // todo (fat): these should probably be refactored out of modal.js // ---------------------------------------------------------------------- _proto._adjustDialog = function _adjustDialog() { var isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight; if (!this._isBodyOverflowing && isModalOverflowing) { this._element.style.paddingLeft = this._scrollbarWidth + "px"; } if (this._isBodyOverflowing && !isModalOverflowing) { this._element.style.paddingRight = this._scrollbarWidth + "px"; } }; _proto._resetAdjustments = function _resetAdjustments() { this._element.style.paddingLeft = ''; this._element.style.paddingRight = ''; }; _proto._checkScrollbar = function _checkScrollbar() { var rect = document.body.getBoundingClientRect(); this._isBodyOverflowing = rect.left + rect.right < window.innerWidth; this._scrollbarWidth = this._getScrollbarWidth(); }; _proto._setScrollbar = function _setScrollbar() { var _this9 = this; if (this._isBodyOverflowing) { // Note: DOMNode.style.paddingRight returns the actual value or '' if not set // while $(DOMNode).css('padding-right') returns the calculated value or 0 if not set var fixedContent = [].slice.call(document.querySelectorAll(Selector.FIXED_CONTENT)); var stickyContent = [].slice.call(document.querySelectorAll(Selector.STICKY_CONTENT)); // Adjust fixed content padding $$$1(fixedContent).each(function (index, element) { var actualPadding = element.style.paddingRight; var calculatedPadding = $$$1(element).css('padding-right'); $$$1(element).data('padding-right', actualPadding).css('padding-right', parseFloat(calculatedPadding) + _this9._scrollbarWidth + "px"); }); // Adjust sticky content margin $$$1(stickyContent).each(function (index, element) { var actualMargin = element.style.marginRight; var calculatedMargin = $$$1(element).css('margin-right'); $$$1(element).data('margin-right', actualMargin).css('margin-right', parseFloat(calculatedMargin) - _this9._scrollbarWidth + 
"px"); }); // Adjust body padding var actualPadding = document.body.style.paddingRight; var calculatedPadding = $$$1(document.body).css('padding-right'); $$$1(document.body).data('padding-right', actualPadding).css('padding-right', parseFloat(calculatedPadding) + this._scrollbarWidth + "px"); } }; _proto._resetScrollbar = function _resetScrollbar() { // Restore fixed content padding var fixedContent = [].slice.call(document.querySelectorAll(Selector.FIXED_CONTENT)); $$$1(fixedContent).each(function (index, element) { var padding = $$$1(element).data('padding-right'); $$$1(element).removeData('padding-right'); element.style.paddingRight = padding ? padding : ''; }); // Restore sticky content var elements = [].slice.call(document.querySelectorAll("" + Selector.STICKY_CONTENT)); $$$1(elements).each(function (index, element) { var margin = $$$1(element).data('margin-right'); if (typeof margin !== 'undefined') { $$$1(element).css('margin-right', margin).removeData('margin-right'); } }); // Restore body padding var padding = $$$1(document.body).data('padding-right'); $$$1(document.body).removeData('padding-right'); document.body.style.paddingRight = padding ? padding : ''; }; _proto._getScrollbarWidth = function _getScrollbarWidth() { // thx d.walsh var scrollDiv = document.createElement('div'); scrollDiv.className = ClassName.SCROLLBAR_MEASURER; document.body.appendChild(scrollDiv); var scrollbarWidth = scrollDiv.getBoundingClientRect().width - scrollDiv.clientWidth; document.body.removeChild(scrollDiv); return scrollbarWidth; }; // Static Modal._jQueryInterface = function _jQueryInterface(config, relatedTarget) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); var _config = _objectSpread({}, Default, $$$1(this).data(), typeof config === 'object' && config ? config : {}); if (!data) { data = new Modal(this, _config); $$$1(this).data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](relatedTarget); } else if (_config.show) { data.show(relatedTarget); } }); }; _createClass(Modal, null, [{ key: "VERSION", get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }]); return Modal; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.CLICK_DATA_API, Selector.DATA_TOGGLE, function (event) { var _this10 = this; var target; var selector = Util.getSelectorFromElement(this); if (selector) { target = document.querySelector(selector); } var config = $$$1(target).data(DATA_KEY) ? 
'toggle' : _objectSpread({}, $$$1(target).data(), $$$1(this).data()); if (this.tagName === 'A' || this.tagName === 'AREA') { event.preventDefault(); } var $target = $$$1(target).one(Event.SHOW, function (showEvent) { if (showEvent.isDefaultPrevented()) { // Only register focus restorer if modal will actually get shown return; } $target.one(Event.HIDDEN, function () { if ($$$1(_this10).is(':visible')) { _this10.focus(); } }); }); Modal._jQueryInterface.call($$$1(target), config, this); }); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Modal._jQueryInterface; $$$1.fn[NAME].Constructor = Modal; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Modal._jQueryInterface; }; return Modal; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): tooltip.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Tooltip = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'tooltip'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.tooltip'; var EVENT_KEY = "." + DATA_KEY; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var CLASS_PREFIX = 'bs-tooltip'; var BSCLS_PREFIX_REGEX = new RegExp("(^|\\s)" + CLASS_PREFIX + "\\S+", 'g'); var DefaultType = { animation: 'boolean', template: 'string', title: '(string|element|function)', trigger: 'string', delay: '(number|object)', html: 'boolean', selector: '(string|boolean)', placement: '(string|function)', offset: '(number|string)', container: '(string|element|boolean)', fallbackPlacement: '(string|array)', boundary: '(string|element)' }; var AttachmentMap = { AUTO: 'auto', TOP: 'top', RIGHT: 'right', BOTTOM: 'bottom', LEFT: 'left' }; var Default = { animation: true, template: '<div class="tooltip" role="tooltip">' + '<div class="arrow"></div>' + '<div class="tooltip-inner"></div></div>', trigger: 'hover focus', title: '', delay: 0, html: false, selector: false, placement: 'top', offset: 0, container: false, fallbackPlacement: 'flip', boundary: 'scrollParent' }; var HoverState = { SHOW: 'show', OUT: 'out' }; var Event = {
HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY,
INSERTED: "inserted" + EVENT_KEY, CLICK: "click" + EVENT_KEY, FOCUSIN: "focusin" + EVENT_KEY, FOCUSOUT: "focusout" + EVENT_KEY, MOUSEENTER: "mouseenter" + EVENT_KEY, MOUSELEAVE: "mouseleave" + EVENT_KEY }; var ClassName = { FADE: 'fade', SHOW: 'show' }; var Selector = { TOOLTIP: '.tooltip', TOOLTIP_INNER: '.tooltip-inner', ARROW: '.arrow' }; var Trigger = { HOVER: 'hover', FOCUS: 'focus', CLICK: 'click', MANUAL: 'manual' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Tooltip = /*#__PURE__*/ function () { function Tooltip(element, config) { /** * Check for Popper dependency * Popper - https://popper.js.org */ if (typeof Popper === 'undefined') { throw new TypeError('Bootstrap tooltips require Popper.js (https://popper.js.org)'); } // private this._isEnabled = true; this._timeout = 0; this._hoverState = ''; this._activeTrigger = {}; this._popper = null; // Protected this.element = element; this.config = this._getConfig(config); this.tip = null; this._setListeners(); } // Getters var _proto = Tooltip.prototype; // Public _proto.enable = function enable() { this._isEnabled = true; }; _proto.disable = function disable() { this._isEnabled = false; }; _proto.toggleEnabled = function toggleEnabled() { this._isEnabled = !this._isEnabled; }; _proto.toggle = function toggle(event) { if (!this._isEnabled) { return; } if (event) { var dataKey = this.constructor.DATA_KEY; var context = $$$1(event.currentTarget).data(dataKey); if (!context) { context = new this.constructor(event.currentTarget, this._getDelegateConfig()); $$$1(event.currentTarget).data(dataKey, context); } context._activeTrigger.click = !context._activeTrigger.click; if (context._isWithActiveTrigger()) { context._enter(null, context); } else { context._leave(null, context); } } else { if ($$$1(this.getTipElement()).hasClass(ClassName.SHOW)) { this._leave(null, this); return; } this._enter(null, this); } }; _proto.dispose = function dispose() { clearTimeout(this._timeout); $$$1.removeData(this.element, this.constructor.DATA_KEY); $$$1(this.element).off(this.constructor.EVENT_KEY); $$$1(this.element).closest('.modal').off('hide.bs.modal'); if (this.tip) { $$$1(this.tip).remove(); } this._isEnabled = null; this._timeout = null; this._hoverState = null; this._activeTrigger = null; if (this._popper !== null) { this._popper.destroy(); } this._popper = null; this.element = null; this.config = null; this.tip = null; }; _proto.show = function show() { var _this = this; if ($$$1(this.element).css('display') === 'none') { throw new Error('Please use show on visible elements'); } var showEvent = $$$1.Event(this.constructor.Event.SHOW); if (this.isWithContent() && this._isEnabled) { $$$1(this.element).trigger(showEvent); var isInTheDom = $$$1.contains(this.element.ownerDocument.documentElement, this.element); if (showEvent.isDefaultPrevented() || !isInTheDom) { return; } var tip = this.getTipElement(); var tipId = Util.getUID(this.constructor.NAME); tip.setAttribute('id', tipId); this.element.setAttribute('aria-describedby', tipId); this.setContent(); if (this.config.animation) { $$$1(tip).addClass(ClassName.FADE); } var placement = typeof this.config.placement === 'function' ? this.config.placement.call(this, tip, this.element) : this.config.placement; var attachment = this._getAttachment(placement); this.addAttachmentClass(attachment); var container = this.config.container === false ? 
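/*
 * Container lookup: with the default `container: false` the generated
 * .tooltip element is appended to document.body; any other value is resolved
 * with $(document).find(). A minimal sketch:
 *
 *   $('[data-toggle="tooltip"]').tooltip({ container: 'body' });
 */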
document.body : $$$1(document).find(this.config.container); $$$1(tip).data(this.constructor.DATA_KEY, this); if (!$$$1.contains(this.element.ownerDocument.documentElement, this.tip)) { $$$1(tip).appendTo(container); } $$$1(this.element).trigger(this.constructor.Event.INSERTED); this._popper = new Popper(this.element, tip, { placement: attachment, modifiers: { offset: { offset: this.config.offset }, flip: { behavior: this.config.fallbackPlacement }, arrow: { element: Selector.ARROW }, preventOverflow: { boundariesElement: this.config.boundary } }, onCreate: function onCreate(data) { if (data.originalPlacement !== data.placement) { _this._handlePopperPlacementChange(data); } }, onUpdate: function onUpdate(data) { _this._handlePopperPlacementChange(data); } }); $$$1(tip).addClass(ClassName.SHOW); // If this is a touch-enabled device we add extra // empty mouseover listeners to the body's immediate children; // only needed because of broken event delegation on iOS // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html if ('ontouchstart' in document.documentElement) { $$$1(document.body).children().on('mouseover', null, $$$1.noop); } var complete = function complete() { if (_this.config.animation) { _this._fixTransition(); } var prevHoverState = _this._hoverState; _this._hoverState = null; $$$1(_this.element).trigger(_this.constructor.Event.SHOWN); if (prevHoverState === HoverState.OUT) { _this._leave(null, _this); } }; if ($$$1(this.tip).hasClass(ClassName.FADE)) { var transitionDuration = Util.getTransitionDurationFromElement(this.tip); $$$1(this.tip).one(Util.TRANSITION_END, complete).emulateTransitionEnd(transitionDuration); } else { complete(); } } }; _proto.hide = function hide(callback) { var _this2 = this; var tip = this.getTipElement(); var hideEvent = $$$1.Event(this.constructor.Event.HIDE); var complete = function complete() { if (_this2._hoverState !== HoverState.SHOW && tip.parentNode) { tip.parentNode.removeChild(tip); } _this2._cleanTipClass(); _this2.element.removeAttribute('aria-describedby'); $$$1(_this2.element).trigger(_this2.constructor.Event.HIDDEN); if (_this2._popper !== null) { _this2._popper.destroy(); } if (callback) { callback(); } }; $$$1(this.element).trigger(hideEvent); if (hideEvent.isDefaultPrevented()) { return; } $$$1(tip).removeClass(ClassName.SHOW); // If this is a touch-enabled device we remove the extra // empty mouseover listeners we added for iOS support if ('ontouchstart' in document.documentElement) { $$$1(document.body).children().off('mouseover', null, $$$1.noop); } this._activeTrigger[Trigger.CLICK] = false; this._activeTrigger[Trigger.FOCUS] = false; this._activeTrigger[Trigger.HOVER] = false; if ($$$1(this.tip).hasClass(ClassName.FADE)) { var transitionDuration = Util.getTransitionDurationFromElement(tip); $$$1(tip).one(Util.TRANSITION_END, complete).emulateTransitionEnd(transitionDuration); } else { complete(); } this._hoverState = ''; }; _proto.update = function update() { if (this._popper !== null) { this._popper.scheduleUpdate(); } }; // Protected _proto.isWithContent = function isWithContent() { return Boolean(this.getTitle()); }; _proto.addAttachmentClass = function addAttachmentClass(attachment) { $$$1(this.getTipElement()).addClass(CLASS_PREFIX + "-" + attachment); }; _proto.getTipElement = function getTipElement() { this.tip = this.tip || $$$1(this.config.template)[0]; return this.tip; }; _proto.setContent = function setContent() { var tip = this.getTipElement(); 
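/*
 * The setElementContent call just below honours the `html` option: with
 * html:false the title is inserted via .text() (markup-safe), with html:true
 * DOM nodes are appended and strings injected via .html(), so keep html:false
 * for untrusted titles.
 */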
this.setElementContent($$$1(tip.querySelectorAll(Selector.TOOLTIP_INNER)), this.getTitle()); $$$1(tip).removeClass(ClassName.FADE + " " + ClassName.SHOW); }; _proto.setElementContent = function setElementContent($element, content) { var html = this.config.html; if (typeof content === 'object' && (content.nodeType || content.jquery)) { // Content is a DOM node or a jQuery if (html) { if (!$$$1(content).parent().is($element)) { $element.empty().append(content); } } else { $element.text($$$1(content).text()); } } else { $element[html ? 'html' : 'text'](content); } }; _proto.getTitle = function getTitle() { var title = this.element.getAttribute('data-original-title'); if (!title) { title = typeof this.config.title === 'function' ? this.config.title.call(this.element) : this.config.title; } return title; }; // Private _proto._getAttachment = function _getAttachment(placement) { return AttachmentMap[placement.toUpperCase()]; }; _proto._setListeners = function _setListeners() { var _this3 = this; var triggers = this.config.trigger.split(' '); triggers.forEach(function (trigger) { if (trigger === 'click') { $$$1(_this3.element).on(_this3.constructor.Event.CLICK, _this3.config.selector, function (event) { return _this3.toggle(event); }); } else if (trigger !== Trigger.MANUAL) { var eventIn = trigger === Trigger.HOVER ? _this3.constructor.Event.MOUSEENTER : _this3.constructor.Event.FOCUSIN; var eventOut = trigger === Trigger.HOVER ? _this3.constructor.Event.MOUSELEAVE : _this3.constructor.Event.FOCUSOUT; $$$1(_this3.element).on(eventIn, _this3.config.selector, function (event) { return _this3._enter(event); }).on(eventOut, _this3.config.selector, function (event) { return _this3._leave(event); }); } $$$1(_this3.element).closest('.modal').on('hide.bs.modal', function () { return _this3.hide(); }); }); if (this.config.selector) { this.config = _objectSpread({}, this.config, { trigger: 'manual', selector: '' }); } else { this._fixTitle(); } }; _proto._fixTitle = function _fixTitle() { var titleType = typeof this.element.getAttribute('data-original-title'); if (this.element.getAttribute('title') || titleType !== 'string') { this.element.setAttribute('data-original-title', this.element.getAttribute('title') || ''); this.element.setAttribute('title', ''); } }; _proto._enter = function _enter(event, context) { var dataKey = this.constructor.DATA_KEY; context = context || $$$1(event.currentTarget).data(dataKey); if (!context) { context = new this.constructor(event.currentTarget, this._getDelegateConfig()); $$$1(event.currentTarget).data(dataKey, context); } if (event) { context._activeTrigger[event.type === 'focusin' ? Trigger.FOCUS : Trigger.HOVER] = true; } if ($$$1(context.getTipElement()).hasClass(ClassName.SHOW) || context._hoverState === HoverState.SHOW) { context._hoverState = HoverState.SHOW; return; } clearTimeout(context._timeout); context._hoverState = HoverState.SHOW; if (!context.config.delay || !context.config.delay.show) { context.show(); return; } context._timeout = setTimeout(function () { if (context._hoverState === HoverState.SHOW) { context.show(); } }, context.config.delay.show); }; _proto._leave = function _leave(event, context) { var dataKey = this.constructor.DATA_KEY; context = context || $$$1(event.currentTarget).data(dataKey); if (!context) { context = new this.constructor(event.currentTarget, this._getDelegateConfig()); $$$1(event.currentTarget).data(dataKey, context); } if (event) { context._activeTrigger[event.type === 'focusout' ? 
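/*
 * _enter and _leave implement the `delay` option: a plain number applies to
 * both show and hide, an object splits them. A minimal sketch:
 *
 *   $('[data-toggle="tooltip"]').tooltip({ delay: { show: 500, hide: 100 } });
 */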
Trigger.FOCUS : Trigger.HOVER] = false; } if (context._isWithActiveTrigger()) { return; } clearTimeout(context._timeout); context._hoverState = HoverState.OUT; if (!context.config.delay || !context.config.delay.hide) { context.hide(); return; } context._timeout = setTimeout(function () { if (context._hoverState === HoverState.OUT) { context.hide(); } }, context.config.delay.hide); }; _proto._isWithActiveTrigger = function _isWithActiveTrigger() { for (var trigger in this._activeTrigger) { if (this._activeTrigger[trigger]) { return true; } } return false; }; _proto._getConfig = function _getConfig(config) { config = _objectSpread({}, this.constructor.Default, $$$1(this.element).data(), typeof config === 'object' && config ? config : {}); if (typeof config.delay === 'number') { config.delay = { show: config.delay, hide: config.delay }; } if (typeof config.title === 'number') { config.title = config.title.toString(); } if (typeof config.content === 'number') { config.content = config.content.toString(); } Util.typeCheckConfig(NAME, config, this.constructor.DefaultType); return config; }; _proto._getDelegateConfig = function _getDelegateConfig() { var config = {}; if (this.config) { for (var key in this.config) { if (this.constructor.Default[key] !== this.config[key]) { config[key] = this.config[key]; } } } return config; }; _proto._cleanTipClass = function _cleanTipClass() { var $tip = $$$1(this.getTipElement()); var tabClass = $tip.attr('class').match(BSCLS_PREFIX_REGEX); if (tabClass !== null && tabClass.length) { $tip.removeClass(tabClass.join('')); } }; _proto._handlePopperPlacementChange = function _handlePopperPlacementChange(popperData) { var popperInstance = popperData.instance; this.tip = popperInstance.popper; this._cleanTipClass(); this.addAttachmentClass(this._getAttachment(popperData.placement)); }; _proto._fixTransition = function _fixTransition() { var tip = this.getTipElement(); var initConfigAnimation = this.config.animation; if (tip.getAttribute('x-placement') !== null) { return; } $$$1(tip).removeClass(ClassName.FADE); this.config.animation = false; this.hide(); this.show(); this.config.animation = initConfigAnimation; }; // Static Tooltip._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); var _config = typeof config === 'object' && config; if (!data && /dispose|hide/.test(config)) { return; } if (!data) { data = new Tooltip(this, _config); $$$1(this).data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](); } }); }; _createClass(Tooltip, null, [{ key: "VERSION", get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }, { key: "NAME", get: function get() { return NAME; } }, { key: "DATA_KEY", get: function get() { return DATA_KEY; } }, { key: "Event", get: function get() { return Event; } }, { key: "EVENT_KEY", get: function get() { return EVENT_KEY; } }, { key: "DefaultType", get: function get() { return DefaultType; } }]); return Tooltip; }(); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Tooltip._jQueryInterface; $$$1.fn[NAME].Constructor = Tooltip; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Tooltip._jQueryInterface; }; return Tooltip; }($, Popper); /** * 
-------------------------------------------------------------------------- * Bootstrap (v4.1.3): popover.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Popover = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'popover'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.popover'; var EVENT_KEY = "." + DATA_KEY; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var CLASS_PREFIX = 'bs-popover'; var BSCLS_PREFIX_REGEX = new RegExp("(^|\\s)" + CLASS_PREFIX + "\\S+", 'g'); var Default = _objectSpread({}, Tooltip.Default, { placement: 'right', trigger: 'click', content: '', template: '<div class="popover" role="tooltip">' + '<div class="arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div></div>' }); var DefaultType = _objectSpread({}, Tooltip.DefaultType, { content: '(string|element|function)' }); var ClassName = { FADE: 'fade', SHOW: 'show' }; var Selector = { TITLE: '.popover-header', CONTENT: '.popover-body' }; var Event = { HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY, INSERTED: "inserted" + EVENT_KEY, CLICK: "click" + EVENT_KEY, FOCUSIN: "focusin" + EVENT_KEY, FOCUSOUT: "focusout" + EVENT_KEY, MOUSEENTER: "mouseenter" + EVENT_KEY, MOUSELEAVE: "mouseleave" + EVENT_KEY /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Popover = /*#__PURE__*/ function (_Tooltip) { _inheritsLoose(Popover, _Tooltip); function Popover() { return _Tooltip.apply(this, arguments) || this; } var _proto = Popover.prototype; // Overrides _proto.isWithContent = function isWithContent() { return this.getTitle() || this._getContent(); }; _proto.addAttachmentClass = function addAttachmentClass(attachment) { $$$1(this.getTipElement()).addClass(CLASS_PREFIX + "-" + attachment); }; _proto.getTipElement = function getTipElement() { this.tip = this.tip || $$$1(this.config.template)[0]; return this.tip; }; _proto.setContent = function setContent() { var $tip = $$$1(this.getTipElement()); // We use append for html objects to maintain js events this.setElementContent($tip.find(Selector.TITLE), this.getTitle()); var content = this._getContent(); if (typeof content === 'function') { content = content.call(this.element); } this.setElementContent($tip.find(Selector.CONTENT), content); $tip.removeClass(ClassName.FADE + " " + ClassName.SHOW); }; // Private _proto._getContent = function _getContent() { return this.element.getAttribute('data-content') || this.config.content; }; _proto._cleanTipClass = function _cleanTipClass() { var $tip = $$$1(this.getTipElement()); var tabClass = $tip.attr('class').match(BSCLS_PREFIX_REGEX); if (tabClass !== null && tabClass.length > 0) { $tip.removeClass(tabClass.join('')); } }; // Static Popover._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); var _config = typeof config === 'object' ? 
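/*
 * Popover reuses Tooltip's machinery and mainly overrides content handling:
 * the body comes from data-content or the `content` option, which may be a
 * function evaluated with the trigger element as `this`. A minimal sketch
 * (the "#helpBtn" id is hypothetical):
 *
 *   $('#helpBtn').popover({
 *     title: 'Help',
 *     content: function () { return 'Rendered for #' + this.id; }
 *   });
 */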
config : null; if (!data && /destroy|hide/.test(config)) { return; } if (!data) { data = new Popover(this, _config); $$$1(this).data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](); } }); }; _createClass(Popover, null, [{ key: "VERSION", // Getters get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }, { key: "NAME", get: function get() { return NAME; } }, { key: "DATA_KEY", get: function get() { return DATA_KEY; } }, { key: "Event", get: function get() { return Event; } }, { key: "EVENT_KEY", get: function get() { return EVENT_KEY; } }, { key: "DefaultType", get: function get() { return DefaultType; } }]); return Popover; }(Tooltip); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Popover._jQueryInterface; $$$1.fn[NAME].Constructor = Popover; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Popover._jQueryInterface; }; return Popover; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): scrollspy.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var ScrollSpy = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'scrollspy'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.scrollspy'; var EVENT_KEY = "." + DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var Default = { offset: 10, method: 'auto', target: '' }; var DefaultType = { offset: 'number', method: 'string', target: '(string|element)' }; var Event = { ACTIVATE: "activate" + EVENT_KEY, SCROLL: "scroll" + EVENT_KEY, LOAD_DATA_API: "load" + EVENT_KEY + DATA_API_KEY }; var ClassName = { DROPDOWN_ITEM: 'dropdown-item', DROPDOWN_MENU: 'dropdown-menu', ACTIVE: 'active' }; var Selector = { DATA_SPY: '[data-spy="scroll"]', ACTIVE: '.active', NAV_LIST_GROUP: '.nav, .list-group', NAV_LINKS: '.nav-link', NAV_ITEMS: '.nav-item', LIST_ITEMS: '.list-group-item', DROPDOWN: '.dropdown', DROPDOWN_ITEMS: '.dropdown-item', DROPDOWN_TOGGLE: '.dropdown-toggle' }; var OffsetMethod = { OFFSET: 'offset', POSITION: 'position' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var ScrollSpy = /*#__PURE__*/ function () { function ScrollSpy(element, config) { var _this = this; this._element = element; this._scrollElement = element.tagName === 'BODY' ? 
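/*
 * The ternary below picks the scroll source: spying on <body> means listening
 * to window scroll, while any other element scrolls itself. A minimal sketch
 * (the "#navbar" id is hypothetical):
 *
 *   $('body').scrollspy({ target: '#navbar', offset: 80 });
 */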
window : element; this._config = this._getConfig(config); this._selector = this._config.target + " " + Selector.NAV_LINKS + "," + (this._config.target + " " + Selector.LIST_ITEMS + ",") + (this._config.target + " " + Selector.DROPDOWN_ITEMS); this._offsets = []; this._targets = []; this._activeTarget = null; this._scrollHeight = 0; $$$1(this._scrollElement).on(Event.SCROLL, function (event) { return _this._process(event); }); this.refresh(); this._process(); } // Getters var _proto = ScrollSpy.prototype; // Public _proto.refresh = function refresh() { var _this2 = this; var autoMethod = this._scrollElement === this._scrollElement.window ? OffsetMethod.OFFSET : OffsetMethod.POSITION; var offsetMethod = this._config.method === 'auto' ? autoMethod : this._config.method; var offsetBase = offsetMethod === OffsetMethod.POSITION ? this._getScrollTop() : 0; this._offsets = []; this._targets = []; this._scrollHeight = this._getScrollHeight(); var targets = [].slice.call(document.querySelectorAll(this._selector)); targets.map(function (element) { var target; var targetSelector = Util.getSelectorFromElement(element); if (targetSelector) { target = document.querySelector(targetSelector); } if (target) { var targetBCR = target.getBoundingClientRect(); if (targetBCR.width || targetBCR.height) { // TODO (fat): remove sketch reliance on jQuery position/offset return [$$$1(target)[offsetMethod]().top + offsetBase, targetSelector]; } } return null; }).filter(function (item) { return item; }).sort(function (a, b) { return a[0] - b[0]; }).forEach(function (item) { _this2._offsets.push(item[0]); _this2._targets.push(item[1]); }); }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); $$$1(this._scrollElement).off(EVENT_KEY); this._element = null; this._scrollElement = null; this._config = null; this._selector = null; this._offsets = null; this._targets = null; this._activeTarget = null; this._scrollHeight = null; }; // Private _proto._getConfig = function _getConfig(config) { config = _objectSpread({}, Default, typeof config === 'object' && config ? config : {}); if (typeof config.target !== 'string') { var id = $$$1(config.target).attr('id'); if (!id) { id = Util.getUID(NAME); $$$1(config.target).attr('id', id); } config.target = "#" + id; } Util.typeCheckConfig(NAME, config, DefaultType); return config; }; _proto._getScrollTop = function _getScrollTop() { return this._scrollElement === window ? this._scrollElement.pageYOffset : this._scrollElement.scrollTop; }; _proto._getScrollHeight = function _getScrollHeight() { return this._scrollElement.scrollHeight || Math.max(document.body.scrollHeight, document.documentElement.scrollHeight); }; _proto._getOffsetHeight = function _getOffsetHeight() { return this._scrollElement === window ? 
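/*
 * refresh() above caches each target's top offset in ascending order;
 * _process below walks that list on every scroll event and activates the
 * entry whose offset range contains the current scroll position.
 */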
window.innerHeight : this._scrollElement.getBoundingClientRect().height; }; _proto._process = function _process() { var scrollTop = this._getScrollTop() + this._config.offset; var scrollHeight = this._getScrollHeight(); var maxScroll = this._config.offset + scrollHeight - this._getOffsetHeight(); if (this._scrollHeight !== scrollHeight) { this.refresh(); } if (scrollTop >= maxScroll) { var target = this._targets[this._targets.length - 1]; if (this._activeTarget !== target) { this._activate(target); } return; } if (this._activeTarget && scrollTop < this._offsets[0] && this._offsets[0] > 0) { this._activeTarget = null; this._clear(); return; } var offsetLength = this._offsets.length; for (var i = offsetLength; i--;) { var isActiveTarget = this._activeTarget !== this._targets[i] && scrollTop >= this._offsets[i] && (typeof this._offsets[i + 1] === 'undefined' || scrollTop < this._offsets[i + 1]); if (isActiveTarget) { this._activate(this._targets[i]); } } }; _proto._activate = function _activate(target) { this._activeTarget = target; this._clear(); var queries = this._selector.split(','); // eslint-disable-next-line arrow-body-style queries = queries.map(function (selector) { return selector + "[data-target=\"" + target + "\"]," + (selector + "[href=\"" + target + "\"]"); }); var $link = $$$1([].slice.call(document.querySelectorAll(queries.join(',')))); if ($link.hasClass(ClassName.DROPDOWN_ITEM)) { $link.closest(Selector.DROPDOWN).find(Selector.DROPDOWN_TOGGLE).addClass(ClassName.ACTIVE); $link.addClass(ClassName.ACTIVE); } else { // Set triggered link as active $link.addClass(ClassName.ACTIVE); // Set triggered links parents as active // With both <ul> and <nav> markup a parent is the previous sibling of any nav ancestor $link.parents(Selector.NAV_LIST_GROUP).prev(Selector.NAV_LINKS + ", " + Selector.LIST_ITEMS).addClass(ClassName.ACTIVE); // Handle special case when .nav-link is inside .nav-item $link.parents(Selector.NAV_LIST_GROUP).prev(Selector.NAV_ITEMS).children(Selector.NAV_LINKS).addClass(ClassName.ACTIVE); } $$$1(this._scrollElement).trigger(Event.ACTIVATE, { relatedTarget: target }); }; _proto._clear = function _clear() { var nodes = [].slice.call(document.querySelectorAll(this._selector)); $$$1(nodes).filter(Selector.ACTIVE).removeClass(ClassName.ACTIVE); }; // Static ScrollSpy._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var data = $$$1(this).data(DATA_KEY); var _config = typeof config === 'object' && config; if (!data) { data = new ScrollSpy(this, _config); $$$1(this).data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](); } }); }; _createClass(ScrollSpy, null, [{ key: "VERSION", get: function get() { return VERSION; } }, { key: "Default", get: function get() { return Default; } }]); return ScrollSpy; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(window).on(Event.LOAD_DATA_API, function () { var scrollSpys = [].slice.call(document.querySelectorAll(Selector.DATA_SPY)); var scrollSpysLength = scrollSpys.length; for (var i = scrollSpysLength; i--;) { var $spy = $$$1(scrollSpys[i]); ScrollSpy._jQueryInterface.call($spy, $spy.data()); } }); /** * ------------------------------------------------------------------------ * jQuery * 
------------------------------------------------------------------------ */ $$$1.fn[NAME] = ScrollSpy._jQueryInterface; $$$1.fn[NAME].Constructor = ScrollSpy; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return ScrollSpy._jQueryInterface; }; return ScrollSpy; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): tab.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ var Tab = function ($$$1) { /** * ------------------------------------------------------------------------ * Constants * ------------------------------------------------------------------------ */ var NAME = 'tab'; var VERSION = '4.1.3'; var DATA_KEY = 'bs.tab'; var EVENT_KEY = "." + DATA_KEY; var DATA_API_KEY = '.data-api'; var JQUERY_NO_CONFLICT = $$$1.fn[NAME]; var Event = { HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY, CLICK_DATA_API: "click" + EVENT_KEY + DATA_API_KEY }; var ClassName = { DROPDOWN_MENU: 'dropdown-menu', ACTIVE: 'active', DISABLED: 'disabled', FADE: 'fade', SHOW: 'show' }; var Selector = { DROPDOWN: '.dropdown', NAV_LIST_GROUP: '.nav, .list-group', ACTIVE: '.active', ACTIVE_UL: '> li > .active', DATA_TOGGLE: '[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]', DROPDOWN_TOGGLE: '.dropdown-toggle', DROPDOWN_ACTIVE_CHILD: '> .dropdown-menu .active' /** * ------------------------------------------------------------------------ * Class Definition * ------------------------------------------------------------------------ */ }; var Tab = /*#__PURE__*/ function () { function Tab(element) { this._element = element; } // Getters var _proto = Tab.prototype; // Public _proto.show = function show() { var _this = this; if (this._element.parentNode && this._element.parentNode.nodeType === Node.ELEMENT_NODE && $$$1(this._element).hasClass(ClassName.ACTIVE) || $$$1(this._element).hasClass(ClassName.DISABLED)) { return; } var target; var previous; var listElement = $$$1(this._element).closest(Selector.NAV_LIST_GROUP)[0]; var selector = Util.getSelectorFromElement(this._element); if (listElement) { var itemSelector = listElement.nodeName === 'UL' ? 
Selector.ACTIVE_UL : Selector.ACTIVE; previous = $$$1.makeArray($$$1(listElement).find(itemSelector)); previous = previous[previous.length - 1]; } var hideEvent = $$$1.Event(Event.HIDE, { relatedTarget: this._element }); var showEvent = $$$1.Event(Event.SHOW, { relatedTarget: previous }); if (previous) { $$$1(previous).trigger(hideEvent); } $$$1(this._element).trigger(showEvent); if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) { return; } if (selector) { target = document.querySelector(selector); } this._activate(this._element, listElement); var complete = function complete() { var hiddenEvent = $$$1.Event(Event.HIDDEN, { relatedTarget: _this._element }); var shownEvent = $$$1.Event(Event.SHOWN, { relatedTarget: previous }); $$$1(previous).trigger(hiddenEvent); $$$1(_this._element).trigger(shownEvent); }; if (target) { this._activate(target, target.parentNode, complete); } else { complete(); } }; _proto.dispose = function dispose() { $$$1.removeData(this._element, DATA_KEY); this._element = null; }; // Private _proto._activate = function _activate(element, container, callback) { var _this2 = this; var activeElements; if (container.nodeName === 'UL') { activeElements = $$$1(container).find(Selector.ACTIVE_UL); } else { activeElements = $$$1(container).children(Selector.ACTIVE); } var active = activeElements[0]; var isTransitioning = callback && active && $$$1(active).hasClass(ClassName.FADE); var complete = function complete() { return _this2._transitionComplete(element, active, callback); }; if (active && isTransitioning) { var transitionDuration = Util.getTransitionDurationFromElement(active); $$$1(active).one(Util.TRANSITION_END, complete).emulateTransitionEnd(transitionDuration); } else { complete(); } }; _proto._transitionComplete = function _transitionComplete(element, active, callback) { if (active) { $$$1(active).removeClass(ClassName.SHOW + " " + ClassName.ACTIVE); var dropdownChild = $$$1(active.parentNode).find(Selector.DROPDOWN_ACTIVE_CHILD)[0]; if (dropdownChild) { $$$1(dropdownChild).removeClass(ClassName.ACTIVE); } if (active.getAttribute('role') === 'tab') { active.setAttribute('aria-selected', false); } } $$$1(element).addClass(ClassName.ACTIVE); if (element.getAttribute('role') === 'tab') { element.setAttribute('aria-selected', true); } Util.reflow(element); $$$1(element).addClass(ClassName.SHOW); if (element.parentNode && $$$1(element.parentNode).hasClass(ClassName.DROPDOWN_MENU)) { var dropdownElement = $$$1(element).closest(Selector.DROPDOWN)[0]; if (dropdownElement) { var dropdownToggleList = [].slice.call(dropdownElement.querySelectorAll(Selector.DROPDOWN_TOGGLE)); $$$1(dropdownToggleList).addClass(ClassName.ACTIVE); } element.setAttribute('aria-expanded', true); } if (callback) { callback(); } }; // Static Tab._jQueryInterface = function _jQueryInterface(config) { return this.each(function () { var $this = $$$1(this); var data = $this.data(DATA_KEY); if (!data) { data = new Tab(this); $this.data(DATA_KEY, data); } if (typeof config === 'string') { if (typeof data[config] === 'undefined') { throw new TypeError("No method named \"" + config + "\""); } data[config](); } }); }; _createClass(Tab, null, [{ key: "VERSION", get: function get() { return VERSION; } }]); return Tab; }(); /** * ------------------------------------------------------------------------ * Data Api implementation * ------------------------------------------------------------------------ */ $$$1(document).on(Event.CLICK_DATA_API, Selector.DATA_TOGGLE, function (event) { 
event.preventDefault(); Tab._jQueryInterface.call($$$1(this), 'show'); }); /** * ------------------------------------------------------------------------ * jQuery * ------------------------------------------------------------------------ */ $$$1.fn[NAME] = Tab._jQueryInterface; $$$1.fn[NAME].Constructor = Tab; $$$1.fn[NAME].noConflict = function () { $$$1.fn[NAME] = JQUERY_NO_CONFLICT; return Tab._jQueryInterface; }; return Tab; }($); /** * -------------------------------------------------------------------------- * Bootstrap (v4.1.3): index.js * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) * -------------------------------------------------------------------------- */ (function ($$$1) { if (typeof $$$1 === 'undefined') { throw new TypeError('Bootstrap\'s JavaScript requires jQuery. jQuery must be included before Bootstrap\'s JavaScript.'); } var version = $$$1.fn.jquery.split(' ')[0].split('.'); var minMajor = 1; var ltMajor = 2; var minMinor = 9; var minPatch = 1; var maxMajor = 4; if (version[0] < ltMajor && version[1] < minMinor || version[0] === minMajor && version[1] === minMinor && version[2] < minPatch || version[0] >= maxMajor) { throw new Error('Bootstrap\'s JavaScript requires at least jQuery v1.9.1 but less than v4.0.0'); } })($); exports.Util = Util; exports.Alert = Alert; exports.Button = Button; exports.Carousel = Carousel; exports.Collapse = Collapse; exports.Dropdown = Dropdown; exports.Modal = Modal; exports.Popover = Popover; exports.Scrollspy = ScrollSpy; exports.Tab = Tab; exports.Tooltip = Tooltip; Object.defineProperty(exports, '__esModule', { value: true }); }))); //# sourceMappingURL=bootstrap.js.map /***/ }), /* 6 */ /***/ (function(module, exports) { /*! Waypoints - 4.0.1 Copyright © 2011-2016 Caleb Troughton Licensed under the MIT license. 
https://github.com/imakewebthings/waypoints/blob/master/licenses.txt */ !function(){"use strict";function t(o){if(!o)throw new Error("No options passed to Waypoint constructor");if(!o.element)throw new Error("No element option passed to Waypoint constructor");if(!o.handler)throw new Error("No handler option passed to Waypoint constructor");this.key="waypoint-"+e,this.options=t.Adapter.extend({},t.defaults,o),this.element=this.options.element,this.adapter=new t.Adapter(this.element),this.callback=o.handler,this.axis=this.options.horizontal?"horizontal":"vertical",this.enabled=this.options.enabled,this.triggerPoint=null,this.group=t.Group.findOrCreate({name:this.options.group,axis:this.axis}),this.context=t.Context.findOrCreateByElement(this.options.context),t.offsetAliases[this.options.offset]&&(this.options.offset=t.offsetAliases[this.options.offset]),this.group.add(this),this.context.add(this),i[this.key]=this,e+=1}var e=0,i={};t.prototype.queueTrigger=function(t){this.group.queueTrigger(this,t)},t.prototype.trigger=function(t){this.enabled&&this.callback&&this.callback.apply(this,t)},t.prototype.destroy=function(){this.context.remove(this),this.group.remove(this),delete i[this.key]},t.prototype.disable=function(){return this.enabled=!1,this},t.prototype.enable=function(){return this.context.refresh(),this.enabled=!0,this},t.prototype.next=function(){return this.group.next(this)},t.prototype.previous=function(){return this.group.previous(this)},t.invokeAll=function(t){var e=[];for(var o in i)e.push(i[o]);for(var n=0,r=e.length;r>n;n++)e[n][t]()},t.destroyAll=function(){t.invokeAll("destroy")},t.disableAll=function(){t.invokeAll("disable")},t.enableAll=function(){t.Context.refreshAll();for(var e in i)i[e].enabled=!0;return this},t.refreshAll=function(){t.Context.refreshAll()},t.viewportHeight=function(){return window.innerHeight||document.documentElement.clientHeight},t.viewportWidth=function(){return document.documentElement.clientWidth},t.adapters=[],t.defaults={context:window,continuous:!0,enabled:!0,group:"default",horizontal:!1,offset:0},t.offsetAliases={"bottom-in-view":function(){return this.context.innerHeight()-this.adapter.outerHeight()},"right-in-view":function(){return this.context.innerWidth()-this.adapter.outerWidth()}},window.Waypoint=t}(),function(){"use strict";function t(t){window.setTimeout(t,1e3/60)}function e(t){this.element=t,this.Adapter=n.Adapter,this.adapter=new this.Adapter(t),this.key="waypoint-context-"+i,this.didScroll=!1,this.didResize=!1,this.oldScroll={x:this.adapter.scrollLeft(),y:this.adapter.scrollTop()},this.waypoints={vertical:{},horizontal:{}},t.waypointContextKey=this.key,o[t.waypointContextKey]=this,i+=1,n.windowContext||(n.windowContext=!0,n.windowContext=new e(window)),this.createThrottledScrollHandler(),this.createThrottledResizeHandler()}var i=0,o={},n=window.Waypoint,r=window.onload;e.prototype.add=function(t){var e=t.options.horizontal?"horizontal":"vertical";this.waypoints[e][t.key]=t,this.refresh()},e.prototype.checkEmpty=function(){var t=this.Adapter.isEmptyObject(this.waypoints.horizontal),e=this.Adapter.isEmptyObject(this.waypoints.vertical),i=this.element==this.element.window;t&&e&&!i&&(this.adapter.off(".waypoints"),delete o[this.key])},e.prototype.createThrottledResizeHandler=function(){function t(){e.handleResize(),e.didResize=!1}var e=this;this.adapter.on("resize.waypoints",function(){e.didResize||(e.didResize=!0,n.requestAnimationFrame(t))})},e.prototype.createThrottledScrollHandler=function(){function 
t(){e.handleScroll(),e.didScroll=!1}var e=this;this.adapter.on("scroll.waypoints",function(){(!e.didScroll||n.isTouch)&&(e.didScroll=!0,n.requestAnimationFrame(t))})},e.prototype.handleResize=function(){n.Context.refreshAll()},e.prototype.handleScroll=function(){var t={},e={horizontal:{newScroll:this.adapter.scrollLeft(),oldScroll:this.oldScroll.x,forward:"right",backward:"left"},vertical:{newScroll:this.adapter.scrollTop(),oldScroll:this.oldScroll.y,forward:"down",backward:"up"}};for(var i in e){var o=e[i],n=o.newScroll>o.oldScroll,r=n?o.forward:o.backward;for(var s in this.waypoints[i]){var a=this.waypoints[i][s];if(null!==a.triggerPoint){var l=o.oldScroll<a.triggerPoint,h=o.newScroll>=a.triggerPoint,p=l&&h,u=!l&&!h;(p||u)&&(a.queueTrigger(r),t[a.group.id]=a.group)}}}for(var c in t)t[c].flushTriggers();this.oldScroll={x:e.horizontal.newScroll,y:e.vertical.newScroll}},e.prototype.innerHeight=function(){return this.element==this.element.window?n.viewportHeight():this.adapter.innerHeight()},e.prototype.remove=function(t){delete this.waypoints[t.axis][t.key],this.checkEmpty()},e.prototype.innerWidth=function(){return this.element==this.element.window?n.viewportWidth():this.adapter.innerWidth()},e.prototype.destroy=function(){var t=[];for(var e in this.waypoints)for(var i in this.waypoints[e])t.push(this.waypoints[e][i]);for(var o=0,n=t.length;n>o;o++)t[o].destroy()},e.prototype.refresh=function(){var t,e=this.element==this.element.window,i=e?void 0:this.adapter.offset(),o={};this.handleScroll(),t={horizontal:{contextOffset:e?0:i.left,contextScroll:e?0:this.oldScroll.x,contextDimension:this.innerWidth(),oldScroll:this.oldScroll.x,forward:"right",backward:"left",offsetProp:"left"},vertical:{contextOffset:e?0:i.top,contextScroll:e?0:this.oldScroll.y,contextDimension:this.innerHeight(),oldScroll:this.oldScroll.y,forward:"down",backward:"up",offsetProp:"top"}};for(var r in t){var s=t[r];for(var a in this.waypoints[r]){var l,h,p,u,c,d=this.waypoints[r][a],f=d.options.offset,w=d.triggerPoint,y=0,g=null==w;d.element!==d.element.window&&(y=d.adapter.offset()[s.offsetProp]),"function"==typeof f?f=f.apply(d):"string"==typeof f&&(f=parseFloat(f),d.options.offset.indexOf("%")>-1&&(f=Math.ceil(s.contextDimension*f/100))),l=s.contextScroll-s.contextOffset,d.triggerPoint=Math.floor(y+l-f),h=w<s.oldScroll,p=d.triggerPoint>=s.oldScroll,u=h&&p,c=!h&&!p,!g&&u?(d.queueTrigger(s.backward),o[d.group.id]=d.group):!g&&c?(d.queueTrigger(s.forward),o[d.group.id]=d.group):g&&s.oldScroll>=d.triggerPoint&&(d.queueTrigger(s.forward),o[d.group.id]=d.group)}}return n.requestAnimationFrame(function(){for(var t in o)o[t].flushTriggers()}),this},e.findOrCreateByElement=function(t){return e.findByElement(t)||new e(t)},e.refreshAll=function(){for(var t in o)o[t].refresh()},e.findByElement=function(t){return o[t.waypointContextKey]},window.onload=function(){r&&r(),e.refreshAll()},n.requestAnimationFrame=function(e){var i=window.requestAnimationFrame||window.mozRequestAnimationFrame||window.webkitRequestAnimationFrame||t;i.call(window,e)},n.Context=e}(),function(){"use strict";function t(t,e){return t.triggerPoint-e.triggerPoint}function e(t,e){return e.triggerPoint-t.triggerPoint}function i(t){this.name=t.name,this.axis=t.axis,this.id=this.name+"-"+this.axis,this.waypoints=[],this.clearTriggerQueues(),o[this.axis][this.name]=this}var 
o={vertical:{},horizontal:{}},n=window.Waypoint;i.prototype.add=function(t){this.waypoints.push(t)},i.prototype.clearTriggerQueues=function(){this.triggerQueues={up:[],down:[],left:[],right:[]}},i.prototype.flushTriggers=function(){for(var i in this.triggerQueues){var o=this.triggerQueues[i],n="up"===i||"left"===i;o.sort(n?e:t);for(var r=0,s=o.length;s>r;r+=1){var a=o[r];(a.options.continuous||r===o.length-1)&&a.trigger([i])}}this.clearTriggerQueues()},i.prototype.next=function(e){this.waypoints.sort(t);var i=n.Adapter.inArray(e,this.waypoints),o=i===this.waypoints.length-1;return o?null:this.waypoints[i+1]},i.prototype.previous=function(e){this.waypoints.sort(t);var i=n.Adapter.inArray(e,this.waypoints);return i?this.waypoints[i-1]:null},i.prototype.queueTrigger=function(t,e){this.triggerQueues[e].push(t)},i.prototype.remove=function(t){var e=n.Adapter.inArray(t,this.waypoints);e>-1&&this.waypoints.splice(e,1)},i.prototype.first=function(){return this.waypoints[0]},i.prototype.last=function(){return this.waypoints[this.waypoints.length-1]},i.findOrCreate=function(t){return o[t.axis][t.name]||new i(t)},n.Group=i}(),function(){"use strict";function t(t){this.$element=e(t)}var e=window.jQuery,i=window.Waypoint;e.each(["innerHeight","innerWidth","off","offset","on","outerHeight","outerWidth","scrollLeft","scrollTop"],function(e,i){t.prototype[i]=function(){var t=Array.prototype.slice.call(arguments);return this.$element[i].apply(this.$element,t)}}),e.each(["extend","inArray","isEmptyObject"],function(i,o){t[o]=e[o]}),i.adapters.push({name:"jquery",Adapter:t}),i.Adapter=t}(),function(){"use strict";function t(t){return function(){var i=[],o=arguments[0];return t.isFunction(arguments[0])&&(o=t.extend({},arguments[1]),o.handler=arguments[0]),this.each(function(){var n=t.extend({},o,{element:this});"string"==typeof n.context&&(n.context=t(this).closest(n.context)[0]),i.push(new e(n))}),i}}var e=window.Waypoint;window.jQuery&&(window.jQuery.fn.waypoint=t(window.jQuery)),window.Zepto&&(window.Zepto.fn.waypoint=t(window.Zepto))}(); /***/ }), /* 7 */ /***/ (function(module, exports) { /*! Waypoints Inview Shortcut - 4.0.1 Copyright © 2011-2016 Caleb Troughton Licensed under the MIT license. 
https://github.com/imakewebthings/waypoints/blob/master/licenses.txt */ !function(){"use strict";function t(){}function e(t){this.options=i.Adapter.extend({},e.defaults,t),this.axis=this.options.horizontal?"horizontal":"vertical",this.waypoints=[],this.element=this.options.element,this.createWaypoints()}var i=window.Waypoint;e.prototype.createWaypoints=function(){for(var t={vertical:[{down:"enter",up:"exited",offset:"100%"},{down:"entered",up:"exit",offset:"bottom-in-view"},{down:"exit",up:"entered",offset:0},{down:"exited",up:"enter",offset:function(){return-this.adapter.outerHeight()}}],horizontal:[{right:"enter",left:"exited",offset:"100%"},{right:"entered",left:"exit",offset:"right-in-view"},{right:"exit",left:"entered",offset:0},{right:"exited",left:"enter",offset:function(){return-this.adapter.outerWidth()}}]},e=0,i=t[this.axis].length;i>e;e++){var n=t[this.axis][e];this.createWaypoint(n)}},e.prototype.createWaypoint=function(t){var e=this;this.waypoints.push(new i({context:this.options.context,element:this.options.element,enabled:this.options.enabled,handler:function(t){return function(i){e.options[t[i]].call(e,i)}}(t),offset:t.offset,horizontal:this.options.horizontal}))},e.prototype.destroy=function(){for(var t=0,e=this.waypoints.length;e>t;t++)this.waypoints[t].destroy();this.waypoints=[]},e.prototype.disable=function(){for(var t=0,e=this.waypoints.length;e>t;t++)this.waypoints[t].disable()},e.prototype.enable=function(){for(var t=0,e=this.waypoints.length;e>t;t++)this.waypoints[t].enable()},e.defaults={context:window,enabled:!0,enter:t,entered:t,exit:t,exited:t},i.Inview=e}(); /***/ }), /* 8 */ /***/ (function(module, exports) { /*! jssocials - v1.5.0 - 2017-04-30 * http://js-socials.com * Copyright (c) 2017 Artem Tabalin; Licensed MIT */ !function(a,b,c){function d(a,c){var d=b(a);d.data(f,this),this._$element=d,this.shares=[],this._init(c),this._render()}var e="JSSocials",f=e,g=function(a,c){return b.isFunction(a)?a.apply(c,b.makeArray(arguments).slice(2)):a},h=/(\.(jpeg|png|gif|bmp|svg)$|^data:image\/(jpeg|png|gif|bmp|svg\+xml);base64)/i,i=/(&?[a-zA-Z0-9]+=)?\{([a-zA-Z0-9]+)\}/g,j={G:1e9,M:1e6,K:1e3},k={};d.prototype={url:"",text:"",shareIn:"blank",showLabel:function(a){return this.showCount===!1?a>this.smallScreenWidth:a>=this.largeScreenWidth},showCount:function(a){return a<=this.smallScreenWidth?"inside":!0},smallScreenWidth:640,largeScreenWidth:1024,resizeTimeout:200,elementClass:"jssocials",sharesClass:"jssocials-shares",shareClass:"jssocials-share",shareButtonClass:"jssocials-share-button",shareLinkClass:"jssocials-share-link",shareLogoClass:"jssocials-share-logo",shareLabelClass:"jssocials-share-label",shareLinkCountClass:"jssocials-share-link-count",shareCountBoxClass:"jssocials-share-count-box",shareCountClass:"jssocials-share-count",shareZeroCountClass:"jssocials-share-no-count",_init:function(a){this._initDefaults(),b.extend(this,a),this._initShares(),this._attachWindowResizeCallback()},_initDefaults:function(){this.url=a.location.href,this.text=b.trim(b("meta[name=description]").attr("content")||b("title").text())},_initShares:function(){this.shares=b.map(this.shares,b.proxy(function(a){"string"==typeof a&&(a={share:a});var c=a.share&&k[a.share];if(!c&&!a.renderer)throw Error("Share '"+a.share+"' is not found");return 
b.extend({url:this.url,text:this.text},c,a)},this))},_attachWindowResizeCallback:function(){b(a).on("resize",b.proxy(this._windowResizeHandler,this))},_detachWindowResizeCallback:function(){b(a).off("resize",this._windowResizeHandler)},_windowResizeHandler:function(){(b.isFunction(this.showLabel)||b.isFunction(this.showCount))&&(a.clearTimeout(this._resizeTimer),this._resizeTimer=setTimeout(b.proxy(this.refresh,this),this.resizeTimeout))},_render:function(){this._clear(),this._defineOptionsByScreen(),this._$element.addClass(this.elementClass),this._$shares=b("<div>").addClass(this.sharesClass).appendTo(this._$element),this._renderShares()},_defineOptionsByScreen:function(){this._screenWidth=b(a).width(),this._showLabel=g(this.showLabel,this,this._screenWidth),this._showCount=g(this.showCount,this,this._screenWidth)},_renderShares:function(){b.each(this.shares,b.proxy(function(a,b){this._renderShare(b)},this))},_renderShare:function(a){var c;c=b.isFunction(a.renderer)?b(a.renderer()):this._createShare(a),c.addClass(this.shareClass).addClass(a.share?"jssocials-share-"+a.share:"").addClass(a.css).appendTo(this._$shares)},_createShare:function(a){var c=b("<div>"),d=this._createShareLink(a).appendTo(c);if(this._showCount){var e="inside"===this._showCount,f=e?d:b("<div>").addClass(this.shareCountBoxClass).appendTo(c);f.addClass(e?this.shareLinkCountClass:this.shareCountBoxClass),this._renderShareCount(a,f)}return c},_createShareLink:function(a){var c=this._getShareStrategy(a),d=c.call(a,{shareUrl:this._getShareUrl(a)});return d.addClass(this.shareLinkClass).append(this._createShareLogo(a)),this._showLabel&&d.append(this._createShareLabel(a)),b.each(this.on||{},function(c,e){b.isFunction(e)&&d.on(c,b.proxy(e,a))}),d},_getShareStrategy:function(a){var b=m[a.shareIn||this.shareIn];if(!b)throw Error("Share strategy '"+this.shareIn+"' not found");return b},_getShareUrl:function(a){var b=g(a.shareUrl,a);return this._formatShareUrl(b,a)},_createShareLogo:function(a){var c=a.logo,d=h.test(c)?b("<img>").attr("src",a.logo):b("<i>").addClass(c);return d.addClass(this.shareLogoClass),d},_createShareLabel:function(a){return b("<span>").addClass(this.shareLabelClass).text(a.label)},_renderShareCount:function(a,c){var d=b("<span>").addClass(this.shareCountClass);c.addClass(this.shareZeroCountClass).append(d),this._loadCount(a).done(b.proxy(function(a){a&&(c.removeClass(this.shareZeroCountClass),d.text(a))},this))},_loadCount:function(a){var c=b.Deferred(),d=this._getCountUrl(a);if(!d)return c.resolve(0).promise();var e=b.proxy(function(b){c.resolve(this._getCountValue(b,a))},this);return b.getJSON(d).done(e).fail(function(){b.get(d).done(e).fail(function(){c.resolve(0)})}),c.promise()},_getCountUrl:function(a){var b=g(a.countUrl,a);return this._formatShareUrl(b,a)},_getCountValue:function(a,c){var d=(b.isFunction(c.getCount)?c.getCount(a):a)||0;return"string"==typeof d?d:this._formatNumber(d)},_formatNumber:function(a){return b.each(j,function(b,c){return a>=c?(a=parseFloat((a/c).toFixed(2))+b,!1):void 0}),a},_formatShareUrl:function(b,c){return b.replace(i,function(b,d,e){var f=c[e]||"";return f?(d||"")+a.encodeURIComponent(f):""})},_clear:function(){a.clearTimeout(this._resizeTimer),this._$element.empty()},_passOptionToShares:function(a,c){var d=this.shares;b.each(["url","text"],function(e,f){f===a&&b.each(d,function(b,d){d[a]=c})})},_normalizeShare:function(a){return b.isNumeric(a)?this.shares[a]:"string"==typeof a?b.grep(this.shares,function(b){return 
b.share===a})[0]:a},refresh:function(){this._render()},destroy:function(){this._clear(),this._detachWindowResizeCallback(),this._$element.removeClass(this.elementClass).removeData(f)},option:function(a,b){return 1===arguments.length?this[a]:(this[a]=b,this._passOptionToShares(a,b),void this.refresh())},shareOption:function(a,b,c){return a=this._normalizeShare(a),2===arguments.length?a[b]:(a[b]=c,void this.refresh())}},b.fn.jsSocials=function(a){var e=b.makeArray(arguments),g=e.slice(1),h=this;return this.each(function(){var e,i=b(this),j=i.data(f);if(j)if("string"==typeof a){if(e=j[a].apply(j,g),e!==c&&e!==j)return h=e,!1}else j._detachWindowResizeCallback(),j._init(a),j._render();else new d(i,a)}),h};var l=function(a){var c;b.isPlainObject(a)?c=d.prototype:(c=k[a],a=arguments[1]||{}),b.extend(c,a)},m={popup:function(c){return b("<a>").attr("href","#").on("click",function(){return a.open(c.shareUrl,null,"width=600, height=400, location=0, menubar=0, resizeable=0, scrollbars=0, status=0, titlebar=0, toolbar=0"),!1})},blank:function(a){return b("<a>").attr({target:"_blank",href:a.shareUrl})},self:function(a){return b("<a>").attr({target:"_self",href:a.shareUrl})}};a.jsSocials={Socials:d,shares:k,shareStrategies:m,setDefaults:l}}(window,jQuery),function(a,b,c){b.extend(c.shares,{email:{label:"E-mail",logo:"fa fa-at",shareUrl:"mailto:{to}?subject={text}&body={url}",countUrl:"",shareIn:"self"},twitter:{label:"Tweet",logo:"fa fa-twitter",shareUrl:"https://twitter.com/share?url={url}&text={text}&via={via}&hashtags={hashtags}",countUrl:""},facebook:{label:"Like",logo:"fa fa-facebook",shareUrl:"https://facebook.com/sharer/sharer.php?u={url}",countUrl:"https://graph.facebook.com/?id={url}",getCount:function(a){return a.share&&a.share.share_count||0}},vkontakte:{label:"Like",logo:"fa fa-vk",shareUrl:"https://vk.com/share.php?url={url}&title={title}&description={text}",countUrl:"https://vk.com/share.php?act=count&index=1&url={url}",getCount:function(a){return parseInt(a.slice(15,-2).split(", ")[1])}},googleplus:{label:"+1",logo:"fa fa-google",shareUrl:"https://plus.google.com/share?url={url}",countUrl:""},linkedin:{label:"Share",logo:"fa fa-linkedin",shareUrl:"https://www.linkedin.com/shareArticle?mini=true&url={url}",countUrl:"https://www.linkedin.com/countserv/count/share?format=jsonp&url={url}&callback=?",getCount:function(a){return a.count}},pinterest:{label:"Pin it",logo:"fa fa-pinterest",shareUrl:"https://pinterest.com/pin/create/bookmarklet/?media={media}&url={url}&description={text}",countUrl:"https://api.pinterest.com/v1/urls/count.json?&url={url}&callback=?",getCount:function(a){return a.count}},stumbleupon:{label:"Share",logo:"fa fa-stumbleupon",shareUrl:"http://www.stumbleupon.com/submit?url={url}&title={title}",countUrl:"https://cors-anywhere.herokuapp.com/https://www.stumbleupon.com/services/1.01/badge.getinfo?url={url}",getCount:function(a){return a.result&&a.result.views}},telegram:{label:"Telegram",logo:"fa fa-telegram",shareUrl:"tg://msg?text={url} {text}",countUrl:"",shareIn:"self"},whatsapp:{label:"WhatsApp",logo:"fa fa-whatsapp",shareUrl:"whatsapp://send?text={url} {text}",countUrl:"",shareIn:"self"},line:{label:"LINE",logo:"fa fa-comment",shareUrl:"http://line.me/R/msg/text/?{text} {url}",countUrl:""},viber:{label:"Viber",logo:"fa fa-volume-control-phone",shareUrl:"viber://forward?text={url} {text}",countUrl:"",shareIn:"self"},pocket:{label:"Pocket",logo:"fa fa-get-pocket",shareUrl:"https://getpocket.com/save?url={url}&title={title}",countUrl:""},messenger:{label:"Share",logo:"fa 
fa-commenting",shareUrl:"fb-messenger://share?link={url}",countUrl:"",shareIn:"self"},rss:{label:"RSS",logo:"fa fa-rss",shareUrl:"/feeds/",countUrl:"",shareIn:"blank"}})}(window,jQuery,window.jsSocials); /***/ }), /* 9 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; /* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return addClassRemoveItFromSiblings; }); /* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return scrollSmoothlyTo; }); /** * Given two jQuery objects, current and siblings, add a class to current * and remove it from each sibling */ var addClassRemoveItFromSiblings = function addClassRemoveItFromSiblings($current, $siblings, className) { // Remove the class from the siblings $siblings.each(function () { $(this).removeClass(className); }); // Add the class to the current element $current.addClass(className); }; var scrollSmoothlyTo = function scrollSmoothlyTo($to) { // Animate the page scroll to the target element's offset over 500ms $('html, body').animate({ scrollTop: $to.offset().top }, 500); }; /***/ }), /* 10 */ /***/ (function(module, exports) { // removed by extract-text-webpack-plugin /***/ }) /******/ ]);
HIDE: "hide" + EVENT_KEY, HIDDEN: "hidden" + EVENT_KEY, SHOW: "show" + EVENT_KEY, SHOWN: "shown" + EVENT_KEY,
writer.rs
#![allow(dead_code)] use std::io; use std::fmt::Write; use bytes::BufMut; use futures::{Async, Poll}; use tokio_io::AsyncWrite; // use http::header::{HeaderValue, CONNECTION, DATE}; use body::Binary; use server::{WriterState, MAX_WRITE_BUFFER_SIZE}; use server::shared::SharedBytes; use client::ClientRequest; const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific bitflags! { struct Flags: u8 { const STARTED = 0b0000_0001; const UPGRADE = 0b0000_0010; const KEEPALIVE = 0b0000_0100; const DISCONNECTED = 0b0000_1000; } } pub(crate) struct
{ flags: Flags, written: u64, headers_size: u32, buffer: SharedBytes, } impl HttpClientWriter { pub fn new(buf: SharedBytes) -> HttpClientWriter { HttpClientWriter { flags: Flags::empty(), written: 0, headers_size: 0, buffer: buf, } } pub fn disconnected(&mut self) { self.buffer.take(); } pub fn keepalive(&self) -> bool { self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE) } fn write_to_stream<T: AsyncWrite>(&mut self, stream: &mut T) -> io::Result<WriterState> { while !self.buffer.is_empty() { match stream.write(self.buffer.as_ref()) { Ok(0) => { self.disconnected(); return Ok(WriterState::Done); }, Ok(n) => { let _ = self.buffer.split_to(n); }, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { if self.buffer.len() > MAX_WRITE_BUFFER_SIZE { return Ok(WriterState::Pause) } else { return Ok(WriterState::Done) } } Err(err) => return Err(err), } } Ok(WriterState::Done) } } impl HttpClientWriter { pub fn start(&mut self, msg: &mut ClientRequest) { // prepare task self.flags.insert(Flags::STARTED); // render message { let buffer = self.buffer.get_mut(); buffer.reserve(256 + msg.headers().len() * AVERAGE_HEADER_SIZE); // status line let _ = write!(buffer, "{} {} {:?}\r\n", msg.method(), msg.uri().path(), msg.version()); // write headers for (key, value) in msg.headers() { let v = value.as_ref(); let k = key.as_str().as_bytes(); buffer.reserve(k.len() + v.len() + 4); buffer.put_slice(k); buffer.put_slice(b": "); buffer.put_slice(v); buffer.put_slice(b"\r\n"); } // using helpers::date is quite a lot faster //if !msg.headers.contains_key(DATE) { // helpers::date(&mut buffer); //} else { // msg eof buffer.extend_from_slice(b"\r\n"); //} self.headers_size = buffer.len() as u32; } } pub fn write(&mut self, payload: &Binary) -> io::Result<WriterState> { self.written += payload.len() as u64; if !self.flags.contains(Flags::DISCONNECTED) { self.buffer.extend_from_slice(payload.as_ref()) } if self.buffer.len() > MAX_WRITE_BUFFER_SIZE { Ok(WriterState::Pause) } else { Ok(WriterState::Done) } } pub fn write_eof(&mut self) -> io::Result<WriterState> { if self.buffer.len() > MAX_WRITE_BUFFER_SIZE { Ok(WriterState::Pause) } else { Ok(WriterState::Done) } } #[inline] pub fn poll_completed<T: AsyncWrite>(&mut self, stream: &mut T, shutdown: bool) -> Poll<(), io::Error> { match self.write_to_stream(stream) { Ok(WriterState::Done) => { if shutdown { stream.shutdown() } else { Ok(Async::Ready(())) } }, Ok(WriterState::Pause) => Ok(Async::NotReady), Err(err) => Err(err) } } }
HttpClientWriter
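In the writer.rs row above, write_to_stream drains the shared buffer into the socket and reports WriterState::Pause once the buffer exceeds MAX_WRITE_BUFFER_SIZE, so the caller stops producing until the buffer drains. For comparison only, a rough Node.js/TypeScript sketch of the same backpressure idea (not part of the source):

import { Writable } from 'stream';

// Write chunks, pausing whenever the stream's internal buffer is full --
// the analogue of write_to_stream() returning WriterState::Pause.
async function writeWithBackpressure(out: Writable, chunks: Buffer[]): Promise<void> {
  for (const chunk of chunks) {
    if (!out.write(chunk)) {
      // Above the high-water mark: wait for 'drain' before producing more.
      await new Promise<void>((resolve) => out.once('drain', resolve));
    }
  }
}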
private.rs
// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Open Ethereum. // Open Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Open Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Open Ethereum. If not, see <http://www.gnu.org/licenses/>. //! SecretStore-specific rpc interface. use ethereum_types::{H160, H256, U256}; use jsonrpc_core::Error; use jsonrpc_derive::rpc; use v1::types::{Bytes, PrivateTransactionReceipt, BlockNumber, PrivateTransactionReceiptAndTransaction, CallRequest, PrivateTransactionLog}; /// Private transaction management RPC interface.
#[rpc(server)] pub trait Private { /// RPC Metadata type Metadata; /// Sends a private transaction; the transaction is added to the validation queue and sent out when ready. #[rpc(name = "private_sendTransaction")] fn send_transaction(&self, _: Bytes) -> Result<PrivateTransactionReceipt, Error>; /// Creates a transaction for a contract's deployment from an origin (signed) transaction. #[rpc(name = "private_composeDeploymentTransaction")] fn compose_deployment_transaction( &self, _: BlockNumber, _: Bytes, _: Vec<H160>, _: U256 ) -> Result<PrivateTransactionReceiptAndTransaction, Error>; /// Makes a call to the private contract. #[rpc(name = "private_call")] fn private_call(&self, _: BlockNumber, _: CallRequest) -> Result<Bytes, Error>; /// Retrieves the id of the key associated with the contract. #[rpc(name = "private_contractKey")] fn private_contract_key(&self, _: H160) -> Result<H256, Error>; /// Retrieves log information about a private transaction. #[rpc(name = "private_log")] fn private_log(&self, _: H256) -> Result<PrivateTransactionLog, Error>; }
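The trait above declares only the server side; clients reach these methods over plain JSON-RPC. A hedged TypeScript sketch of such a call, where the endpoint URL and parameter encoding are assumptions and only the private_* method names come from the trait:

// Generic JSON-RPC 2.0 helper (illustrative, not from the source).
async function rpc(url: string, method: string, params: unknown[]): Promise<unknown> {
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ jsonrpc: '2.0', id: 1, method, params }),
  });
  const { result, error } = await res.json();
  if (error) throw new Error(error.message);
  return result;
}

// e.g. fetch the log of a private transaction by hash (hypothetical endpoint and hash):
// await rpc('http://localhost:8545', 'private_log', ['0x...']);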
config.go
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package controller // Config struct for rpc controller cmd type Config struct { EtcdKeyFile string EtcdCertFile string EtcdCaCertFile string
EtcdEndpoints []string }
blank-page.component.ts
import { Component, OnInit } from '@angular/core'; import { Restaurant } from './blank-test-content/blank-teste-content.model'; import { RestaurantsService } from './blank-page.service'; import { FormGroup, FormBuilder, Validators } from '@angular/forms'; @Component({ selector: 'app-blank-page', templateUrl: './blank-page.component.html', styleUrls: ['./blank-page.component.scss'] }) export class BlankPageComponent implements OnInit { signUpForm: FormGroup;
restaurants: Restaurant[]; constructor(private fb: FormBuilder, private restaurantsService: RestaurantsService) {} ngOnInit() { this.signUpForm = this.fb.group({ restaurantName: this.fb.control('', [Validators.required]), categoryName: this.fb.control('', [Validators.required]), timeInput: this.fb.control('', [Validators.required]), starsInput: this.fb.control('', [Validators.required]) }); this.restaurantsService.restaurants() .subscribe(restaurants => this.restaurants = restaurants); } addRestaurant() { this.restaurantsService.addRestaurant(this.signUpForm.value.restaurantName, this.signUpForm.value.categoryName, this.signUpForm.value.timeInput, this.signUpForm.value.starsInput) .subscribe(x => alert('Restaurant added successfully')); } }
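The component depends on a RestaurantsService that this row does not show. A hedged sketch of what it plausibly looks like: only the two method signatures are taken from the component above, while the HttpClient usage, endpoint path, and payload shape are assumptions (the import path is kept exactly as the component spells it):

import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs';
import { Restaurant } from './blank-test-content/blank-teste-content.model';

@Injectable({ providedIn: 'root' })
export class RestaurantsService {
  constructor(private http: HttpClient) {}

  // Matches this.restaurantsService.restaurants() in the component.
  restaurants(): Observable<Restaurant[]> {
    return this.http.get<Restaurant[]>('/api/restaurants'); // assumed endpoint
  }

  // Matches addRestaurant(name, category, time, stars) in the component.
  addRestaurant(name: string, category: string, time: string, stars: string): Observable<Restaurant> {
    return this.http.post<Restaurant>('/api/restaurants', { name, category, time, stars }); // assumed payload
  }
}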
system.py
# This file is part of the Open Data Cube, see https://opendatacube.org for more information # # Copyright (c) 2015-2020 ODC Contributors # SPDX-License-Identifier: Apache-2.0 import logging import click from click import echo, style from sqlalchemy.exc import OperationalError import datacube from datacube.index import index_connect from datacube.drivers.postgres._connections import IndexSetupError from datacube.ui import click as ui from datacube.ui.click import cli, handle_exception from datacube.config import LocalConfig _LOG = logging.getLogger('datacube-system') @cli.group(name='system', help='System commands') def system(): pass @system.command('init', help='Initialise the database') @click.option( '--default-types/--no-default-types', is_flag=True, default=True, help="Add default types? (default: true)" ) @click.option( '--init-users/--no-init-users', is_flag=True, default=True, help="Include user roles and grants. (default: true)" ) @click.option( '--recreate-views/--no-recreate-views', is_flag=True, default=True, help="Recreate dynamic views" ) @click.option( '--rebuild/--no-rebuild', is_flag=True, default=False, help="Rebuild all dynamic fields (caution: slow)" ) @click.option( '--lock-table/--no-lock-table', is_flag=True, default=False, help="Allow table to be locked (e.g. while creating missing indexes)" ) @ui.pass_index(expect_initialised=False) def database_init(index, default_types, init_users, recreate_views, rebuild, lock_table): echo('Initialising database...') was_created = index.init_db(with_default_types=default_types, with_permissions=init_users) if was_created: echo(style('Created.', bold=True)) else: echo(style('Updated.', bold=True)) echo('Checking indexes/views.')
) echo('Done.') @system.command('check', help='Check and display current configuration') @ui.pass_config def check(local_config: LocalConfig): """ Verify & view current configuration """ def echo_field(name, value): echo('{:<15}'.format(name + ':') + style(str(value), bold=True)) echo_field('Version', datacube.__version__) echo_field('Config files', ','.join(local_config.files_loaded)) echo_field('Host', '{}:{}'.format(local_config['db_hostname'] or 'localhost', local_config.get('db_port', None) or '5432')) echo_field('Database', local_config['db_database']) echo_field('User', local_config['db_username']) echo_field('Environment', local_config['env']) echo_field('Index Driver', local_config['index_driver']) echo() echo('Valid connection:\t', nl=False) try: index = index_connect(local_config=local_config) echo(style('YES', bold=True)) for role, user, description in index.users.list_users(): if user == local_config['db_username']: echo('You have %s privileges.' % style(role.upper(), bold=True)) except OperationalError as e: handle_exception('Error Connecting to Database: %s', e) except IndexSetupError as e: handle_exception('Database not initialised: %s', e)
index.metadata_types.check_field_indexes( allow_table_lock=lock_table, rebuild_indexes=rebuild, rebuild_views=recreate_views or rebuild,
notificacion.service.ts
import { Injectable } from '@angular/core'; import { ToastrService } from 'ngx-toastr'; @Injectable({ providedIn: 'root' }) export class
{ constructor(private toastr: ToastrService) { } public exitoso(titulo="Success",descripcion="request successful"){ this.toastr.success( `<span data-notify="icon" class="nc-icon nc-bell-55"></span><span data-notify="message"> <b>${titulo}</b> - ${descripcion}.</span>`, "", { timeOut: 4000, closeButton: true, enableHtml: true, toastClass: "alert alert-success alert-with-icon", positionClass: "toast-top-right" } ); } public informacion(titulo="Information",descripcion=""){ this.toastr.info( `<span data-notify="icon" class="nc-icon nc-bell-55"></span><span data-notify="message"> <b>${titulo}</b> - ${descripcion}.</span>`, "", { timeOut: 4000, closeButton: true, enableHtml: true, toastClass: "alert alert-warning alert-with-icon", positionClass: "toast-top-right" } ); } public error(titulo="Error",descripcion="An error has occurred"){ this.toastr.error( `<span data-notify="icon" class="nc-icon nc-bell-55"></span><span data-notify="message"> <b>${titulo}</b> - ${descripcion}.</span>`, "", { timeOut: 6000, closeButton: true, enableHtml: true, toastClass: "alert alert-danger alert-with-icon", positionClass: "toast-top-right" } ); } }
NotificacionService
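A short usage sketch for the service above, injected into an ordinary component; the component name and template are illustrative, not from the source:

import { Component } from '@angular/core';
import { NotificacionService } from './notificacion.service';

@Component({ selector: 'app-demo', template: '<button (click)="save()">Save</button>' })
export class DemoComponent {
  constructor(private notificacion: NotificacionService) {}

  save(): void {
    // Every parameter has a default value, so both call styles are valid:
    this.notificacion.exitoso();
    this.notificacion.error('Error', 'Could not save the record');
  }
}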
generated.rs
// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= use std::error::Error; use std::fmt; use std::io; #[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::reactor::{CredentialsProvider, RequestDispatcher}; use rusoto_core::region; use rusoto_core::request::DispatchSignedRequest; use rusoto_core::{ClientInner, RusotoFuture}; use rusoto_core::credential::{CredentialsError, ProvideAwsCredentials}; use rusoto_core::request::HttpDispatchError; use hyper::StatusCode; use rusoto_core::signature::SignedRequest; use serde_json; use serde_json::from_str; use serde_json::Value as SerdeJsonValue; /// <p>A container for account-level settings within AWS Device Farm.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct AccountSettings { /// <p>The AWS account number specified in the <code>AccountSettings</code> container.</p> #[serde(rename = "awsAccountNumber")] #[serde(skip_serializing_if = "Option::is_none")] pub aws_account_number: Option<String>, /// <p>The default number of minutes (at the account level) a test run will execute before it times out. Default value is 60 minutes.</p> #[serde(rename = "defaultJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub default_job_timeout_minutes: Option<i64>, /// <p>The maximum number of minutes a test run will execute before it times out.</p> #[serde(rename = "maxJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub max_job_timeout_minutes: Option<i64>, /// <p>The maximum number of device slots that the AWS account can purchase. Each maximum is expressed as an <code>offering-id:number</code> pair, where the <code>offering-id</code> represents one of the IDs returned by the <code>ListOfferings</code> command.</p> #[serde(rename = "maxSlots")] #[serde(skip_serializing_if = "Option::is_none")] pub max_slots: Option<::std::collections::HashMap<String, i64>>, /// <p>When set to <code>true</code>, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.</p> <p>For more information about how Device Farm re-signs your app(s), see <a href="https://aws.amazon.com/device-farm/faq/">Do you modify my app?</a> in the <i>AWS Device Farm FAQs</i>.</p> #[serde(rename = "skipAppResign")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_app_resign: Option<bool>, /// <p>Information about an AWS account's usage of free trial device minutes.</p> #[serde(rename = "trialMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub trial_minutes: Option<TrialMinutes>, /// <p>Returns the unmetered devices you have purchased or want to purchase.</p> #[serde(rename = "unmeteredDevices")] #[serde(skip_serializing_if = "Option::is_none")] pub unmetered_devices: Option<::std::collections::HashMap<String, i64>>, /// <p>Returns the unmetered remote access devices you have purchased or want to purchase.</p> #[serde(rename = "unmeteredRemoteAccessDevices")] #[serde(skip_serializing_if = "Option::is_none")] pub unmetered_remote_access_devices: Option<::std::collections::HashMap<String, i64>>, } /// <p>Represents the output of a test. 
Examples of artifacts include logs and screenshots.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Artifact { /// <p>The artifact's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The artifact's file extension.</p> #[serde(rename = "extension")] #[serde(skip_serializing_if = "Option::is_none")] pub extension: Option<String>, /// <p>The artifact's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p><p>The artifact&#39;s type.</p> <p>Allowed values include the following:</p> <ul> <li> <p>UNKNOWN: An unknown type.</p> </li> <li> <p>SCREENSHOT: The screenshot type.</p> </li> <li> <p>DEVICE_LOG: The device log type.</p> </li> <li> <p>MESSAGE_LOG: The message log type.</p> </li> <li> <p>RESULT_LOG: The result log type.</p> </li> <li> <p>SERVICE_LOG: The service log type.</p> </li> <li> <p>WEBKIT_LOG: The web kit log type.</p> </li> <li> <p>INSTRUMENTATION_OUTPUT: The instrumentation type.</p> </li> <li> <p>EXERCISER_MONKEY_OUTPUT: For Android, the artifact (log) generated by an Android fuzz test.</p> </li> <li> <p>CALABASH_JSON_OUTPUT: The Calabash JSON output type.</p> </li> <li> <p>CALABASH_PRETTY_OUTPUT: The Calabash pretty output type.</p> </li> <li> <p>CALABASH_STANDARD_OUTPUT: The Calabash standard output type.</p> </li> <li> <p>CALABASH_JAVA_XML_OUTPUT: The Calabash Java XML output type.</p> </li> <li> <p>AUTOMATION_OUTPUT: The automation output type.</p> </li> <li> <p>APPIUM_SERVER_OUTPUT: The Appium server output type.</p> </li> <li> <p>APPIUM_JAVA_OUTPUT: The Appium Java output type.</p> </li> <li> <p>APPIUM_JAVA_XML_OUTPUT: The Appium Java XML output type.</p> </li> <li> <p>APPIUM_PYTHON_OUTPUT: The Appium Python output type.</p> </li> <li> <p>APPIUM_PYTHON_XML_OUTPUT: The Appium Python XML output type.</p> </li> <li> <p>EXPLORER_EVENT_LOG: The Explorer event log output type.</p> </li> <li> <p>EXPLORER_SUMMARY_LOG: The Explorer summary log output type.</p> </li> <li> <p>APPLICATION_CRASH_REPORT: The application crash report output type.</p> </li> <li> <p>XCTEST_LOG: The XCode test output type.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the artifact's file.</p> #[serde(rename = "url")] #[serde(skip_serializing_if = "Option::is_none")] pub url: Option<String>, } /// <p>Represents the amount of CPU that an app is using on a physical device.</p> <p>Note that this does not represent system-wide CPU usage.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CPU { /// <p>The CPU's architecture, for example x86 or ARM.</p> #[serde(rename = "architecture")] #[serde(skip_serializing_if = "Option::is_none")] pub architecture: Option<String>, /// <p>The clock speed of the device's CPU, expressed in hertz (Hz).
For example, a 1.2 GHz CPU is expressed as 1200000000.</p> #[serde(rename = "clock")] #[serde(skip_serializing_if = "Option::is_none")] pub clock: Option<f64>, /// <p>The CPU's frequency.</p> #[serde(rename = "frequency")] #[serde(skip_serializing_if = "Option::is_none")] pub frequency: Option<String>, } /// <p>Represents entity counters.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Counters { /// <p>The number of errored entities.</p> #[serde(rename = "errored")] #[serde(skip_serializing_if = "Option::is_none")] pub errored: Option<i64>, /// <p>The number of failed entities.</p> #[serde(rename = "failed")] #[serde(skip_serializing_if = "Option::is_none")] pub failed: Option<i64>, /// <p>The number of passed entities.</p> #[serde(rename = "passed")] #[serde(skip_serializing_if = "Option::is_none")] pub passed: Option<i64>, /// <p>The number of skipped entities.</p> #[serde(rename = "skipped")] #[serde(skip_serializing_if = "Option::is_none")] pub skipped: Option<i64>, /// <p>The number of stopped entities.</p> #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option<i64>, /// <p>The total number of entities.</p> #[serde(rename = "total")] #[serde(skip_serializing_if = "Option::is_none")] pub total: Option<i64>, /// <p>The number of warned entities.</p> #[serde(rename = "warned")] #[serde(skip_serializing_if = "Option::is_none")] pub warned: Option<i64>, } /// <p>Represents a request to the create device pool operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateDevicePoolRequest { /// <p>The device pool's description.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The device pool's name.</p> #[serde(rename = "name")] pub name: String, /// <p>The ARN of the project for the device pool.</p> #[serde(rename = "projectArn")] pub project_arn: String, /// <p>The device pool's rules.</p> #[serde(rename = "rules")] pub rules: Vec<Rule>, } /// <p>Represents the result of a create device pool request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CreateDevicePoolResult { /// <p>The newly created device pool.</p> #[serde(rename = "devicePool")] #[serde(skip_serializing_if = "Option::is_none")] pub device_pool: Option<DevicePool>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateInstanceProfileRequest { /// <p>The description of your instance profile.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.</p> <p>The list of packages is only considered if you set <code>packageCleanup</code> to <code>true</code>.</p> #[serde(rename = "excludeAppPackagesFromCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub exclude_app_packages_from_cleanup: Option<Vec<String>>, /// <p>The name of your instance profile.</p> #[serde(rename = "name")] pub name: String, /// <p>When set to <code>true</code>, Device Farm will remove app packages after a test run. The default value is <code>false</code> for private devices.</p> #[serde(rename = "packageCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub package_cleanup: Option<bool>, /// <p>When set to <code>true</code>, Device Farm will reboot the instance after a test run. 
The default value is <code>true</code>.</p> #[serde(rename = "rebootAfterUse")] #[serde(skip_serializing_if = "Option::is_none")] pub reboot_after_use: Option<bool>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CreateInstanceProfileResult { /// <p>An object containing information about your instance profile.</p> #[serde(rename = "instanceProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_profile: Option<InstanceProfile>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateNetworkProfileRequest { /// <p>The description of the network profile.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The data throughput rate in bits per second, as an integer from 0 to 104857600.</p> #[serde(rename = "downlinkBandwidthBits")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_bandwidth_bits: Option<i64>, /// <p>Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "downlinkDelayMs")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_delay_ms: Option<i64>, /// <p>Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "downlinkJitterMs")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_jitter_ms: Option<i64>, /// <p>Proportion of received packets that fail to arrive from 0 to 100 percent.</p> #[serde(rename = "downlinkLossPercent")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_loss_percent: Option<i64>, /// <p>The name you wish to specify for the new network profile.</p> #[serde(rename = "name")] pub name: String, /// <p>The Amazon Resource Name (ARN) of the project for which you want to create a network profile.</p> #[serde(rename = "projectArn")] pub project_arn: String, /// <p>The type of network profile you wish to create. Valid values are listed below.</p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The data throughput rate in bits per second, as an integer from 0 to 104857600.</p> #[serde(rename = "uplinkBandwidthBits")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_bandwidth_bits: Option<i64>, /// <p>Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "uplinkDelayMs")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_delay_ms: Option<i64>, /// <p>Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "uplinkJitterMs")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_jitter_ms: Option<i64>, /// <p>Proportion of transmitted packets that fail to arrive from 0 to 100 percent.</p> #[serde(rename = "uplinkLossPercent")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_loss_percent: Option<i64>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CreateNetworkProfileResult { /// <p>The network profile that is returned by the create network profile request.</p> #[serde(rename = "networkProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub network_profile: Option<NetworkProfile>, } /// <p>Represents a request to the create project operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateProjectRequest { /// <p>Sets the execution timeout value (in minutes) for a project. 
All test runs in this project will use the specified execution timeout value unless overridden when scheduling a run.</p> #[serde(rename = "defaultJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub default_job_timeout_minutes: Option<i64>, /// <p>The project's name.</p> #[serde(rename = "name")] pub name: String, } /// <p>Represents the result of a create project request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CreateProjectResult { /// <p>The newly created project.</p> #[serde(rename = "project")] #[serde(skip_serializing_if = "Option::is_none")] pub project: Option<Project>, } /// <p>Configuration settings for a remote access session, including billing method.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateRemoteAccessSessionConfiguration { /// <p>The billing method for the remote access session.</p> #[serde(rename = "billingMethod")] #[serde(skip_serializing_if = "Option::is_none")] pub billing_method: Option<String>, } /// <p>Creates and submits a request to start a remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateRemoteAccessSessionRequest { /// <p>Unique identifier for the client. If you want access to multiple devices on the same client, you should pass the same <code>clientId</code> value in each call to <code>CreateRemoteAccessSession</code>. This is required only if <code>remoteDebugEnabled</code> is set to <code>true</code>.</p> #[serde(rename = "clientId")] #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, /// <p>The configuration information for the remote access session request.</p> #[serde(rename = "configuration")] #[serde(skip_serializing_if = "Option::is_none")] pub configuration: Option<CreateRemoteAccessSessionConfiguration>, /// <p>The Amazon Resource Name (ARN) of the device for which you want to create a remote access session.</p> #[serde(rename = "deviceArn")] pub device_arn: String, /// <p>The Amazon Resource Name (ARN) of the device instance for which you want to create a remote access session.</p> #[serde(rename = "instanceArn")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_arn: Option<String>, /// <p><p>The interaction mode of the remote access session. Valid values are:</p> <ul> <li> <p>INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. You <b>cannot</b> run XCUITest framework-based tests in this mode.</p> </li> <li> <p>NO_VIDEO: You are connected to the device but cannot interact with it or view the screen. This mode has the fastest test execution speed. You <b>can</b> run XCUITest framework-based tests in this mode.</p> </li> <li> <p>VIDEO_ONLY: You can view the screen but cannot touch or rotate it.
You <b>can</b> run XCUITest framework-based tests and watch the screen in this mode.</p> </li> </ul></p> #[serde(rename = "interactionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub interaction_mode: Option<String>, /// <p>The name of the remote access session that you wish to create.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The Amazon Resource Name (ARN) of the project for which you want to create a remote access session.</p> #[serde(rename = "projectArn")] pub project_arn: String, /// <p>Set to <code>true</code> if you want to access devices remotely for debugging in your remote access session.</p> #[serde(rename = "remoteDebugEnabled")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_debug_enabled: Option<bool>, /// <p>The Amazon Resource Name (ARN) for the app to be recorded in the remote access session.</p> #[serde(rename = "remoteRecordAppArn")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_record_app_arn: Option<String>, /// <p>Set to <code>true</code> to enable remote recording for the remote access session.</p> #[serde(rename = "remoteRecordEnabled")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_record_enabled: Option<bool>, /// <p>When set to <code>true</code>, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.</p> <p>For more information about how Device Farm re-signs your app(s), see <a href="https://aws.amazon.com/device-farm/faq/">Do you modify my app?</a> in the <i>AWS Device Farm FAQs</i>.</p> #[serde(rename = "skipAppResign")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_app_resign: Option<bool>, /// <p>The public key of the <code>ssh</code> key pair you want to use for connecting to remote devices in your remote debugging session. This is only required if <code>remoteDebugEnabled</code> is set to <code>true</code>.</p> #[serde(rename = "sshPublicKey")] #[serde(skip_serializing_if = "Option::is_none")] pub ssh_public_key: Option<String>, } /// <p>Represents the server response from a request to create a remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CreateRemoteAccessSessionResult { /// <p>A container that describes the remote access session when the request to create a remote access session is sent.</p> #[serde(rename = "remoteAccessSession")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_access_session: Option<RemoteAccessSession>, } /// <p>Represents a request to the create upload operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct CreateUploadRequest { /// <p>The upload's content type (for example, "application/octet-stream").</p> #[serde(rename = "contentType")] #[serde(skip_serializing_if = "Option::is_none")] pub content_type: Option<String>, /// <p>The upload's file name. The name should not contain the '/' character. If uploading an iOS app, the file name needs to end with the <code>.ipa</code> extension. If uploading an Android app, the file name needs to end with the <code>.apk</code> extension. 
For all others, the file name must end with the <code>.zip</code> file extension.</p> #[serde(rename = "name")] pub name: String, /// <p>The ARN of the project for the upload.</p> #[serde(rename = "projectArn")] pub project_arn: String, /// <p>The upload's upload type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>ANDROID_APP: An Android upload.</p> </li> <li> <p>IOS_APP: An iOS upload.</p> </li> <li> <p>WEB_APP: A web application upload.</p> </li> <li> <p>EXTERNAL_DATA: An external data upload.</p> </li> <li> <p>APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.</p> </li> <li> <p>APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.</p> </li> <li> <p>APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.</p> </li> <li> <p>APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.</p> </li> <li> <p>CALABASH_TEST_PACKAGE: A Calabash test package upload.</p> </li> <li> <p>INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.</p> </li> <li> <p>UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.</p> </li> <li> <p>UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.</p> </li> <li> <p>XCTEST_TEST_PACKAGE: An XCode test package upload.</p> </li> <li> <p>XCTEST_UI_TEST_PACKAGE: An XCode UI test package upload.</p> </li> </ul> <p> <b>Note</b> If you call <code>CreateUpload</code> with <code>WEB_APP</code> specified, AWS Device Farm throws an <code>ArgumentException</code> error.</p> #[serde(rename = "type")] pub type_: String, } /// <p>Represents the result of a create upload request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct CreateUploadResult { /// <p>The newly created upload.</p> #[serde(rename = "upload")] #[serde(skip_serializing_if = "Option::is_none")] pub upload: Option<Upload>, } /// <p>A JSON object specifying the paths where the artifacts generated by the customer's tests, on the device or in the test environment, will be pulled from.</p> <p>Specify <code>deviceHostPaths</code> and optionally specify either <code>iosPaths</code> or <code>androidPaths</code>.</p> <p>For web app tests, you can specify both <code>iosPaths</code> and <code>androidPaths</code>.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CustomerArtifactPaths { /// <p>Comma-separated list of paths on the Android device where the artifacts generated by the customer's tests will be pulled from.</p> #[serde(rename = "androidPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub android_paths: Option<Vec<String>>, /// <p>Comma-separated list of paths in the test execution environment where the artifacts generated by the customer's tests will be pulled from.</p> #[serde(rename = "deviceHostPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub device_host_paths: Option<Vec<String>>, /// <p>Comma-separated list of paths on the iOS device where the artifacts generated by the customer's tests will be pulled from.</p> #[serde(rename = "iosPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub ios_paths: Option<Vec<String>>, }
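// Illustrative sketch, not part of the generated service model: building a
// `CreateUploadRequest` for an Android app. The project ARN and file name
// below are hypothetical placeholder values.
fn example_create_upload_request() -> CreateUploadRequest {
    CreateUploadRequest {
        content_type: Some("application/octet-stream".to_string()),
        // Android uploads must use the `.apk` extension (see the field docs above).
        name: "app-debug.apk".to_string(),
        project_arn: "arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE".to_string(),
        type_: "ANDROID_APP".to_string(),
    }
}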
/// <p>Represents a request to the delete device pool operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteDevicePoolRequest { /// <p>Represents the Amazon Resource Name (ARN) of the Device Farm device pool you wish to delete.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a delete device pool request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteDevicePoolResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteInstanceProfileRequest { /// <p>The Amazon Resource Name (ARN) of the instance profile you are requesting to delete.</p> #[serde(rename = "arn")] pub arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteInstanceProfileResult {} #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteNetworkProfileRequest { /// <p>The Amazon Resource Name (ARN) of the network profile you want to delete.</p> #[serde(rename = "arn")] pub arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteNetworkProfileResult {} /// <p>Represents a request to the delete project operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteProjectRequest { /// <p>Represents the Amazon Resource Name (ARN) of the Device Farm project you wish to delete.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a delete project request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteProjectResult {} /// <p>Represents the request to delete the specified remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteRemoteAccessSessionRequest { /// <p>The Amazon Resource Name (ARN) of the session for which you want to delete remote access.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>The response from the server when a request is made to delete the remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteRemoteAccessSessionResult {} /// <p>Represents a request to the delete run operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteRunRequest { /// <p>The Amazon Resource Name (ARN) for the run you wish to delete.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a delete run request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteRunResult {} /// <p>Represents a request to the delete upload operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct DeleteUploadRequest { /// <p>Represents the Amazon Resource Name (ARN) of the Device Farm upload you wish to delete.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a delete upload request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeleteUploadResult {}
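// Illustrative sketch, not generated code: every delete-style request above
// carries a single required ARN, so construction is uniform. The ARN passed
// in is whatever resource ARN the caller already holds.
fn example_delete_upload_request(arn: &str) -> DeleteUploadRequest {
    DeleteUploadRequest { arn: arn.to_string() }
}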
/// <p>Represents a device type that an app is tested against.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Device { /// <p>The device's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The device's carrier.</p> #[serde(rename = "carrier")] #[serde(skip_serializing_if = "Option::is_none")] pub carrier: Option<String>, /// <p>Information about the device's CPU.</p> #[serde(rename = "cpu")] #[serde(skip_serializing_if = "Option::is_none")] pub cpu: Option<CPU>, /// <p>The name of the fleet to which this device belongs.</p> #[serde(rename = "fleetName")] #[serde(skip_serializing_if = "Option::is_none")] pub fleet_name: Option<String>, /// <p>The type of fleet to which this device belongs. Possible values for fleet type are PRIVATE and PUBLIC.</p> #[serde(rename = "fleetType")] #[serde(skip_serializing_if = "Option::is_none")] pub fleet_type: Option<String>, /// <p><p>The device's form factor.</p> <p>Allowed values include:</p> <ul> <li> <p>PHONE: The phone form factor.</p> </li> <li> <p>TABLET: The tablet form factor.</p> </li> </ul></p> #[serde(rename = "formFactor")] #[serde(skip_serializing_if = "Option::is_none")] pub form_factor: Option<String>, /// <p>The device's heap size, expressed in bytes.</p> #[serde(rename = "heapSize")] #[serde(skip_serializing_if = "Option::is_none")] pub heap_size: Option<i64>, /// <p>The device's image name.</p> #[serde(rename = "image")] #[serde(skip_serializing_if = "Option::is_none")] pub image: Option<String>, /// <p>The instances belonging to this device.</p> #[serde(rename = "instances")] #[serde(skip_serializing_if = "Option::is_none")] pub instances: Option<Vec<DeviceInstance>>, /// <p>The device's manufacturer name.</p> #[serde(rename = "manufacturer")] #[serde(skip_serializing_if = "Option::is_none")] pub manufacturer: Option<String>, /// <p>The device's total memory size, expressed in bytes.</p> #[serde(rename = "memory")] #[serde(skip_serializing_if = "Option::is_none")] pub memory: Option<i64>, /// <p>The device's model name.</p> #[serde(rename = "model")] #[serde(skip_serializing_if = "Option::is_none")] pub model: Option<String>, /// <p>The device's model ID.</p> #[serde(rename = "modelId")] #[serde(skip_serializing_if = "Option::is_none")] pub model_id: Option<String>, /// <p>The device's display name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The device's operating system type.</p> #[serde(rename = "os")] #[serde(skip_serializing_if = "Option::is_none")] pub os: Option<String>, /// <p><p>The device's platform.</p> <p>Allowed values include:</p> <ul> <li> <p>ANDROID: The Android platform.</p> </li> <li> <p>IOS: The iOS platform.</p> </li> </ul></p> #[serde(rename = "platform")] #[serde(skip_serializing_if = "Option::is_none")] pub platform: Option<String>, /// <p>The device's radio.</p> #[serde(rename = "radio")] #[serde(skip_serializing_if = "Option::is_none")] pub radio: Option<String>, /// <p>Specifies whether remote access has been enabled for the specified device.</p> #[serde(rename = "remoteAccessEnabled")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_access_enabled: Option<bool>, /// <p>This flag is set to <code>true</code> if remote debugging is enabled for the device.</p> #[serde(rename = "remoteDebugEnabled")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_debug_enabled: Option<bool>, /// <p>The resolution of the device.</p> #[serde(rename = "resolution")] #[serde(skip_serializing_if = "Option::is_none")] pub resolution: Option<Resolution>, } /// <p>Represents the device instance.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeviceInstance { /// <p>The Amazon Resource Name (ARN) of the device instance.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The Amazon Resource Name (ARN) of the device.</p> #[serde(rename = "deviceArn")] #[serde(skip_serializing_if = "Option::is_none")] pub device_arn: Option<String>, /// <p>An object containing information about the instance profile.</p> #[serde(rename = "instanceProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_profile: Option<InstanceProfile>, /// <p>An array of strings describing the device instance.</p> #[serde(rename = "labels")] #[serde(skip_serializing_if = "Option::is_none")] pub labels: Option<Vec<String>>, /// <p>The status of the device instance. Valid values are listed below.</p> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>Unique device identifier for the device instance.</p> #[serde(rename = "udid")] #[serde(skip_serializing_if = "Option::is_none")] pub udid: Option<String>, }
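// Illustrative sketch, an assumption rather than part of the generated code:
// since every field of `Device` is optional, a consumer typically has to
// unwrap with fallbacks. This helper renders a short human-readable label.
fn describe_device(device: &Device) -> String {
    format!(
        "{} {} ({} {})",
        device.manufacturer.as_deref().unwrap_or("unknown"),
        device.model.as_deref().unwrap_or("unknown model"),
        device.platform.as_deref().unwrap_or("unknown platform"),
        device.os.as_deref().unwrap_or("?"),
    )
}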
/// <p>Represents the total (metered or unmetered) minutes used by the resource to run tests. Contains the sum of minutes consumed by all children.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DeviceMinutes { /// <p>When specified, represents only the sum of metered minutes used by the resource to run tests.</p> #[serde(rename = "metered")] #[serde(skip_serializing_if = "Option::is_none")] pub metered: Option<f64>, /// <p>When specified, represents the total minutes used by the resource to run tests.</p> #[serde(rename = "total")] #[serde(skip_serializing_if = "Option::is_none")] pub total: Option<f64>, /// <p>When specified, represents only the sum of unmetered minutes used by the resource to run tests.</p> #[serde(rename = "unmetered")] #[serde(skip_serializing_if = "Option::is_none")] pub unmetered: Option<f64>, } /// <p>Represents a collection of device types.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DevicePool { /// <p>The device pool's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The device pool's description.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The device pool's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>Information about the device pool's rules.</p> #[serde(rename = "rules")] #[serde(skip_serializing_if = "Option::is_none")] pub rules: Option<Vec<Rule>>, /// <p><p>The device pool's type.</p> <p>Allowed values include:</p> <ul> <li> <p>CURATED: A device pool that is created and managed by AWS Device Farm.</p> </li> <li> <p>PRIVATE: A device pool that is created and managed by the device pool developer.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents a device pool compatibility result.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct DevicePoolCompatibilityResult { /// <p>Whether the result was compatible with the device pool.</p> #[serde(rename = "compatible")] #[serde(skip_serializing_if = "Option::is_none")] pub compatible: Option<bool>, /// <p>The device (phone or tablet) that you wish to return information about.</p> #[serde(rename = "device")] #[serde(skip_serializing_if = "Option::is_none")] pub device: Option<Device>, /// <p>Information about the compatibility.</p> #[serde(rename = "incompatibilityMessages")] #[serde(skip_serializing_if = "Option::is_none")] pub incompatibility_messages: Option<Vec<IncompatibilityMessage>>, }
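// Illustrative sketch, not part of the service model: splitting compatibility
// results into compatible and incompatible devices, treating a missing
// `compatible` flag as incompatible.
fn partition_compatibility(
    results: Vec<DevicePoolCompatibilityResult>,
) -> (Vec<DevicePoolCompatibilityResult>, Vec<DevicePoolCompatibilityResult>) {
    results
        .into_iter()
        .partition(|r| r.compatible.unwrap_or(false))
}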
/// <p>Represents configuration information about a test run, such as the execution timeout (in minutes).</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ExecutionConfiguration { /// <p>True if account cleanup is enabled at the beginning of the test; otherwise, false.</p> #[serde(rename = "accountsCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub accounts_cleanup: Option<bool>, /// <p>True if app package cleanup is enabled at the beginning of the test; otherwise, false.</p> #[serde(rename = "appPackagesCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub app_packages_cleanup: Option<bool>, /// <p>The number of minutes a test run will execute before it times out.</p> #[serde(rename = "jobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub job_timeout_minutes: Option<i64>, /// <p>When set to <code>true</code>, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.</p> <p>For more information about how Device Farm re-signs your app(s), see <a href="https://aws.amazon.com/device-farm/faq/">Do you modify my app?</a> in the <i>AWS Device Farm FAQs</i>.</p> #[serde(rename = "skipAppResign")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_app_resign: Option<bool>, } /// <p>Represents the request sent to retrieve the account settings.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetAccountSettingsRequest {} /// <p>Represents the account settings return values from the <code>GetAccountSettings</code> request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetAccountSettingsResult { /// <p>The account settings.</p> #[serde(rename = "accountSettings")] #[serde(skip_serializing_if = "Option::is_none")] pub account_settings: Option<AccountSettings>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDeviceInstanceRequest { /// <p>The Amazon Resource Name (ARN) of the instance you're requesting information about.</p> #[serde(rename = "arn")] pub arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetDeviceInstanceResult { /// <p>An object containing information about your device instance.</p> #[serde(rename = "deviceInstance")] #[serde(skip_serializing_if = "Option::is_none")] pub device_instance: Option<DeviceInstance>, }
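// Illustrative sketch with hypothetical values: an `ExecutionConfiguration`
// with a 60-minute timeout and both cleanup flags enabled, leaving the
// remaining optional fields unset via the derived `Default`.
fn example_execution_configuration() -> ExecutionConfiguration {
    ExecutionConfiguration {
        accounts_cleanup: Some(true),
        app_packages_cleanup: Some(true),
        job_timeout_minutes: Some(60),
        ..Default::default()
    }
}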
/// <p>Represents a request to the get device pool compatibility operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDevicePoolCompatibilityRequest { /// <p>The ARN of the app that is associated with the specified device pool.</p> #[serde(rename = "appArn")] #[serde(skip_serializing_if = "Option::is_none")] pub app_arn: Option<String>, /// <p>The device pool's ARN.</p> #[serde(rename = "devicePoolArn")] pub device_pool_arn: String, /// <p>Information about the uploaded test to be run against the device pool.</p> #[serde(rename = "test")] #[serde(skip_serializing_if = "Option::is_none")] pub test: Option<ScheduleRunTest>, /// <p><p>The test type for the specified device pool.</p> <p>Allowed values include the following:</p> <ul> <li> <p>BUILTIN_FUZZ: The built-in fuzz type.</p> </li> <li> <p>BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.</p> </li> <li> <p>APPIUM_JAVA_JUNIT: The Appium Java JUnit type.</p> </li> <li> <p>APPIUM_JAVA_TESTNG: The Appium Java TestNG type.</p> </li> <li> <p>APPIUM_PYTHON: The Appium Python type.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.</p> </li> <li> <p>APPIUM_WEB_PYTHON: The Appium Python type for Web apps.</p> </li> <li> <p>CALABASH: The Calabash type.</p> </li> <li> <p>INSTRUMENTATION: The Instrumentation type.</p> </li> <li> <p>UIAUTOMATION: The uiautomation type.</p> </li> <li> <p>UIAUTOMATOR: The uiautomator type.</p> </li> <li> <p>XCTEST: The XCode test type.</p> </li> <li> <p>XCTEST_UI: The XCode UI test type.</p> </li> </ul></p> #[serde(rename = "testType")] #[serde(skip_serializing_if = "Option::is_none")] pub test_type: Option<String>, } /// <p>Represents the result of a describe device pool compatibility request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetDevicePoolCompatibilityResult { /// <p>Information about compatible devices.</p> #[serde(rename = "compatibleDevices")] #[serde(skip_serializing_if = "Option::is_none")] pub compatible_devices: Option<Vec<DevicePoolCompatibilityResult>>, /// <p>Information about incompatible devices.</p> #[serde(rename = "incompatibleDevices")] #[serde(skip_serializing_if = "Option::is_none")] pub incompatible_devices: Option<Vec<DevicePoolCompatibilityResult>>, } /// <p>Represents a request to the get device pool operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDevicePoolRequest { /// <p>The device pool's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get device pool request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetDevicePoolResult { /// <p>An object containing information about the requested device pool.</p> #[serde(rename = "devicePool")] #[serde(skip_serializing_if = "Option::is_none")] pub device_pool: Option<DevicePool>, } /// <p>Represents a request to the get device operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetDeviceRequest { /// <p>The device type's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get device request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetDeviceResult { /// <p>An object containing information about the requested device.</p> #[serde(rename = "device")] #[serde(skip_serializing_if = "Option::is_none")] pub device: Option<Device>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetInstanceProfileRequest { /// <p>The Amazon Resource Name (ARN) of your instance profile.</p> #[serde(rename = "arn")] pub arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetInstanceProfileResult { /// <p>An object containing information about your instance profile.</p> #[serde(rename = "instanceProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_profile: Option<InstanceProfile>, } /// <p>Represents a request to the get job operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetJobRequest { /// <p>The job's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get job request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetJobResult { /// <p>An object containing information about the requested job.</p> #[serde(rename = "job")] #[serde(skip_serializing_if = "Option::is_none")] pub job: Option<Job>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetNetworkProfileRequest { /// <p>The Amazon Resource Name (ARN) of the network profile you want to return information about.</p> #[serde(rename = "arn")] pub arn: String, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetNetworkProfileResult {
/// <p>The network profile.</p> #[serde(rename = "networkProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub network_profile: Option<NetworkProfile>, } /// <p>Represents the request to retrieve the offering status for the specified customer or account.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetOfferingStatusRequest { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Returns the status result for a device offering.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetOfferingStatusResult { /// <p>When specified, gets the offering status for the current period.</p> #[serde(rename = "current")] #[serde(skip_serializing_if = "Option::is_none")] pub current: Option<::std::collections::HashMap<String, OfferingStatus>>, /// <p>When specified, gets the offering status for the next period.</p> #[serde(rename = "nextPeriod")] #[serde(skip_serializing_if = "Option::is_none")] pub next_period: Option<::std::collections::HashMap<String, OfferingStatus>>, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents a request to the get project operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetProjectRequest { /// <p>The project's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get project request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetProjectResult { /// <p>The project you wish to get information about.</p> #[serde(rename = "project")] #[serde(skip_serializing_if = "Option::is_none")] pub project: Option<Project>, } /// <p>Represents the request to get information about the specified remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetRemoteAccessSessionRequest { /// <p>The Amazon Resource Name (ARN) of the remote access session about which you want to get session information.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the response from the server that lists detailed information about the remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetRemoteAccessSessionResult { /// <p>A container that lists detailed information about the remote access session.</p> #[serde(rename = "remoteAccessSession")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_access_session: Option<RemoteAccessSession>, } /// <p>Represents a request to the get run operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetRunRequest { /// <p>The run's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get run request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetRunResult { /// <p>The run you wish to get results from.</p> #[serde(rename = "run")] #[serde(skip_serializing_if = "Option::is_none")] pub run: Option<Run>, } /// <p>Represents a request to the get suite operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetSuiteRequest { /// <p>The suite's ARN.</p> #[serde(rename = "arn")] pub arn: 
String, } /// <p>Represents the result of a get suite request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetSuiteResult { /// <p>A collection of one or more tests.</p> #[serde(rename = "suite")] #[serde(skip_serializing_if = "Option::is_none")] pub suite: Option<Suite>, } /// <p>Represents a request to the get test operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetTestRequest { /// <p>The test's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get test request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetTestResult { /// <p>A test condition that is evaluated.</p> #[serde(rename = "test")] #[serde(skip_serializing_if = "Option::is_none")] pub test: Option<Test>, } /// <p>Represents a request to the get upload operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct GetUploadRequest { /// <p>The upload's ARN.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the result of a get upload request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct GetUploadResult { /// <p>An app or a set of one or more tests to upload or that have been uploaded.</p> #[serde(rename = "upload")] #[serde(skip_serializing_if = "Option::is_none")] pub upload: Option<Upload>, } /// <p>Represents information about incompatibility.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct IncompatibilityMessage { /// <p>A message about the incompatibility.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p><p>The type of incompatibility.</p> <p>Allowed values include:</p> <ul> <li> <p>ARN: The ARN.</p> </li> <li> <p>FORM_FACTOR: The form factor (for example, phone or tablet).</p> </li> <li> <p>MANUFACTURER: The manufacturer.</p> </li> <li> <p>PLATFORM: The platform (for example, Android or iOS).</p> </li> <li> <p>REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access.</p> </li> <li> <p>APPIUM_VERSION: The Appium version for the test.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents the request to install an Android application (in .apk format) or an iOS application (in .ipa format) as part of a remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct InstallToRemoteAccessSessionRequest { /// <p>The Amazon Resource Name (ARN) of the app about which you are requesting information.</p> #[serde(rename = "appArn")] pub app_arn: String, /// <p>The Amazon Resource Name (ARN) of the remote access session about which you are requesting information.</p> #[serde(rename = "remoteAccessSessionArn")] pub remote_access_session_arn: String, } /// <p>Represents the response from the server after AWS Device Farm makes a request to install to a remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct InstallToRemoteAccessSessionResult { /// <p>An app to upload or that has been uploaded.</p> #[serde(rename = "appUpload")] #[serde(skip_serializing_if = "Option::is_none")] pub app_upload: Option<Upload>, }
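// Illustrative sketch, an assumption rather than generated code: printing the
// incompatibility messages attached to a compatibility result, keyed on the
// string `type` values documented above.
fn log_incompatibilities(messages: &[IncompatibilityMessage]) {
    for m in messages {
        let kind = m.type_.as_deref().unwrap_or("UNKNOWN");
        let text = m.message.as_deref().unwrap_or("");
        println!("incompatible ({}): {}", kind, text);
    }
}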
/// <p>Represents the instance profile.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct InstanceProfile { /// <p>The Amazon Resource Name (ARN) of the instance profile.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The description of the instance profile.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.</p> <p>The list of packages is only considered if you set <code>packageCleanup</code> to <code>true</code>.</p> #[serde(rename = "excludeAppPackagesFromCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub exclude_app_packages_from_cleanup: Option<Vec<String>>, /// <p>The name of the instance profile.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>When set to <code>true</code>, Device Farm will remove app packages after a test run. The default value is <code>false</code> for private devices.</p> #[serde(rename = "packageCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub package_cleanup: Option<bool>, /// <p>When set to <code>true</code>, Device Farm will reboot the instance after a test run. The default value is <code>true</code>.</p> #[serde(rename = "rebootAfterUse")] #[serde(skip_serializing_if = "Option::is_none")] pub reboot_after_use: Option<bool>, } /// <p>Represents a job.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Job { /// <p>The job's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The job's result counters.</p> #[serde(rename = "counters")] #[serde(skip_serializing_if = "Option::is_none")] pub counters: Option<Counters>, /// <p>When the job was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>The device (phone or tablet).</p> #[serde(rename = "device")] #[serde(skip_serializing_if = "Option::is_none")] pub device: Option<Device>, /// <p>Represents the total (metered or unmetered) minutes used by the job.</p> #[serde(rename = "deviceMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub device_minutes: Option<DeviceMinutes>, /// <p>The Amazon Resource Name (ARN) of the instance.</p> #[serde(rename = "instanceArn")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_arn: Option<String>, /// <p>A message about the job's result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>The job's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p><p>The job's result.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "result")] #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<String>, /// <p>The job's start time.</p> #[serde(rename = "started")] #[serde(skip_serializing_if = "Option::is_none")] pub started: Option<f64>, /// <p><p>The job's status.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending status.</p> </li> <li> <p>PENDING_CONCURRENCY: A pending concurrency status.</p> </li> <li> <p>PENDING_DEVICE: A pending device status.</p> </li> <li> <p>PROCESSING: A processing status.</p> </li> <li> <p>SCHEDULING: A scheduling status.</p> </li> <li> <p>PREPARING: A preparing status.</p> </li> <li> <p>RUNNING: A running status.</p> </li> <li> <p>COMPLETED: A completed status.</p> </li> <li> <p>STOPPING: A stopping status.</p> </li> </ul></p> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>The job's stop time.</p> #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option<f64>, /// <p><p>The job's type.</p> <p>Allowed values include the following:</p> <ul> <li> <p>BUILTIN_FUZZ: The built-in fuzz type.</p> </li> <li> <p>BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.</p> </li> <li> <p>APPIUM_JAVA_JUNIT: The Appium Java JUnit type.</p> </li> <li> <p>APPIUM_JAVA_TESTNG: The Appium Java TestNG type.</p> </li> <li> <p>APPIUM_PYTHON: The Appium Python type.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.</p> </li> <li> <p>APPIUM_WEB_PYTHON: The Appium Python type for Web apps.</p> </li> <li> <p>CALABASH: The Calabash type.</p> </li> <li> <p>INSTRUMENTATION: The Instrumentation type.</p> </li> <li> <p>UIAUTOMATION: The uiautomation type.</p> </li> <li> <p>UIAUTOMATOR: The uiautomator type.</p> </li> <li> <p>XCTEST: The XCode test type.</p> </li> <li> <p>XCTEST_UI: The XCode UI test type.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, }
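// Illustrative sketch, not part of the service model: collapsing the documented
// `result` strings of a `Job` into a simple pass/fail/other verdict.
fn job_verdict(job: &Job) -> &'static str {
    match job.result.as_deref() {
        Some("PASSED") => "pass",
        Some("FAILED") | Some("ERRORED") => "fail",
        _ => "other",
    }
}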
/// <p>Represents a request to the list artifacts operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListArtifactsRequest { /// <p>The Run, Job, Suite, or Test ARN.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p><p>The artifacts' type.</p> <p>Allowed values include:</p> <ul> <li> <p>FILE: The artifacts are files.</p> </li> <li> <p>LOG: The artifacts are logs.</p> </li> <li> <p>SCREENSHOT: The artifacts are screenshots.</p> </li> </ul></p> #[serde(rename = "type")] pub type_: String, } /// <p>Represents the result of a list artifacts operation.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListArtifactsResult { /// <p>Information about the artifacts.</p> #[serde(rename = "artifacts")] #[serde(skip_serializing_if = "Option::is_none")] pub artifacts: Option<Vec<Artifact>>, /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, }
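// Illustrative sketch of the pagination contract described in the docs above:
// feed each response's `nextToken` back into the next request until it is
// absent. `fetch` stands in for whatever transport actually issues the call;
// it is an assumption, not part of the generated code.
fn collect_artifacts<F>(arn: &str, mut fetch: F) -> Vec<Artifact>
where
    F: FnMut(ListArtifactsRequest) -> ListArtifactsResult,
{
    let mut artifacts = Vec::new();
    let mut next_token = None;
    loop {
        let result = fetch(ListArtifactsRequest {
            arn: arn.to_string(),
            next_token: next_token.take(),
            type_: "FILE".to_string(),
        });
        artifacts.extend(result.artifacts.unwrap_or_default());
        match result.next_token {
            Some(token) => next_token = Some(token),
            None => break,
        }
    }
    artifacts
}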
#[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListDeviceInstancesRequest { /// <p>An integer specifying the maximum number of items you want to return in the API response.</p> #[serde(rename = "maxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option<i64>, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListDeviceInstancesResult { /// <p>An object containing information about your device instances.</p> #[serde(rename = "deviceInstances")] #[serde(skip_serializing_if = "Option::is_none")] pub device_instances: Option<Vec<DeviceInstance>>, /// <p>An identifier that can be used in the next call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents a request to the list device pools operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListDevicePoolsRequest { /// <p>The project ARN.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p><p>The device pools' type.</p> <p>Allowed values include:</p> <ul> <li> <p>CURATED: A device pool that is created and managed by AWS Device Farm.</p> </li> <li> <p>PRIVATE: A device pool that is created and managed by the device pool developer.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents the result of a list device pools request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListDevicePoolsResult { /// <p>Information about the device pools.</p> #[serde(rename = "devicePools")] #[serde(skip_serializing_if = "Option::is_none")] pub device_pools: Option<Vec<DevicePool>>, /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents a request to the list devices operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListDevicesRequest { /// <p>The Amazon Resource Name (ARN) of the project.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list devices operation.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListDevicesResult { /// <p>Information about the devices.</p> #[serde(rename = "devices")] #[serde(skip_serializing_if = "Option::is_none")] pub devices: Option<Vec<Device>>, /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)]
pub struct ListInstanceProfilesRequest { /// <p>An integer specifying the maximum number of items you want to return in the API response.</p> #[serde(rename = "maxResults")] #[serde(skip_serializing_if = "Option::is_none")] pub max_results: Option<i64>, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListInstanceProfilesResult { /// <p>An object containing information about your instance profiles.</p> #[serde(rename = "instanceProfiles")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_profiles: Option<Vec<InstanceProfile>>, /// <p>An identifier that can be used in the next call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents a request to the list jobs operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListJobsRequest { /// <p>The run's Amazon Resource Name (ARN).</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list jobs request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListJobsResult { /// <p>Information about the jobs.</p> #[serde(rename = "jobs")] #[serde(skip_serializing_if = "Option::is_none")] pub jobs: Option<Vec<Job>>, /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListNetworkProfilesRequest { /// <p>The Amazon Resource Name (ARN) of the project for which you want to list network profiles.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>The type of network profile you wish to return information about. 
Valid values are listed below.</p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListNetworkProfilesResult { /// <p>A list of the available network profiles.</p> #[serde(rename = "networkProfiles")] #[serde(skip_serializing_if = "Option::is_none")] pub network_profiles: Option<Vec<NetworkProfile>>, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListOfferingPromotionsRequest { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListOfferingPromotionsResult { /// <p>An identifier to be used in the next call to this operation, to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the offering promotions.</p> #[serde(rename = "offeringPromotions")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_promotions: Option<Vec<OfferingPromotion>>, } /// <p>Represents the request to list the offering transaction history.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListOfferingTransactionsRequest { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Returns the transaction log of the specified offerings.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListOfferingTransactionsResult { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>The audit log of subscriptions you have purchased and modified through AWS Device Farm.</p> #[serde(rename = "offeringTransactions")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_transactions: Option<Vec<OfferingTransaction>>, } /// <p>Represents the request to list all offerings.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListOfferingsRequest { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the return values of the list of offerings.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListOfferingsResult { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>A value 
representing the list offering results.</p> #[serde(rename = "offerings")] #[serde(skip_serializing_if = "Option::is_none")] pub offerings: Option<Vec<Offering>>, } /// <p>Represents a request to the list projects operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListProjectsRequest { /// <p>Optional. If no Amazon Resource Name (ARN) is specified, then AWS Device Farm returns a list of all projects for the AWS account. You can also specify a project ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list projects request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListProjectsResult { /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the projects.</p> #[serde(rename = "projects")] #[serde(skip_serializing_if = "Option::is_none")] pub projects: Option<Vec<Project>>, } /// <p>Represents the request to return information about the remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListRemoteAccessSessionsRequest { /// <p>The Amazon Resource Name (ARN) of the remote access session about which you are requesting information.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the response from the server after AWS Device Farm makes a request to return information about the remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListRemoteAccessSessionsResult { /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>A container representing the metadata from the service about each remote access session you are requesting.</p> #[serde(rename = "remoteAccessSessions")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_access_sessions: Option<Vec<RemoteAccessSession>>, } /// <p>Represents a request to the list runs operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListRunsRequest { /// <p>The Amazon Resource Name (ARN) of the project for which you want to list runs.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list runs request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub 
struct ListRunsResult { /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the runs.</p> #[serde(rename = "runs")] #[serde(skip_serializing_if = "Option::is_none")] pub runs: Option<Vec<Run>>, } /// <p>Represents a request to the list samples operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListSamplesRequest { /// <p>The Amazon Resource Name (ARN) of the project for which you want to list samples.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list samples request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListSamplesResult { /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the samples.</p> #[serde(rename = "samples")] #[serde(skip_serializing_if = "Option::is_none")] pub samples: Option<Vec<Sample>>, } /// <p>Represents a request to the list suites operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListSuitesRequest { /// <p>The job's Amazon Resource Name (ARN).</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list suites request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListSuitesResult { /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the suites.</p> #[serde(rename = "suites")] #[serde(skip_serializing_if = "Option::is_none")] pub suites: Option<Vec<Suite>>, } /// <p>Represents a request to the list tests operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListTestsRequest { /// <p>The test suite's Amazon Resource Name (ARN).</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list tests request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListTestsResult { /// <p>If the number of items that are returned is significantly large, 
this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the tests.</p> #[serde(rename = "tests")] #[serde(skip_serializing_if = "Option::is_none")] pub tests: Option<Vec<Test>>, } /// <p>Represents a request to the list unique problems operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListUniqueProblemsRequest { /// <p>The unique problems' ARNs.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list unique problems request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListUniqueProblemsResult { /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p><p>Information about the unique problems.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "uniqueProblems")] #[serde(skip_serializing_if = "Option::is_none")] pub unique_problems: Option<::std::collections::HashMap<String, Vec<UniqueProblem>>>, } /// <p>Represents a request to the list uploads operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ListUploadsRequest { /// <p>The Amazon Resource Name (ARN) of the project for which you want to list uploads.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, } /// <p>Represents the result of a list uploads request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ListUploadsResult { /// <p>If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.</p> #[serde(rename = "nextToken")] #[serde(skip_serializing_if = "Option::is_none")] pub next_token: Option<String>, /// <p>Information about the uploads.</p> #[serde(rename = "uploads")] #[serde(skip_serializing_if = "Option::is_none")] pub uploads: Option<Vec<Upload>>, } /// <p>Represents a latitude and longitude pair, expressed in geographic coordinate system degrees (for example 47.6204, -122.3491).</p> <p>Elevation is currently not supported.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Location { /// <p>The latitude.</p> #[serde(rename = "latitude")] pub latitude: 
f64, /// <p>The longitude.</p> #[serde(rename = "longitude")] pub longitude: f64, } /// <p>A number representing the monetary amount for an offering or transaction.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct MonetaryAmount { /// <p>The numerical amount of an offering or transaction.</p> #[serde(rename = "amount")] #[serde(skip_serializing_if = "Option::is_none")] pub amount: Option<f64>, /// <p>The currency code of a monetary amount. For example, <code>USD</code> means "U.S. dollars."</p> #[serde(rename = "currencyCode")] #[serde(skip_serializing_if = "Option::is_none")] pub currency_code: Option<String>, } /// <p>An array of settings that describes characteristics of a network profile.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct NetworkProfile { /// <p>The Amazon Resource Name (ARN) of the network profile.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The description of the network profile.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The data throughput rate in bits per second, as an integer from 0 to 104857600.</p> #[serde(rename = "downlinkBandwidthBits")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_bandwidth_bits: Option<i64>, /// <p>Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "downlinkDelayMs")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_delay_ms: Option<i64>, /// <p>Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "downlinkJitterMs")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_jitter_ms: Option<i64>, /// <p>Proportion of received packets that fail to arrive from 0 to 100 percent.</p> #[serde(rename = "downlinkLossPercent")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_loss_percent: Option<i64>, /// <p>The name of the network profile.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The type of network profile. 
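// Editor's note: the List*Request/List*Result pairs above all share the same `nextToken` pagination contract, so a minimal sketch of the loop may help. This is not part of the generated file: the function name is illustrative and the transport is abstracted as a closure, because this section only defines the request and result shapes.
fn collect_all_samples<F>(mut list_page: F, project_arn: &str) -> Vec<Sample>
where
    F: FnMut(ListSamplesRequest) -> ListSamplesResult,
{
    let mut samples = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        // Feed the token from the previous result back into the next request.
        let page = list_page(ListSamplesRequest {
            arn: project_arn.to_string(),
            next_token: next_token.take(),
        });
        samples.extend(page.samples.unwrap_or_default());
        match page.next_token {
            // A token in the result means more items remain.
            Some(token) => next_token = Some(token),
            // No token: this was the last page.
            None => break,
        }
    }
    samples
}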
Valid values are listed below.</p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The data throughput rate in bits per second, as an integer from 0 to 104857600.</p> #[serde(rename = "uplinkBandwidthBits")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_bandwidth_bits: Option<i64>, /// <p>Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "uplinkDelayMs")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_delay_ms: Option<i64>, /// <p>Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "uplinkJitterMs")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_jitter_ms: Option<i64>, /// <p>Proportion of transmitted packets that fail to arrive from 0 to 100 percent.</p> #[serde(rename = "uplinkLossPercent")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_loss_percent: Option<i64>, } /// <p>Represents the metadata of a device offering.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Offering { /// <p>A string describing the offering.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The ID that corresponds to a device offering.</p> #[serde(rename = "id")] #[serde(skip_serializing_if = "Option::is_none")] pub id: Option<String>, /// <p>The platform of the device (e.g., ANDROID or IOS).</p> #[serde(rename = "platform")] #[serde(skip_serializing_if = "Option::is_none")] pub platform: Option<String>, /// <p>Specifies whether there are recurring charges for the offering.</p> #[serde(rename = "recurringCharges")] #[serde(skip_serializing_if = "Option::is_none")] pub recurring_charges: Option<Vec<RecurringCharge>>, /// <p>The type of offering (e.g., "RECURRING") for a device.</p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents information about an offering promotion.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct OfferingPromotion { /// <p>A string describing the offering promotion.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The ID of the offering promotion.</p> #[serde(rename = "id")] #[serde(skip_serializing_if = "Option::is_none")] pub id: Option<String>, } /// <p>The status of the offering.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct OfferingStatus { /// <p>The date on which the offering is effective.</p> #[serde(rename = "effectiveOn")] #[serde(skip_serializing_if = "Option::is_none")] pub effective_on: Option<f64>, /// <p>Represents the metadata of an offering status.</p> #[serde(rename = "offering")] #[serde(skip_serializing_if = "Option::is_none")] pub offering: Option<Offering>, /// <p>The number of available devices in the offering.</p> #[serde(rename = "quantity")] #[serde(skip_serializing_if = "Option::is_none")] pub quantity: Option<i64>, /// <p>The type specified for the offering status.</p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents the metadata of an offering transaction.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct OfferingTransaction { /// <p>The cost of an offering transaction.</p> #[serde(rename = "cost")] #[serde(skip_serializing_if = 
"Option::is_none")] pub cost: Option<MonetaryAmount>, /// <p>The date on which an offering transaction was created.</p> #[serde(rename = "createdOn")] #[serde(skip_serializing_if = "Option::is_none")] pub created_on: Option<f64>, /// <p>The ID that corresponds to a device offering promotion.</p> #[serde(rename = "offeringPromotionId")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_promotion_id: Option<String>, /// <p>The status of an offering transaction.</p> #[serde(rename = "offeringStatus")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_status: Option<OfferingStatus>, /// <p>The transaction ID of the offering transaction.</p> #[serde(rename = "transactionId")] #[serde(skip_serializing_if = "Option::is_none")] pub transaction_id: Option<String>, } /// <p>Represents a specific warning or failure.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Problem { /// <p>Information about the associated device.</p> #[serde(rename = "device")] #[serde(skip_serializing_if = "Option::is_none")] pub device: Option<Device>, /// <p>Information about the associated job.</p> #[serde(rename = "job")] #[serde(skip_serializing_if = "Option::is_none")] pub job: Option<ProblemDetail>, /// <p>A message about the problem's result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p><p>The problem&#39;s result.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "result")] #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<String>, /// <p>Information about the associated run.</p> #[serde(rename = "run")] #[serde(skip_serializing_if = "Option::is_none")] pub run: Option<ProblemDetail>, /// <p>Information about the associated suite.</p> #[serde(rename = "suite")] #[serde(skip_serializing_if = "Option::is_none")] pub suite: Option<ProblemDetail>, /// <p>Information about the associated test.</p> #[serde(rename = "test")] #[serde(skip_serializing_if = "Option::is_none")] pub test: Option<ProblemDetail>, } /// <p>Information about a problem detail.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ProblemDetail { /// <p>The problem detail's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The problem detail's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, } /// <p>Represents an operating-system neutral workspace for running and managing tests.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Project { /// <p>The project's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>When the project was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>The default number of minutes (at the project level) a test run will execute before it times out. 
Default value is 60 minutes.</p> #[serde(rename = "defaultJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub default_job_timeout_minutes: Option<i64>, /// <p>The project's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, } /// <p>Represents a request for a purchase offering.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct PurchaseOfferingRequest { /// <p>The ID of the offering.</p> #[serde(rename = "offeringId")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_id: Option<String>, /// <p>The ID of the offering promotion to be applied to the purchase.</p> #[serde(rename = "offeringPromotionId")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_promotion_id: Option<String>, /// <p>The number of device slots you wish to purchase in an offering request.</p> #[serde(rename = "quantity")] #[serde(skip_serializing_if = "Option::is_none")] pub quantity: Option<i64>, } /// <p>The result of the purchase offering (e.g., success or failure).</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct PurchaseOfferingResult { /// <p>Represents the offering transaction for the purchase result.</p> #[serde(rename = "offeringTransaction")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_transaction: Option<OfferingTransaction>, } /// <p>Represents the set of radios and their states on a device. Examples of radios include Wi-Fi, GPS, Bluetooth, and NFC.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Radios { /// <p>True if Bluetooth is enabled at the beginning of the test; otherwise, false.</p> #[serde(rename = "bluetooth")] #[serde(skip_serializing_if = "Option::is_none")] pub bluetooth: Option<bool>, /// <p>True if GPS is enabled at the beginning of the test; otherwise, false.</p> #[serde(rename = "gps")] #[serde(skip_serializing_if = "Option::is_none")] pub gps: Option<bool>, /// <p>True if NFC is enabled at the beginning of the test; otherwise, false.</p> #[serde(rename = "nfc")] #[serde(skip_serializing_if = "Option::is_none")] pub nfc: Option<bool>, /// <p>True if Wi-Fi is enabled at the beginning of the test; otherwise, false.</p> #[serde(rename = "wifi")] #[serde(skip_serializing_if = "Option::is_none")] pub wifi: Option<bool>, } /// <p>Specifies whether charges for devices will be recurring.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct RecurringCharge { /// <p>The cost of the recurring charge.</p> #[serde(rename = "cost")] #[serde(skip_serializing_if = "Option::is_none")] pub cost: Option<MonetaryAmount>, /// <p>The frequency in which charges will recur.</p> #[serde(rename = "frequency")] #[serde(skip_serializing_if = "Option::is_none")] pub frequency: Option<String>, } /// <p>Represents information about the remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct RemoteAccessSession { /// <p>The Amazon Resource Name (ARN) of the remote access session.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The billing method of the remote access session. Possible values include <code>METERED</code> or <code>UNMETERED</code>. 
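// Editor's note: an illustrative value for the Radios type defined above, not part of the generated file: Wi-Fi and GPS enabled, Bluetooth and NFC disabled at the start of the test.
fn example_radios() -> Radios {
    Radios {
        bluetooth: Some(false),
        gps: Some(true),
        nfc: Some(false),
        wifi: Some(true),
    }
}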
For more information about metered devices, see <a href="http://docs.aws.amazon.com/devicefarm/latest/developerguide/welcome.html#welcome-terminology">AWS Device Farm terminology</a>.</p> #[serde(rename = "billingMethod")] #[serde(skip_serializing_if = "Option::is_none")] pub billing_method: Option<String>, /// <p>Unique identifier of your client for the remote access session. Only returned if remote debugging is enabled for the remote access session.</p> #[serde(rename = "clientId")] #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, /// <p>The date and time the remote access session was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>The device (phone or tablet) used in the remote access session.</p> #[serde(rename = "device")] #[serde(skip_serializing_if = "Option::is_none")] pub device: Option<Device>, /// <p>The number of minutes a device is used in a remote access session (including setup and teardown minutes).</p> #[serde(rename = "deviceMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub device_minutes: Option<DeviceMinutes>, /// <p>Unique device identifier for the remote device. Only returned if remote debugging is enabled for the remote access session.</p> #[serde(rename = "deviceUdid")] #[serde(skip_serializing_if = "Option::is_none")] pub device_udid: Option<String>, /// <p>The endpoint for the remote access session.</p> #[serde(rename = "endpoint")] #[serde(skip_serializing_if = "Option::is_none")] pub endpoint: Option<String>, /// <p>IP address of the EC2 host where you need to connect to remotely debug devices. Only returned if remote debugging is enabled for the remote access session.</p> #[serde(rename = "hostAddress")] #[serde(skip_serializing_if = "Option::is_none")] pub host_address: Option<String>, /// <p>The Amazon Resource Name (ARN) of the instance.</p> #[serde(rename = "instanceArn")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_arn: Option<String>, /// <p><p>The interaction mode of the remote access session. Valid values are:</p> <ul> <li> <p>INTERACTIVE: You can interact with the iOS device by viewing, touching, and rotating the screen. You <b>cannot</b> run XCUITest framework-based tests in this mode.</p> </li> <li> <p>NO_VIDEO: You are connected to the device but cannot interact with it or view the screen. This mode has the fastest test execution speed. You <b>can</b> run XCUITest framework-based tests in this mode.</p> </li> <li> <p>VIDEO_ONLY: You can view the screen but cannot touch or rotate it.
You <b>can</b> run XCUITest framework-based tests and watch the screen in this mode.</p> </li> </ul></p> #[serde(rename = "interactionMode")] #[serde(skip_serializing_if = "Option::is_none")] pub interaction_mode: Option<String>, /// <p>A message about the remote access session.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>The name of the remote access session.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>This flag is set to <code>true</code> if remote debugging is enabled for the remote access session.</p> #[serde(rename = "remoteDebugEnabled")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_debug_enabled: Option<bool>, /// <p>The Amazon Resource Name (ARN) for the app to be recorded in the remote access session.</p> #[serde(rename = "remoteRecordAppArn")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_record_app_arn: Option<String>, /// <p>This flag is set to <code>true</code> if remote recording is enabled for the remote access session.</p> #[serde(rename = "remoteRecordEnabled")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_record_enabled: Option<bool>, /// <p><p>The result of the remote access session. Can be any of the following:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "result")] #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<String>, /// <p>When set to <code>true</code>, for private devices, Device Farm will not sign your app again. For public devices, Device Farm always signs your apps again and this parameter has no effect.</p> <p>For more information about how Device Farm re-signs your app(s), see <a href="https://aws.amazon.com/device-farm/faq/">Do you modify my app?</a> in the <i>AWS Device Farm FAQs</i>.</p> #[serde(rename = "skipAppResign")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_app_resign: Option<bool>, /// <p>The date and time the remote access session was started.</p> #[serde(rename = "started")] #[serde(skip_serializing_if = "Option::is_none")] pub started: Option<f64>, /// <p><p>The status of the remote access session. 
Can be any of the following:</p> <ul> <li> <p>PENDING: A pending status.</p> </li> <li> <p>PENDING_CONCURRENCY: A pending concurrency status.</p> </li> <li> <p>PENDING_DEVICE: A pending device status.</p> </li> <li> <p>PROCESSING: A processing status.</p> </li> <li> <p>SCHEDULING: A scheduling status.</p> </li> <li> <p>PREPARING: A preparing status.</p> </li> <li> <p>RUNNING: A running status.</p> </li> <li> <p>COMPLETED: A completed status.</p> </li> <li> <p>STOPPING: A stopping status.</p> </li> </ul></p> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>The date and time the remote access session was stopped.</p> #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option<f64>, } /// <p>A request representing an offering renewal.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct RenewOfferingRequest { /// <p>The ID of a request to renew an offering.</p> #[serde(rename = "offeringId")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_id: Option<String>, /// <p>The quantity requested in an offering renewal.</p> #[serde(rename = "quantity")] #[serde(skip_serializing_if = "Option::is_none")] pub quantity: Option<i64>, } /// <p>The result of a renewal offering.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct RenewOfferingResult { /// <p>Represents the status of the offering transaction for the renewal.</p> #[serde(rename = "offeringTransaction")] #[serde(skip_serializing_if = "Option::is_none")] pub offering_transaction: Option<OfferingTransaction>, } /// <p>Represents the screen resolution of a device in height and width, expressed in pixels.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Resolution { /// <p>The screen resolution's height, expressed in pixels.</p> #[serde(rename = "height")] #[serde(skip_serializing_if = "Option::is_none")] pub height: Option<i64>, /// <p>The screen resolution's width, expressed in pixels.</p> #[serde(rename = "width")] #[serde(skip_serializing_if = "Option::is_none")] pub width: Option<i64>, } /// <p>Represents a condition for a device pool.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Rule { /// <p><p>The rule&#39;s stringified attribute.
For example, specify the value as <code>&quot;&quot;abc&quot;&quot;</code>.</p> <p>Allowed values include:</p> <ul> <li> <p>ARN: The ARN.</p> </li> <li> <p>FORM_FACTOR: The form factor (for example, phone or tablet).</p> </li> <li> <p>MANUFACTURER: The manufacturer.</p> </li> <li> <p>PLATFORM: The platform (for example, Android or iOS).</p> </li> <li> <p>REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access.</p> </li> <li> <p>APPIUM_VERSION: The Appium version for the test.</p> </li> <li> <p>INSTANCE_ARN: The Amazon Resource Name (ARN) of the device instance.</p> </li> <li> <p>INSTANCE_LABELS: The label of the device instance.</p> </li> </ul></p> #[serde(rename = "attribute")] #[serde(skip_serializing_if = "Option::is_none")] pub attribute: Option<String>, /// <p><p>The rule&#39;s operator.</p> <ul> <li> <p>EQUALS: The equals operator.</p> </li> <li> <p>GREATER_THAN: The greater-than operator.</p> </li> <li> <p>IN: The in operator.</p> </li> <li> <p>LESS_THAN: The less-than operator.</p> </li> <li> <p>NOT_IN: The not-in operator.</p> </li> <li> <p>CONTAINS: The contains operator.</p> </li> </ul></p> #[serde(rename = "operator")] #[serde(skip_serializing_if = "Option::is_none")] pub operator: Option<String>, /// <p>The rule's value.</p> #[serde(rename = "value")] #[serde(skip_serializing_if = "Option::is_none")] pub value: Option<String>, } /// <p>Represents a test run on a set of devices with a given app package, test parameters, etc.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Run { /// <p>An app to upload or that has been uploaded.</p> #[serde(rename = "appUpload")] #[serde(skip_serializing_if = "Option::is_none")] pub app_upload: Option<String>, /// <p>The run's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>Specifies the billing method for a test run: <code>metered</code> or <code>unmetered</code>.
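// Editor's note: an illustrative device pool condition built from the Rule type above, not part of the generated file. The doubled quotes in `value` follow the stringified format shown in the attribute documentation.
fn example_android_only_rule() -> Rule {
    Rule {
        // Match on the device platform...
        attribute: Some("PLATFORM".to_string()),
        // ...using the equals operator...
        operator: Some("EQUALS".to_string()),
        // ...against the stringified value "ANDROID".
        value: Some("\"ANDROID\"".to_string()),
    }
}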
If the parameter is not specified, the default value is <code>metered</code>.</p> #[serde(rename = "billingMethod")] #[serde(skip_serializing_if = "Option::is_none")] pub billing_method: Option<String>, /// <p>The total number of completed jobs.</p> #[serde(rename = "completedJobs")] #[serde(skip_serializing_if = "Option::is_none")] pub completed_jobs: Option<i64>, /// <p>The run's result counters.</p> #[serde(rename = "counters")] #[serde(skip_serializing_if = "Option::is_none")] pub counters: Option<Counters>, /// <p>When the run was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>Output <code>CustomerArtifactPaths</code> object for the test run.</p> #[serde(rename = "customerArtifactPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub customer_artifact_paths: Option<CustomerArtifactPaths>, /// <p>Represents the total (metered or unmetered) minutes used by the test run.</p> #[serde(rename = "deviceMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub device_minutes: Option<DeviceMinutes>, /// <p>The ARN of the device pool for the run.</p> #[serde(rename = "devicePoolArn")] #[serde(skip_serializing_if = "Option::is_none")] pub device_pool_arn: Option<String>, /// <p>For fuzz tests, this is the number of events, between 1 and 10000, that the UI fuzz test should perform.</p> #[serde(rename = "eventCount")] #[serde(skip_serializing_if = "Option::is_none")] pub event_count: Option<i64>, /// <p>The number of minutes the job will execute before it times out.</p> #[serde(rename = "jobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub job_timeout_minutes: Option<i64>, /// <p>Information about the locale that is used for the run.</p> #[serde(rename = "locale")] #[serde(skip_serializing_if = "Option::is_none")] pub locale: Option<String>, /// <p>Information about the location that is used for the run.</p> #[serde(rename = "location")] #[serde(skip_serializing_if = "Option::is_none")] pub location: Option<Location>, /// <p>A message about the run's result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>The run's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The network profile being used for a test run.</p> #[serde(rename = "networkProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub network_profile: Option<NetworkProfile>, /// <p>Read-only URL for an object in S3 bucket where you can get the parsing results of the test package. 
If the test package doesn't parse, the reason why it doesn't parse appears in the file that this URL points to.</p> #[serde(rename = "parsingResultUrl")] #[serde(skip_serializing_if = "Option::is_none")] pub parsing_result_url: Option<String>, /// <p><p>The run&#39;s platform.</p> <p>Allowed values include:</p> <ul> <li> <p>ANDROID: The Android platform.</p> </li> <li> <p>IOS: The iOS platform.</p> </li> </ul></p> #[serde(rename = "platform")] #[serde(skip_serializing_if = "Option::is_none")] pub platform: Option<String>, /// <p>Information about the radio states for the run.</p> #[serde(rename = "radios")] #[serde(skip_serializing_if = "Option::is_none")] pub radios: Option<Radios>, /// <p><p>The run&#39;s result.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "result")] #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<String>, /// <p>Supporting field for the result field. Set only if <code>result</code> is <code>SKIPPED</code>. <code>PARSING_FAILED</code> if the result is skipped because of test package parsing failure.</p> #[serde(rename = "resultCode")] #[serde(skip_serializing_if = "Option::is_none")] pub result_code: Option<String>, /// <p>For fuzz tests, this is a seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.</p> #[serde(rename = "seed")] #[serde(skip_serializing_if = "Option::is_none")] pub seed: Option<i64>, /// <p>When set to <code>true</code>, for private devices, Device Farm will not sign your app again. 
For public devices, Device Farm always signs your apps again and this parameter has no effect.</p> <p>For more information about how Device Farm re-signs your app(s), see <a href="https://aws.amazon.com/device-farm/faq/">Do you modify my app?</a> in the <i>AWS Device Farm FAQs</i>.</p> #[serde(rename = "skipAppResign")] #[serde(skip_serializing_if = "Option::is_none")] pub skip_app_resign: Option<bool>, /// <p>The run's start time.</p> #[serde(rename = "started")] #[serde(skip_serializing_if = "Option::is_none")] pub started: Option<f64>, /// <p><p>The run&#39;s status.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending status.</p> </li> <li> <p>PENDING_CONCURRENCY: A pending concurrency status.</p> </li> <li> <p>PENDING_DEVICE: A pending device status.</p> </li> <li> <p>PROCESSING: A processing status.</p> </li> <li> <p>SCHEDULING: A scheduling status.</p> </li> <li> <p>PREPARING: A preparing status.</p> </li> <li> <p>RUNNING: A running status.</p> </li> <li> <p>COMPLETED: A completed status.</p> </li> <li> <p>STOPPING: A stopping status.</p> </li> </ul></p> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>The run's stop time.</p> #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option<f64>, /// <p>The total number of jobs for the run.</p> #[serde(rename = "totalJobs")] #[serde(skip_serializing_if = "Option::is_none")] pub total_jobs: Option<i64>, /// <p><p>The run&#39;s type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>BUILTIN_FUZZ: The built-in fuzz type.</p> </li> <li> <p>BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.</p> </li> <li> <p>APPIUM_JAVA_JUNIT: The Appium Java JUnit type.</p> </li> <li> <p>APPIUM_JAVA_TESTNG: The Appium Java TestNG type.</p> </li> <li> <p>APPIUM_PYTHON: The Appium Python type.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.</p> </li> <li> <p>APPIUM_WEB_PYTHON: The Appium Python type for Web apps.</p> </li> <li> <p>CALABASH: The Calabash type.</p> </li> <li> <p>INSTRUMENTATION: The Instrumentation type.</p> </li> <li> <p>UIAUTOMATION: The uiautomation type.</p> </li> <li> <p>UIAUTOMATOR: The uiautomator type.</p> </li> <li> <p>XCTEST: The XCode test type.</p> </li> <li> <p>XCTEST_UI: The XCode UI test type.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The Device Farm console URL for the recording of the run.</p> #[serde(rename = "webUrl")] #[serde(skip_serializing_if = "Option::is_none")] pub web_url: Option<String>, } /// <p>Represents a sample of performance data.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Sample { /// <p>The sample's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p><p>The sample&#39;s type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>CPU: A CPU sample type. This is expressed as the app processing CPU time (including child processes) as reported by process, as a percentage.</p> </li> <li> <p>MEMORY: A memory usage sample type.
This is expressed as the total proportional set size of an app process, in kilobytes.</p> </li> <li> <p>NATIVE_AVG_DRAWTIME</p> </li> <li> <p>NATIVE_FPS</p> </li> <li> <p>NATIVE_FRAMES</p> </li> <li> <p>NATIVE_MAX_DRAWTIME</p> </li> <li> <p>NATIVE_MIN_DRAWTIME</p> </li> <li> <p>OPENGL_AVG_DRAWTIME</p> </li> <li> <p>OPENGL_FPS</p> </li> <li> <p>OPENGL_FRAMES</p> </li> <li> <p>OPENGL_MAX_DRAWTIME</p> </li> <li> <p>OPENGL_MIN_DRAWTIME</p> </li> <li> <p>RX</p> </li> <li> <p>RX_RATE: The total number of bytes per second (TCP and UDP) that are received, by app process.</p> </li> <li> <p>THREADS: A threads sample type. This is expressed as the total number of threads per app process.</p> </li> <li> <p>TX</p> </li> <li> <p>TX_RATE: The total number of bytes per second (TCP and UDP) that are sent, by app process.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the sample's file.</p> #[serde(rename = "url")] #[serde(skip_serializing_if = "Option::is_none")] pub url: Option<String>, } /// <p>Represents the settings for a run. Includes things like location, radio states, auxiliary apps, and network profiles.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ScheduleRunConfiguration { /// <p>A list of auxiliary apps for the run.</p> #[serde(rename = "auxiliaryApps")] #[serde(skip_serializing_if = "Option::is_none")] pub auxiliary_apps: Option<Vec<String>>, /// <p>Specifies the billing method for a test run: <code>metered</code> or <code>unmetered</code>. If the parameter is not specified, the default value is <code>metered</code>.</p> #[serde(rename = "billingMethod")] #[serde(skip_serializing_if = "Option::is_none")] pub billing_method: Option<String>, /// <p>Input <code>CustomerArtifactPaths</code> object for the scheduled run configuration.</p> #[serde(rename = "customerArtifactPaths")] #[serde(skip_serializing_if = "Option::is_none")] pub customer_artifact_paths: Option<CustomerArtifactPaths>, /// <p>The ARN of the extra data for the run.
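// Editor's note: an illustrative ScheduleRunConfiguration (the struct whose fields are being defined here), not part of the generated file. It reuses the example coordinates from the Location docs above; unset fields fall back to Default.
fn example_run_configuration() -> ScheduleRunConfiguration {
    ScheduleRunConfiguration {
        billing_method: Some("unmetered".to_string()),
        // Seattle, per the Location example (47.6204, -122.3491).
        location: Some(Location {
            latitude: 47.6204,
            longitude: -122.3491,
        }),
        radios: Some(Radios {
            bluetooth: Some(false),
            gps: Some(true),
            nfc: Some(false),
            wifi: Some(true),
        }),
        ..Default::default()
    }
}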
The extra data is a .zip file that AWS Device Farm will extract to external data for Android or the app's sandbox for iOS.</p> #[serde(rename = "extraDataPackageArn")] #[serde(skip_serializing_if = "Option::is_none")] pub extra_data_package_arn: Option<String>, /// <p>Information about the locale that is used for the run.</p> #[serde(rename = "locale")] #[serde(skip_serializing_if = "Option::is_none")] pub locale: Option<String>, /// <p>Information about the location that is used for the run.</p> #[serde(rename = "location")] #[serde(skip_serializing_if = "Option::is_none")] pub location: Option<Location>, /// <p>Reserved for internal use.</p> #[serde(rename = "networkProfileArn")] #[serde(skip_serializing_if = "Option::is_none")] pub network_profile_arn: Option<String>, /// <p>Information about the radio states for the run.</p> #[serde(rename = "radios")] #[serde(skip_serializing_if = "Option::is_none")] pub radios: Option<Radios>, } /// <p>Represents a request to the schedule run operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ScheduleRunRequest { /// <p>The ARN of the app to schedule a run.</p> #[serde(rename = "appArn")] #[serde(skip_serializing_if = "Option::is_none")] pub app_arn: Option<String>, /// <p>Information about the settings for the run to be scheduled.</p> #[serde(rename = "configuration")] #[serde(skip_serializing_if = "Option::is_none")] pub configuration: Option<ScheduleRunConfiguration>, /// <p>The ARN of the device pool for the run to be scheduled.</p> #[serde(rename = "devicePoolArn")] pub device_pool_arn: String, /// <p>Specifies configuration information about a test run, such as the execution timeout (in minutes).</p> #[serde(rename = "executionConfiguration")] #[serde(skip_serializing_if = "Option::is_none")] pub execution_configuration: Option<ExecutionConfiguration>, /// <p>The name for the run to be scheduled.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The ARN of the project for the run to be scheduled.</p> #[serde(rename = "projectArn")] pub project_arn: String, /// <p>Information about the test for the run to be scheduled.</p> #[serde(rename = "test")] pub test: ScheduleRunTest, } /// <p>Represents the result of a schedule run request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct ScheduleRunResult { /// <p>Information about the scheduled run.</p> #[serde(rename = "run")] #[serde(skip_serializing_if = "Option::is_none")] pub run: Option<Run>, } /// <p>Represents additional test settings.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct ScheduleRunTest { /// <p>The test's filter.</p> #[serde(rename = "filter")] #[serde(skip_serializing_if = "Option::is_none")] pub filter: Option<String>, /// <p><p>The test&#39;s parameters, such as the following test framework parameters and fixture settings:</p> <p>For Calabash tests:</p> <ul> <li> <p>profile: A cucumber profile, for example, &quot;my_profile_name&quot;.</p> </li> <li> <p>tags: You can limit execution to features or scenarios that have (or don&#39;t have) certain tags, for example, &quot;@smoke&quot; or &quot;@smoke,~@wip&quot;.</p> </li> </ul> <p>For Appium tests (all types):</p> <ul> <li> <p>appium_version: The Appium version.
Currently supported values are &quot;1.4.16&quot;, &quot;1.6.3&quot;, &quot;latest&quot;, and &quot;default&quot;.</p> <ul> <li> <p>&quot;latest&quot; will run the latest Appium version supported by Device Farm (1.6.3).</p> </li> <li> <p>For &quot;default&quot;, Device Farm will choose a compatible version of Appium for the device. The current behavior is to run 1.4.16 on Android devices and iOS 9 and earlier, 1.6.3 for iOS 10 and later.</p> </li> <li> <p>This behavior is subject to change.</p> </li> </ul> </li> </ul> <p>For Fuzz tests (Android only):</p> <ul> <li> <p>event_count: The number of events, between 1 and 10000, that the UI fuzz test should perform.</p> </li> <li> <p>throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait between events.</p> </li> <li> <p>seed: A seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.</p> </li> </ul> <p>For Explorer tests:</p> <ul> <li> <p>username: A username to use if the Explorer encounters a login form. If not supplied, no username will be inserted.</p> </li> <li> <p>password: A password to use if the Explorer encounters a login form. If not supplied, no password will be inserted.</p> </li> </ul> <p>For Instrumentation:</p> <ul> <li> <p>filter: A test filter string. Examples:</p> <ul> <li> <p>Running a single test case: &quot;com.android.abc.Test1&quot;</p> </li> <li> <p>Running a single test: &quot;com.android.abc.Test1#smoke&quot;</p> </li> <li> <p>Running multiple tests: &quot;com.android.abc.Test1,com.android.abc.Test2&quot;</p> </li> </ul> </li> </ul> <p>For XCTest and XCTestUI:</p> <ul> <li> <p>filter: A test filter string. Examples:</p> <ul> <li> <p>Running a single test class: &quot;LoginTests&quot;</p> </li> <li> <p>Running multiple test classes: &quot;LoginTests,SmokeTests&quot;</p> </li> <li> <p>Running a single test: &quot;LoginTests/testValid&quot;</p> </li> <li> <p>Running multiple tests: &quot;LoginTests/testValid,LoginTests/testInvalid&quot;</p> </li> </ul> </li> </ul> <p>For UIAutomator:</p> <ul> <li> <p>filter: A test filter string.
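// Editor's note: an illustrative ScheduleRunTest for an Appium Java JUnit run, not part of the generated file (the filter examples for UIAutomator continue just below). The parameters map carries the free-form framework settings described above; the upload ARN is a placeholder.
fn example_schedule_run_test() -> ScheduleRunTest {
    let mut parameters = ::std::collections::HashMap::new();
    // Pin the Appium version instead of relying on "default" selection.
    parameters.insert("appium_version".to_string(), "1.6.3".to_string());
    ScheduleRunTest {
        filter: None,
        parameters: Some(parameters),
        test_package_arn: Some(
            "arn:aws:devicefarm:us-west-2:123456789012:upload:EXAMPLE".to_string(),
        ),
        type_: "APPIUM_JAVA_JUNIT".to_string(),
    }
}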
Examples:</p> <ul> <li> <p>Running a single test case: &quot;com.android.abc.Test1&quot;</p> </li> <li> <p>Running a single test: &quot;com.android.abc.Test1#smoke&quot;</p> </li> <li> <p>Running multiple tests: &quot;com.android.abc.Test1,com.android.abc.Test2&quot;</p> </li> </ul> </li> </ul></p> #[serde(rename = "parameters")] #[serde(skip_serializing_if = "Option::is_none")] pub parameters: Option<::std::collections::HashMap<String, String>>, /// <p>The ARN of the uploaded test that will be run.</p> #[serde(rename = "testPackageArn")] #[serde(skip_serializing_if = "Option::is_none")] pub test_package_arn: Option<String>, /// <p><p>The test&#39;s type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>BUILTIN_FUZZ: The built-in fuzz type.</p> </li> <li> <p>BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.</p> </li> <li> <p>APPIUM_JAVA_JUNIT: The Appium Java JUnit type.</p> </li> <li> <p>APPIUM_JAVA_TESTNG: The Appium Java TestNG type.</p> </li> <li> <p>APPIUM_PYTHON: The Appium Python type.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.</p> </li> <li> <p>APPIUM_WEB_PYTHON: The Appium Python type for Web apps.</p> </li> <li> <p>CALABASH: The Calabash type.</p> </li> <li> <p>INSTRUMENTATION: The Instrumentation type.</p> </li> <li> <p>UIAUTOMATION: The uiautomation type.</p> </li> <li> <p>UIAUTOMATOR: The uiautomator type.</p> </li> <li> <p>XCTEST: The XCode test type.</p> </li> <li> <p>XCTEST_UI: The XCode UI test type.</p> </li> </ul></p> #[serde(rename = "type")] pub type_: String, } /// <p>Represents the request to stop the remote access session.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StopRemoteAccessSessionRequest { /// <p>The Amazon Resource Name (ARN) of the remote access session you wish to stop.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the response from the server that describes the remote access session when AWS Device Farm stops the session.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct StopRemoteAccessSessionResult { /// <p>A container representing the metadata from the service about the remote access session you are stopping.</p> #[serde(rename = "remoteAccessSession")] #[serde(skip_serializing_if = "Option::is_none")] pub remote_access_session: Option<RemoteAccessSession>, } /// <p>Represents the request to stop a specific run.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct StopRunRequest { /// <p>Represents the Amazon Resource Name (ARN) of the Device Farm run you wish to stop.</p> #[serde(rename = "arn")] pub arn: String, } /// <p>Represents the results of your stop run attempt.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct StopRunResult { /// <p>The run that was stopped.</p> #[serde(rename = "run")] #[serde(skip_serializing_if = "Option::is_none")] pub run: Option<Run>, } /// <p>Represents a collection of one or more tests.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Suite { /// <p>The suite's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The suite's result counters.</p> #[serde(rename = "counters")] #[serde(skip_serializing_if = "Option::is_none")] pub counters:
Option<Counters>, /// <p>When the suite was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>Represents the total (metered or unmetered) minutes used by the test suite.</p> #[serde(rename = "deviceMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub device_minutes: Option<DeviceMinutes>, /// <p>A message about the suite's result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>The suite's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p><p>The suite&#39;s result.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "result")] #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<String>, /// <p>The suite's start time.</p> #[serde(rename = "started")] #[serde(skip_serializing_if = "Option::is_none")] pub started: Option<f64>, /// <p><p>The suite&#39;s status.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending status.</p> </li> <li> <p>PENDING_CONCURRENCY: A pending concurrency status.</p> </li> <li> <p>PENDING_DEVICE: A pending device status.</p> </li> <li> <p>PROCESSING: A processing status.</p> </li> <li> <p>SCHEDULING: A scheduling status.</p> </li> <li> <p>PREPARING: A preparing status.</p> </li> <li> <p>RUNNING: A running status.</p> </li> <li> <p>COMPLETED: A completed status.</p> </li> <li> <p>STOPPING: A stopping status.</p> </li> </ul></p> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>The suite's stop time.</p> #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option<f64>, /// <p><p>The suite&#39;s type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>BUILTIN_FUZZ: The built-in fuzz type.</p> </li> <li> <p>BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.</p> </li> <li> <p>APPIUM_JAVA_JUNIT: The Appium Java JUnit type.</p> </li> <li> <p>APPIUM_JAVA_TESTNG: The Appium Java TestNG type.</p> </li> <li> <p>APPIUM_PYTHON: The Appium Python type.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.</p> </li> <li> <p>APPIUM_WEB_PYTHON: The Appium Python type for Web apps.</p> </li> <li> <p>CALABASH: The Calabash type.</p> </li> <li> <p>INSTRUMENTATION: The Instrumentation type.</p> </li> <li> <p>UIAUTOMATION: The uiautomation type.</p> </li> <li> <p>UIAUTOMATOR: The uiautomator type.</p> </li> <li> <p>XCTEST: The XCode test type.</p> </li> <li> <p>XCTEST_UI: The XCode UI test type.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents a condition that is evaluated.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Test { /// <p>The test's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The test's result counters.</p> #[serde(rename = "counters")] #[serde(skip_serializing_if = "Option::is_none")] pub counters: Option<Counters>, /// <p>When the test was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>Represents the total (metered or unmetered) minutes used by the test.</p> #[serde(rename = "deviceMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub device_minutes: Option<DeviceMinutes>, /// <p>A message about the test's result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>The test's name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p><p>The test&#39;s result.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending condition.</p> </li> <li> <p>PASSED: A passing condition.</p> </li> <li> <p>WARNED: A warning condition.</p> </li> <li> <p>FAILED: A failed condition.</p> </li> <li> <p>SKIPPED: A skipped condition.</p> </li> <li> <p>ERRORED: An error condition.</p> </li> <li> <p>STOPPED: A stopped condition.</p> </li> </ul></p> #[serde(rename = "result")] #[serde(skip_serializing_if = "Option::is_none")] pub result: Option<String>, /// <p>The test's start time.</p> #[serde(rename = "started")] #[serde(skip_serializing_if = "Option::is_none")] pub started: Option<f64>, /// <p><p>The test&#39;s status.</p> <p>Allowed values include:</p> <ul> <li> <p>PENDING: A pending status.</p> </li> <li> <p>PENDING_CONCURRENCY: A pending concurrency status.</p> </li> <li> <p>PENDING_DEVICE: A pending device status.</p> </li> <li> <p>PROCESSING: A processing status.</p> </li> <li> <p>SCHEDULING: A scheduling status.</p> </li> <li> <p>PREPARING: A preparing status.</p> </li> <li> <p>RUNNING: A running status.</p> </li> <li> <p>COMPLETED: A completed status.</p> </li> <li> <p>STOPPING: A stopping status.</p> </li> </ul></p> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>The test's stop time.</p> #[serde(rename = "stopped")] #[serde(skip_serializing_if = "Option::is_none")] pub stopped: Option<f64>, /// <p><p>The test&#39;s type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>BUILTIN_FUZZ: The built-in fuzz type.</p> </li> <li> <p>BUILTIN_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time.</p> </li> <li> <p>APPIUM_JAVA_JUNIT: The Appium Java JUnit type.</p> </li> <li> <p>APPIUM_JAVA_TESTNG: The Appium Java TestNG type.</p> </li> <li> <p>APPIUM_PYTHON: The Appium Python type.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT: The Appium Java JUnit type for Web apps.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG: The Appium Java TestNG type for Web apps.</p> </li> <li> <p>APPIUM_WEB_PYTHON: The Appium Python type for Web apps.</p> </li> <li> <p>CALABASH: The Calabash type.</p> </li> <li> <p>INSTRUMENTATION: The Instrumentation type.</p> </li> <li> <p>UIAUTOMATION: The uiautomation type.</p> </li> <li> <p>UIAUTOMATOR: The uiautomator type.</p> </li> <li> <p>XCTEST: The XCode test type.</p> </li> <li> <p>XCTEST_UI: The XCode UI test type.</p> </li> </ul></p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } /// <p>Represents information about free
trial device minutes for an AWS account.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct TrialMinutes { /// <p>The number of free trial minutes remaining in the account.</p> #[serde(rename = "remaining")] #[serde(skip_serializing_if = "Option::is_none")] pub remaining: Option<f64>, /// <p>The total number of free trial minutes that the account started with.</p> #[serde(rename = "total")] #[serde(skip_serializing_if = "Option::is_none")] pub total: Option<f64>, } /// <p>A collection of one or more problems, grouped by their result.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct UniqueProblem { /// <p>A message about the unique problems' result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>Information about the problems.</p> #[serde(rename = "problems")] #[serde(skip_serializing_if = "Option::is_none")] pub problems: Option<Vec<Problem>>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateDeviceInstanceRequest { /// <p>The Amazon Resource Name (ARN) of the device instance.</p> #[serde(rename = "arn")] pub arn: String, /// <p>An array of strings that you want to associate with the device instance.</p> #[serde(rename = "labels")] #[serde(skip_serializing_if = "Option::is_none")] pub labels: Option<Vec<String>>, /// <p>The Amazon Resource Name (ARN) of the profile that you want to associate with the device instance.</p> #[serde(rename = "profileArn")] #[serde(skip_serializing_if = "Option::is_none")] pub profile_arn: Option<String>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct UpdateDeviceInstanceResult { /// <p>An object containing information about your device instance.</p> #[serde(rename = "deviceInstance")] #[serde(skip_serializing_if = "Option::is_none")] pub device_instance: Option<DeviceInstance>, } /// <p>Represents a request to the update device pool operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateDevicePoolRequest { /// <p>The Amazon Resource Name (ARN) of the Device Farm device pool you wish to update.</p> #[serde(rename = "arn")] pub arn: String, /// <p>A description of the device pool you wish to update.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>A string representing the name of the device pool you wish to update.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>Represents the rules you wish to modify for the device pool.
Updating rules is optional; however, if you choose to update rules for your request, the update will replace the existing rules.</p> #[serde(rename = "rules")] #[serde(skip_serializing_if = "Option::is_none")] pub rules: Option<Vec<Rule>>, } /// <p>Represents the result of an update device pool request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct UpdateDevicePoolResult { /// <p>The device pool you just updated.</p> #[serde(rename = "devicePool")] #[serde(skip_serializing_if = "Option::is_none")] pub device_pool: Option<DevicePool>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateInstanceProfileRequest { /// <p>The Amazon Resource Name (ARN) of the instance profile.</p> #[serde(rename = "arn")] pub arn: String, /// <p>The updated description for your instance profile.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>An array of strings specifying the list of app packages that should not be cleaned up from the device after a test run is over.</p> <p>The list of packages is only considered if you set <code>packageCleanup</code> to <code>true</code>.</p> #[serde(rename = "excludeAppPackagesFromCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub exclude_app_packages_from_cleanup: Option<Vec<String>>, /// <p>The updated name for your instance profile.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The updated choice for whether you want to specify package cleanup. The default value is <code>false</code> for private devices.</p> #[serde(rename = "packageCleanup")] #[serde(skip_serializing_if = "Option::is_none")] pub package_cleanup: Option<bool>, /// <p>The updated choice for whether you want to reboot the device after use. 
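// Editor's note: an illustrative UpdateDevicePoolRequest (defined above), not part of the generated file. Per its docs, supplying `rules` replaces the pool's existing rules rather than merging with them; the ARN is a placeholder.
fn example_update_device_pool() -> UpdateDevicePoolRequest {
    UpdateDevicePoolRequest {
        arn: "arn:aws:devicefarm:us-west-2:123456789012:devicepool:EXAMPLE".to_string(),
        description: Some("Android phones only".to_string()),
        name: None,
        // This Vec overwrites whatever rules the pool had before.
        rules: Some(vec![Rule {
            attribute: Some("PLATFORM".to_string()),
            operator: Some("EQUALS".to_string()),
            value: Some("\"ANDROID\"".to_string()),
        }]),
    }
}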
The default value is <code>true</code>.</p> #[serde(rename = "rebootAfterUse")] #[serde(skip_serializing_if = "Option::is_none")] pub reboot_after_use: Option<bool>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct UpdateInstanceProfileResult { /// <p>An object containing information about your instance profile.</p> #[serde(rename = "instanceProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub instance_profile: Option<InstanceProfile>, } #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateNetworkProfileRequest { /// <p>The Amazon Resource Name (ARN) of the project for which you want to update network profile settings.</p> #[serde(rename = "arn")] pub arn: String, /// <p>The description of the network profile about which you are returning information.</p> #[serde(rename = "description")] #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, /// <p>The data throughput rate in bits per second, as an integer from 0 to 104857600.</p> #[serde(rename = "downlinkBandwidthBits")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_bandwidth_bits: Option<i64>, /// <p>Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "downlinkDelayMs")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_delay_ms: Option<i64>, /// <p>Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "downlinkJitterMs")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_jitter_ms: Option<i64>, /// <p>Proportion of received packets that fail to arrive from 0 to 100 percent.</p> #[serde(rename = "downlinkLossPercent")] #[serde(skip_serializing_if = "Option::is_none")] pub downlink_loss_percent: Option<i64>, /// <p>The name of the network profile about which you are returning information.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The type of network profile you wish to return information about.
Valid values are listed below.</p> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The data throughput rate in bits per second, as an integer from 0 to 104857600.</p> #[serde(rename = "uplinkBandwidthBits")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_bandwidth_bits: Option<i64>, /// <p>Delay time for all packets to destination in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "uplinkDelayMs")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_delay_ms: Option<i64>, /// <p>Time variation in the delay of received packets in milliseconds as an integer from 0 to 2000.</p> #[serde(rename = "uplinkJitterMs")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_jitter_ms: Option<i64>, /// <p>Proportion of transmitted packets that fail to arrive from 0 to 100 percent.</p> #[serde(rename = "uplinkLossPercent")] #[serde(skip_serializing_if = "Option::is_none")] pub uplink_loss_percent: Option<i64>, } #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct UpdateNetworkProfileResult { /// <p>The updated network profile.</p> #[serde(rename = "networkProfile")] #[serde(skip_serializing_if = "Option::is_none")] pub network_profile: Option<NetworkProfile>, } /// <p>Represents a request to the update project operation.</p> #[derive(Default, Debug, Clone, PartialEq, Serialize)] pub struct UpdateProjectRequest { /// <p>The Amazon Resource Name (ARN) of the project whose name you wish to update.</p> #[serde(rename = "arn")] pub arn: String, /// <p>The number of minutes a test run in the project will execute before it times out.</p> #[serde(rename = "defaultJobTimeoutMinutes")] #[serde(skip_serializing_if = "Option::is_none")] pub default_job_timeout_minutes: Option<i64>, /// <p>A string representing the new name of the project that you are updating.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, } /// <p>Represents the result of an update project request.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct UpdateProjectResult { /// <p>The updated project.</p> #[serde(rename = "project")] #[serde(skip_serializing_if = "Option::is_none")] pub project: Option<Project>, } /// <p>An app or a set of one or more tests to upload or that have been uploaded.</p> #[derive(Default, Debug, Clone, PartialEq, Deserialize)] pub struct Upload { /// <p>The upload's ARN.</p> #[serde(rename = "arn")] #[serde(skip_serializing_if = "Option::is_none")] pub arn: Option<String>, /// <p>The upload's content type (for example, "application/octet-stream").</p> #[serde(rename = "contentType")] #[serde(skip_serializing_if = "Option::is_none")] pub content_type: Option<String>, /// <p>When the upload was created.</p> #[serde(rename = "created")] #[serde(skip_serializing_if = "Option::is_none")] pub created: Option<f64>, /// <p>A message about the upload's result.</p> #[serde(rename = "message")] #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, /// <p>The upload's metadata. 
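// --- Illustrative sketch (not part of the generated module) ---
// Result types only derive `Deserialize`: they travel one way, from the
// service back to the caller, and fields missing from the payload simply
// deserialize to `None`. The payload is made up, and the `Project` fields
// used here are assumed to match the struct defined earlier in this crate.
fn example_parse_result() {
    let result: UpdateProjectResult = serde_json::from_str(
        r#"{"project":{"arn":"arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE","name":"MyProject"}}"#,
    )
    .expect("well-formed response body");
    let name = result.project.and_then(|p| p.name);
    assert_eq!(name, Some("MyProject".to_string()));
}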
For example, for Android, this contains information that is parsed from the manifest and is displayed in the AWS Device Farm console after the associated app is uploaded.</p> #[serde(rename = "metadata")] #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option<String>, /// <p>The upload's file name.</p> #[serde(rename = "name")] #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// <p>The upload's status.</p> <p>Must be one of the following values:</p> <ul> <li> <p>FAILED: A failed status.</p> </li> <li> <p>INITIALIZED: An initialized status.</p> </li> <li> <p>PROCESSING: A processing status.</p> </li> <li> <p>SUCCEEDED: A succeeded status.</p> </li> </ul> #[serde(rename = "status")] #[serde(skip_serializing_if = "Option::is_none")] pub status: Option<String>, /// <p>The upload's type.</p> <p>Must be one of the following values:</p> <ul> <li> <p>ANDROID_APP: An Android upload.</p> </li> <li> <p>IOS_APP: An iOS upload.</p> </li> <li> <p>WEB_APP: A web application upload.</p> </li> <li> <p>EXTERNAL_DATA: An external data upload.</p> </li> <li> <p>APPIUM_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.</p> </li> <li> <p>APPIUM_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.</p> </li> <li> <p>APPIUM_PYTHON_TEST_PACKAGE: An Appium Python test package upload.</p> </li> <li> <p>APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE: An Appium Java JUnit test package upload.</p> </li> <li> <p>APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE: An Appium Java TestNG test package upload.</p> </li> <li> <p>APPIUM_WEB_PYTHON_TEST_PACKAGE: An Appium Python test package upload.</p> </li> <li> <p>CALABASH_TEST_PACKAGE: A Calabash test package upload.</p> </li> <li> <p>INSTRUMENTATION_TEST_PACKAGE: An instrumentation upload.</p> </li> <li> <p>UIAUTOMATION_TEST_PACKAGE: A uiautomation test package upload.</p> </li> <li> <p>UIAUTOMATOR_TEST_PACKAGE: A uiautomator test package upload.</p> </li> <li> <p>XCTEST_TEST_PACKAGE: An Xcode test package upload.</p> </li> <li> <p>XCTEST_UI_TEST_PACKAGE: An Xcode UI test package upload.</p> </li> </ul> #[serde(rename = "type")] #[serde(skip_serializing_if = "Option::is_none")] pub type_: Option<String>, /// <p>The pre-signed Amazon S3 URL that was used to store a file through a corresponding PUT request.</p> #[serde(rename = "url")] #[serde(skip_serializing_if = "Option::is_none")] pub url: Option<String>, } /// Errors returned by CreateDevicePool #[derive(Debug, PartialEq)] pub enum CreateDevicePoolError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
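// --- Illustrative sketch (not part of the generated module) ---
// `status` and `type_` are plain `Option<String>`s rather than enums, so
// callers compare against the documented values directly.
fn upload_is_finished(upload: &Upload) -> bool {
    match upload.status.as_ref().map(String::as_str) {
        Some("SUCCEEDED") | Some("FAILED") => true,
        _ => false,
    }
}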
Unknown(String), } impl CreateDevicePoolError { pub fn from_body(body: &str) -> CreateDevicePoolError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { CreateDevicePoolError::Argument(String::from(error_message)) } "LimitExceededException" => { CreateDevicePoolError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { CreateDevicePoolError::NotFound(String::from(error_message)) } "ServiceAccountException" => { CreateDevicePoolError::ServiceAccount(String::from(error_message)) } "ValidationException" => { CreateDevicePoolError::Validation(error_message.to_string()) } _ => CreateDevicePoolError::Unknown(String::from(body)), } } Err(_) => CreateDevicePoolError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for CreateDevicePoolError { fn from(err: serde_json::error::Error) -> CreateDevicePoolError { CreateDevicePoolError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for CreateDevicePoolError { fn from(err: CredentialsError) -> CreateDevicePoolError { CreateDevicePoolError::Credentials(err) } } impl From<HttpDispatchError> for CreateDevicePoolError { fn from(err: HttpDispatchError) -> CreateDevicePoolError { CreateDevicePoolError::HttpDispatch(err) } } impl From<io::Error> for CreateDevicePoolError { fn from(err: io::Error) -> CreateDevicePoolError { CreateDevicePoolError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateDevicePoolError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateDevicePoolError { fn description(&self) -> &str { match *self { CreateDevicePoolError::Argument(ref cause) => cause, CreateDevicePoolError::LimitExceeded(ref cause) => cause, CreateDevicePoolError::NotFound(ref cause) => cause, CreateDevicePoolError::ServiceAccount(ref cause) => cause, CreateDevicePoolError::Validation(ref cause) => cause, CreateDevicePoolError::Credentials(ref err) => err.description(), CreateDevicePoolError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CreateDevicePoolError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateInstanceProfile #[derive(Debug, PartialEq)] pub enum CreateInstanceProfileError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
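// --- Illustrative sketch (not part of the generated module) ---
// `from_body` keys off the last `#`-separated segment of the `__type` field
// (service errors arrive as e.g. "com.amazonaws.devicefarm#NotFoundException";
// the exact namespace here is made up) and falls back to `Unknown` with the
// raw body when nothing matches.
fn example_error_parsing() {
    let err = CreateDevicePoolError::from_body(
        r#"{"__type":"com.amazonaws.devicefarm#NotFoundException","message":"no such project"}"#,
    );
    assert_eq!(
        err,
        CreateDevicePoolError::NotFound("no such project".to_string())
    );
}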
Unknown(String), } impl CreateInstanceProfileError { pub fn from_body(body: &str) -> CreateInstanceProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { CreateInstanceProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { CreateInstanceProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { CreateInstanceProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { CreateInstanceProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { CreateInstanceProfileError::Validation(error_message.to_string()) } _ => CreateInstanceProfileError::Unknown(String::from(body)), } } Err(_) => CreateInstanceProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for CreateInstanceProfileError { fn from(err: serde_json::error::Error) -> CreateInstanceProfileError { CreateInstanceProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for CreateInstanceProfileError { fn from(err: CredentialsError) -> CreateInstanceProfileError { CreateInstanceProfileError::Credentials(err) } } impl From<HttpDispatchError> for CreateInstanceProfileError { fn from(err: HttpDispatchError) -> CreateInstanceProfileError { CreateInstanceProfileError::HttpDispatch(err) } } impl From<io::Error> for CreateInstanceProfileError { fn from(err: io::Error) -> CreateInstanceProfileError { CreateInstanceProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateInstanceProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateInstanceProfileError { fn description(&self) -> &str { match *self { CreateInstanceProfileError::Argument(ref cause) => cause, CreateInstanceProfileError::LimitExceeded(ref cause) => cause, CreateInstanceProfileError::NotFound(ref cause) => cause, CreateInstanceProfileError::ServiceAccount(ref cause) => cause, CreateInstanceProfileError::Validation(ref cause) => cause, CreateInstanceProfileError::Credentials(ref err) => err.description(), CreateInstanceProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateInstanceProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateNetworkProfile #[derive(Debug, PartialEq)] pub enum CreateNetworkProfileError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl CreateNetworkProfileError { pub fn from_body(body: &str) -> CreateNetworkProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { CreateNetworkProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { CreateNetworkProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { CreateNetworkProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { CreateNetworkProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { CreateNetworkProfileError::Validation(error_message.to_string()) } _ => CreateNetworkProfileError::Unknown(String::from(body)), } } Err(_) => CreateNetworkProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for CreateNetworkProfileError { fn from(err: serde_json::error::Error) -> CreateNetworkProfileError { CreateNetworkProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for CreateNetworkProfileError { fn from(err: CredentialsError) -> CreateNetworkProfileError { CreateNetworkProfileError::Credentials(err) } } impl From<HttpDispatchError> for CreateNetworkProfileError { fn from(err: HttpDispatchError) -> CreateNetworkProfileError { CreateNetworkProfileError::HttpDispatch(err) } } impl From<io::Error> for CreateNetworkProfileError { fn from(err: io::Error) -> CreateNetworkProfileError { CreateNetworkProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateNetworkProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateNetworkProfileError { fn description(&self) -> &str { match *self { CreateNetworkProfileError::Argument(ref cause) => cause, CreateNetworkProfileError::LimitExceeded(ref cause) => cause, CreateNetworkProfileError::NotFound(ref cause) => cause, CreateNetworkProfileError::ServiceAccount(ref cause) => cause, CreateNetworkProfileError::Validation(ref cause) => cause, CreateNetworkProfileError::Credentials(ref err) => err.description(), CreateNetworkProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateNetworkProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateProject #[derive(Debug, PartialEq)] pub enum CreateProjectError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl CreateProjectError { pub fn from_body(body: &str) -> CreateProjectError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { CreateProjectError::Argument(String::from(error_message)) } "LimitExceededException" => { CreateProjectError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { CreateProjectError::NotFound(String::from(error_message)) } "ServiceAccountException" => { CreateProjectError::ServiceAccount(String::from(error_message)) } "ValidationException" => { CreateProjectError::Validation(error_message.to_string()) } _ => CreateProjectError::Unknown(String::from(body)), } } Err(_) => CreateProjectError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for CreateProjectError { fn from(err: serde_json::error::Error) -> CreateProjectError { CreateProjectError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for CreateProjectError { fn from(err: CredentialsError) -> CreateProjectError { CreateProjectError::Credentials(err) } } impl From<HttpDispatchError> for CreateProjectError { fn from(err: HttpDispatchError) -> CreateProjectError { CreateProjectError::HttpDispatch(err) } } impl From<io::Error> for CreateProjectError { fn from(err: io::Error) -> CreateProjectError { CreateProjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateProjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateProjectError { fn description(&self) -> &str { match *self { CreateProjectError::Argument(ref cause) => cause, CreateProjectError::LimitExceeded(ref cause) => cause, CreateProjectError::NotFound(ref cause) => cause, CreateProjectError::ServiceAccount(ref cause) => cause, CreateProjectError::Validation(ref cause) => cause, CreateProjectError::Credentials(ref err) => err.description(), CreateProjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CreateProjectError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateRemoteAccessSession #[derive(Debug, PartialEq)] pub enum CreateRemoteAccessSessionError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl CreateRemoteAccessSessionError { pub fn from_body(body: &str) -> CreateRemoteAccessSessionError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { CreateRemoteAccessSessionError::Argument(String::from(error_message)) } "LimitExceededException" => { CreateRemoteAccessSessionError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { CreateRemoteAccessSessionError::NotFound(String::from(error_message)) } "ServiceAccountException" => { CreateRemoteAccessSessionError::ServiceAccount(String::from(error_message)) } "ValidationException" => { CreateRemoteAccessSessionError::Validation(error_message.to_string()) } _ => CreateRemoteAccessSessionError::Unknown(String::from(body)), } } Err(_) => CreateRemoteAccessSessionError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for CreateRemoteAccessSessionError { fn from(err: serde_json::error::Error) -> CreateRemoteAccessSessionError { CreateRemoteAccessSessionError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for CreateRemoteAccessSessionError { fn from(err: CredentialsError) -> CreateRemoteAccessSessionError { CreateRemoteAccessSessionError::Credentials(err) } } impl From<HttpDispatchError> for CreateRemoteAccessSessionError { fn from(err: HttpDispatchError) -> CreateRemoteAccessSessionError { CreateRemoteAccessSessionError::HttpDispatch(err) } } impl From<io::Error> for CreateRemoteAccessSessionError { fn from(err: io::Error) -> CreateRemoteAccessSessionError { CreateRemoteAccessSessionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateRemoteAccessSessionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateRemoteAccessSessionError { fn description(&self) -> &str { match *self { CreateRemoteAccessSessionError::Argument(ref cause) => cause, CreateRemoteAccessSessionError::LimitExceeded(ref cause) => cause, CreateRemoteAccessSessionError::NotFound(ref cause) => cause, CreateRemoteAccessSessionError::ServiceAccount(ref cause) => cause, CreateRemoteAccessSessionError::Validation(ref cause) => cause, CreateRemoteAccessSessionError::Credentials(ref err) => err.description(), CreateRemoteAccessSessionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateRemoteAccessSessionError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateUpload #[derive(Debug, PartialEq)] pub enum CreateUploadError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl CreateUploadError { pub fn from_body(body: &str) -> CreateUploadError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => CreateUploadError::Argument(String::from(error_message)), "LimitExceededException" => { CreateUploadError::LimitExceeded(String::from(error_message)) } "NotFoundException" => CreateUploadError::NotFound(String::from(error_message)), "ServiceAccountException" => { CreateUploadError::ServiceAccount(String::from(error_message)) } "ValidationException" => { CreateUploadError::Validation(error_message.to_string()) } _ => CreateUploadError::Unknown(String::from(body)), } } Err(_) => CreateUploadError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for CreateUploadError { fn from(err: serde_json::error::Error) -> CreateUploadError { CreateUploadError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for CreateUploadError { fn from(err: CredentialsError) -> CreateUploadError { CreateUploadError::Credentials(err) } } impl From<HttpDispatchError> for CreateUploadError { fn from(err: HttpDispatchError) -> CreateUploadError { CreateUploadError::HttpDispatch(err) } } impl From<io::Error> for CreateUploadError { fn from(err: io::Error) -> CreateUploadError { CreateUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateUploadError { fn description(&self) -> &str { match *self { CreateUploadError::Argument(ref cause) => cause, CreateUploadError::LimitExceeded(ref cause) => cause, CreateUploadError::NotFound(ref cause) => cause, CreateUploadError::ServiceAccount(ref cause) => cause, CreateUploadError::Validation(ref cause) => cause, CreateUploadError::Credentials(ref err) => err.description(), CreateUploadError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CreateUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteDevicePool #[derive(Debug, PartialEq)] pub enum DeleteDevicePoolError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
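// --- Illustrative sketch (not part of the generated module) ---
// The `From` impls above exist so that dispatch code can use `?` on
// credential, HTTP, and JSON failures and have each land in the matching
// variant. `parse_upload_body` is a hypothetical helper, not crate API.
fn parse_upload_body(body: &str) -> Result<Upload, CreateUploadError> {
    // A serde_json::error::Error converts into CreateUploadError::Unknown via `?`.
    let upload: Upload = serde_json::from_str(body)?;
    Ok(upload)
}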
Unknown(String), } impl DeleteDevicePoolError { pub fn from_body(body: &str) -> DeleteDevicePoolError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { DeleteDevicePoolError::Argument(String::from(error_message)) } "LimitExceededException" => { DeleteDevicePoolError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { DeleteDevicePoolError::NotFound(String::from(error_message)) } "ServiceAccountException" => { DeleteDevicePoolError::ServiceAccount(String::from(error_message)) } "ValidationException" => { DeleteDevicePoolError::Validation(error_message.to_string()) } _ => DeleteDevicePoolError::Unknown(String::from(body)), } } Err(_) => DeleteDevicePoolError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteDevicePoolError { fn from(err: serde_json::error::Error) -> DeleteDevicePoolError { DeleteDevicePoolError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteDevicePoolError { fn from(err: CredentialsError) -> DeleteDevicePoolError { DeleteDevicePoolError::Credentials(err) } } impl From<HttpDispatchError> for DeleteDevicePoolError { fn from(err: HttpDispatchError) -> DeleteDevicePoolError { DeleteDevicePoolError::HttpDispatch(err) } } impl From<io::Error> for DeleteDevicePoolError { fn from(err: io::Error) -> DeleteDevicePoolError { DeleteDevicePoolError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteDevicePoolError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteDevicePoolError { fn description(&self) -> &str { match *self { DeleteDevicePoolError::Argument(ref cause) => cause, DeleteDevicePoolError::LimitExceeded(ref cause) => cause, DeleteDevicePoolError::NotFound(ref cause) => cause, DeleteDevicePoolError::ServiceAccount(ref cause) => cause, DeleteDevicePoolError::Validation(ref cause) => cause, DeleteDevicePoolError::Credentials(ref err) => err.description(), DeleteDevicePoolError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteDevicePoolError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteInstanceProfile #[derive(Debug, PartialEq)] pub enum DeleteInstanceProfileError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteInstanceProfileError { pub fn from_body(body: &str) -> DeleteInstanceProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { DeleteInstanceProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { DeleteInstanceProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { DeleteInstanceProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { DeleteInstanceProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { DeleteInstanceProfileError::Validation(error_message.to_string()) } _ => DeleteInstanceProfileError::Unknown(String::from(body)), } } Err(_) => DeleteInstanceProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteInstanceProfileError { fn from(err: serde_json::error::Error) -> DeleteInstanceProfileError { DeleteInstanceProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteInstanceProfileError { fn from(err: CredentialsError) -> DeleteInstanceProfileError { DeleteInstanceProfileError::Credentials(err) } } impl From<HttpDispatchError> for DeleteInstanceProfileError { fn from(err: HttpDispatchError) -> DeleteInstanceProfileError { DeleteInstanceProfileError::HttpDispatch(err) } } impl From<io::Error> for DeleteInstanceProfileError { fn from(err: io::Error) -> DeleteInstanceProfileError { DeleteInstanceProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteInstanceProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteInstanceProfileError { fn description(&self) -> &str { match *self { DeleteInstanceProfileError::Argument(ref cause) => cause, DeleteInstanceProfileError::LimitExceeded(ref cause) => cause, DeleteInstanceProfileError::NotFound(ref cause) => cause, DeleteInstanceProfileError::ServiceAccount(ref cause) => cause, DeleteInstanceProfileError::Validation(ref cause) => cause, DeleteInstanceProfileError::Credentials(ref err) => err.description(), DeleteInstanceProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteInstanceProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteNetworkProfile #[derive(Debug, PartialEq)] pub enum DeleteNetworkProfileError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteNetworkProfileError { pub fn from_body(body: &str) -> DeleteNetworkProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { DeleteNetworkProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { DeleteNetworkProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { DeleteNetworkProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { DeleteNetworkProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { DeleteNetworkProfileError::Validation(error_message.to_string()) } _ => DeleteNetworkProfileError::Unknown(String::from(body)), } } Err(_) => DeleteNetworkProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteNetworkProfileError { fn from(err: serde_json::error::Error) -> DeleteNetworkProfileError { DeleteNetworkProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteNetworkProfileError { fn from(err: CredentialsError) -> DeleteNetworkProfileError { DeleteNetworkProfileError::Credentials(err) } } impl From<HttpDispatchError> for DeleteNetworkProfileError { fn from(err: HttpDispatchError) -> DeleteNetworkProfileError { DeleteNetworkProfileError::HttpDispatch(err) } } impl From<io::Error> for DeleteNetworkProfileError { fn from(err: io::Error) -> DeleteNetworkProfileError { DeleteNetworkProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteNetworkProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteNetworkProfileError { fn description(&self) -> &str { match *self { DeleteNetworkProfileError::Argument(ref cause) => cause, DeleteNetworkProfileError::LimitExceeded(ref cause) => cause, DeleteNetworkProfileError::NotFound(ref cause) => cause, DeleteNetworkProfileError::ServiceAccount(ref cause) => cause, DeleteNetworkProfileError::Validation(ref cause) => cause, DeleteNetworkProfileError::Credentials(ref err) => err.description(), DeleteNetworkProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteNetworkProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteProject #[derive(Debug, PartialEq)] pub enum DeleteProjectError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteProjectError { pub fn from_body(body: &str) -> DeleteProjectError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { DeleteProjectError::Argument(String::from(error_message)) } "LimitExceededException" => { DeleteProjectError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { DeleteProjectError::NotFound(String::from(error_message)) } "ServiceAccountException" => { DeleteProjectError::ServiceAccount(String::from(error_message)) } "ValidationException" => { DeleteProjectError::Validation(error_message.to_string()) } _ => DeleteProjectError::Unknown(String::from(body)), } } Err(_) => DeleteProjectError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteProjectError { fn from(err: serde_json::error::Error) -> DeleteProjectError { DeleteProjectError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteProjectError { fn from(err: CredentialsError) -> DeleteProjectError { DeleteProjectError::Credentials(err) } } impl From<HttpDispatchError> for DeleteProjectError { fn from(err: HttpDispatchError) -> DeleteProjectError { DeleteProjectError::HttpDispatch(err) } } impl From<io::Error> for DeleteProjectError { fn from(err: io::Error) -> DeleteProjectError { DeleteProjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteProjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteProjectError { fn description(&self) -> &str { match *self { DeleteProjectError::Argument(ref cause) => cause, DeleteProjectError::LimitExceeded(ref cause) => cause, DeleteProjectError::NotFound(ref cause) => cause, DeleteProjectError::ServiceAccount(ref cause) => cause, DeleteProjectError::Validation(ref cause) => cause, DeleteProjectError::Credentials(ref err) => err.description(), DeleteProjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteProjectError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteRemoteAccessSession #[derive(Debug, PartialEq)] pub enum DeleteRemoteAccessSessionError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteRemoteAccessSessionError { pub fn from_body(body: &str) -> DeleteRemoteAccessSessionError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { DeleteRemoteAccessSessionError::Argument(String::from(error_message)) } "LimitExceededException" => { DeleteRemoteAccessSessionError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { DeleteRemoteAccessSessionError::NotFound(String::from(error_message)) } "ServiceAccountException" => { DeleteRemoteAccessSessionError::ServiceAccount(String::from(error_message)) } "ValidationException" => { DeleteRemoteAccessSessionError::Validation(error_message.to_string()) } _ => DeleteRemoteAccessSessionError::Unknown(String::from(body)), } } Err(_) => DeleteRemoteAccessSessionError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteRemoteAccessSessionError { fn from(err: serde_json::error::Error) -> DeleteRemoteAccessSessionError { DeleteRemoteAccessSessionError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteRemoteAccessSessionError { fn from(err: CredentialsError) -> DeleteRemoteAccessSessionError { DeleteRemoteAccessSessionError::Credentials(err) } } impl From<HttpDispatchError> for DeleteRemoteAccessSessionError { fn from(err: HttpDispatchError) -> DeleteRemoteAccessSessionError { DeleteRemoteAccessSessionError::HttpDispatch(err) } } impl From<io::Error> for DeleteRemoteAccessSessionError { fn from(err: io::Error) -> DeleteRemoteAccessSessionError { DeleteRemoteAccessSessionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteRemoteAccessSessionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteRemoteAccessSessionError { fn description(&self) -> &str { match *self { DeleteRemoteAccessSessionError::Argument(ref cause) => cause, DeleteRemoteAccessSessionError::LimitExceeded(ref cause) => cause, DeleteRemoteAccessSessionError::NotFound(ref cause) => cause, DeleteRemoteAccessSessionError::ServiceAccount(ref cause) => cause, DeleteRemoteAccessSessionError::Validation(ref cause) => cause, DeleteRemoteAccessSessionError::Credentials(ref err) => err.description(), DeleteRemoteAccessSessionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteRemoteAccessSessionError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteRun #[derive(Debug, PartialEq)] pub enum DeleteRunError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteRunError { pub fn from_body(body: &str) -> DeleteRunError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => DeleteRunError::Argument(String::from(error_message)), "LimitExceededException" => { DeleteRunError::LimitExceeded(String::from(error_message)) } "NotFoundException" => DeleteRunError::NotFound(String::from(error_message)), "ServiceAccountException" => { DeleteRunError::ServiceAccount(String::from(error_message)) } "ValidationException" => DeleteRunError::Validation(error_message.to_string()), _ => DeleteRunError::Unknown(String::from(body)), } } Err(_) => DeleteRunError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteRunError { fn from(err: serde_json::error::Error) -> DeleteRunError { DeleteRunError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteRunError { fn from(err: CredentialsError) -> DeleteRunError { DeleteRunError::Credentials(err) } } impl From<HttpDispatchError> for DeleteRunError { fn from(err: HttpDispatchError) -> DeleteRunError { DeleteRunError::HttpDispatch(err) } } impl From<io::Error> for DeleteRunError { fn from(err: io::Error) -> DeleteRunError { DeleteRunError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteRunError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteRunError { fn description(&self) -> &str { match *self { DeleteRunError::Argument(ref cause) => cause, DeleteRunError::LimitExceeded(ref cause) => cause, DeleteRunError::NotFound(ref cause) => cause, DeleteRunError::ServiceAccount(ref cause) => cause, DeleteRunError::Validation(ref cause) => cause, DeleteRunError::Credentials(ref err) => err.description(), DeleteRunError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteRunError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteUpload #[derive(Debug, PartialEq)] pub enum DeleteUploadError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteUploadError { pub fn from_body(body: &str) -> DeleteUploadError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => DeleteUploadError::Argument(String::from(error_message)), "LimitExceededException" => { DeleteUploadError::LimitExceeded(String::from(error_message)) } "NotFoundException" => DeleteUploadError::NotFound(String::from(error_message)), "ServiceAccountException" => { DeleteUploadError::ServiceAccount(String::from(error_message)) } "ValidationException" => { DeleteUploadError::Validation(error_message.to_string()) } _ => DeleteUploadError::Unknown(String::from(body)), } } Err(_) => DeleteUploadError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for DeleteUploadError { fn from(err: serde_json::error::Error) -> DeleteUploadError { DeleteUploadError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for DeleteUploadError { fn from(err: CredentialsError) -> DeleteUploadError { DeleteUploadError::Credentials(err) } } impl From<HttpDispatchError> for DeleteUploadError { fn from(err: HttpDispatchError) -> DeleteUploadError { DeleteUploadError::HttpDispatch(err) } } impl From<io::Error> for DeleteUploadError { fn from(err: io::Error) -> DeleteUploadError { DeleteUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteUploadError { fn description(&self) -> &str { match *self { DeleteUploadError::Argument(ref cause) => cause, DeleteUploadError::LimitExceeded(ref cause) => cause, DeleteUploadError::NotFound(ref cause) => cause, DeleteUploadError::ServiceAccount(ref cause) => cause, DeleteUploadError::Validation(ref cause) => cause, DeleteUploadError::Credentials(ref err) => err.description(), DeleteUploadError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by GetAccountSettings #[derive(Debug, PartialEq)] pub enum GetAccountSettingsError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetAccountSettingsError { pub fn from_body(body: &str) -> GetAccountSettingsError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetAccountSettingsError::Argument(String::from(error_message)) } "LimitExceededException" => { GetAccountSettingsError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetAccountSettingsError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetAccountSettingsError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetAccountSettingsError::Validation(error_message.to_string()) } _ => GetAccountSettingsError::Unknown(String::from(body)), } } Err(_) => GetAccountSettingsError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetAccountSettingsError { fn from(err: serde_json::error::Error) -> GetAccountSettingsError { GetAccountSettingsError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetAccountSettingsError { fn from(err: CredentialsError) -> GetAccountSettingsError { GetAccountSettingsError::Credentials(err) } } impl From<HttpDispatchError> for GetAccountSettingsError { fn from(err: HttpDispatchError) -> GetAccountSettingsError { GetAccountSettingsError::HttpDispatch(err) } } impl From<io::Error> for GetAccountSettingsError { fn from(err: io::Error) -> GetAccountSettingsError { GetAccountSettingsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetAccountSettingsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetAccountSettingsError { fn description(&self) -> &str { match *self { GetAccountSettingsError::Argument(ref cause) => cause, GetAccountSettingsError::LimitExceeded(ref cause) => cause, GetAccountSettingsError::NotFound(ref cause) => cause, GetAccountSettingsError::ServiceAccount(ref cause) => cause, GetAccountSettingsError::Validation(ref cause) => cause, GetAccountSettingsError::Credentials(ref err) => err.description(), GetAccountSettingsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetAccountSettingsError::Unknown(ref cause) => cause, } } } /// Errors returned by GetDevice #[derive(Debug, PartialEq)] pub enum GetDeviceError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetDeviceError { pub fn from_body(body: &str) -> GetDeviceError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetDeviceError::Argument(String::from(error_message)), "LimitExceededException" => { GetDeviceError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetDeviceError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetDeviceError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetDeviceError::Validation(error_message.to_string()), _ => GetDeviceError::Unknown(String::from(body)), } } Err(_) => GetDeviceError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetDeviceError { fn from(err: serde_json::error::Error) -> GetDeviceError { GetDeviceError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetDeviceError { fn from(err: CredentialsError) -> GetDeviceError { GetDeviceError::Credentials(err) } } impl From<HttpDispatchError> for GetDeviceError { fn from(err: HttpDispatchError) -> GetDeviceError { GetDeviceError::HttpDispatch(err) } } impl From<io::Error> for GetDeviceError { fn from(err: io::Error) -> GetDeviceError { GetDeviceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetDeviceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetDeviceError { fn description(&self) -> &str { match *self { GetDeviceError::Argument(ref cause) => cause, GetDeviceError::LimitExceeded(ref cause) => cause, GetDeviceError::NotFound(ref cause) => cause, GetDeviceError::ServiceAccount(ref cause) => cause, GetDeviceError::Validation(ref cause) => cause, GetDeviceError::Credentials(ref err) => err.description(), GetDeviceError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetDeviceError::Unknown(ref cause) => cause, } } } /// Errors returned by GetDeviceInstance #[derive(Debug, PartialEq)] pub enum GetDeviceInstanceError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
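// --- Illustrative sketch (not part of the generated module) ---
// Every operation error implements `Display` and `std::error::Error`, so
// callers can log or propagate failures uniformly without matching on the
// concrete variant. `log_failure` is hypothetical, for illustration only.
fn log_failure(operation: &str, err: &GetDeviceError) {
    eprintln!("{} failed: {}", operation, err);
}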
Unknown(String), } impl GetDeviceInstanceError { pub fn from_body(body: &str) -> GetDeviceInstanceError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetDeviceInstanceError::Argument(String::from(error_message)) } "LimitExceededException" => { GetDeviceInstanceError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetDeviceInstanceError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetDeviceInstanceError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetDeviceInstanceError::Validation(error_message.to_string()) } _ => GetDeviceInstanceError::Unknown(String::from(body)), } } Err(_) => GetDeviceInstanceError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetDeviceInstanceError { fn from(err: serde_json::error::Error) -> GetDeviceInstanceError { GetDeviceInstanceError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetDeviceInstanceError { fn from(err: CredentialsError) -> GetDeviceInstanceError { GetDeviceInstanceError::Credentials(err) } } impl From<HttpDispatchError> for GetDeviceInstanceError { fn from(err: HttpDispatchError) -> GetDeviceInstanceError { GetDeviceInstanceError::HttpDispatch(err) } } impl From<io::Error> for GetDeviceInstanceError { fn from(err: io::Error) -> GetDeviceInstanceError { GetDeviceInstanceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetDeviceInstanceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetDeviceInstanceError { fn description(&self) -> &str { match *self { GetDeviceInstanceError::Argument(ref cause) => cause, GetDeviceInstanceError::LimitExceeded(ref cause) => cause, GetDeviceInstanceError::NotFound(ref cause) => cause, GetDeviceInstanceError::ServiceAccount(ref cause) => cause, GetDeviceInstanceError::Validation(ref cause) => cause, GetDeviceInstanceError::Credentials(ref err) => err.description(), GetDeviceInstanceError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetDeviceInstanceError::Unknown(ref cause) => cause, } } } /// Errors returned by GetDevicePool #[derive(Debug, PartialEq)] pub enum GetDevicePoolError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetDevicePoolError { pub fn from_body(body: &str) -> GetDevicePoolError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetDevicePoolError::Argument(String::from(error_message)) } "LimitExceededException" => { GetDevicePoolError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetDevicePoolError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetDevicePoolError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetDevicePoolError::Validation(error_message.to_string()) } _ => GetDevicePoolError::Unknown(String::from(body)), } } Err(_) => GetDevicePoolError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetDevicePoolError { fn from(err: serde_json::error::Error) -> GetDevicePoolError { GetDevicePoolError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetDevicePoolError { fn from(err: CredentialsError) -> GetDevicePoolError { GetDevicePoolError::Credentials(err) } } impl From<HttpDispatchError> for GetDevicePoolError { fn from(err: HttpDispatchError) -> GetDevicePoolError { GetDevicePoolError::HttpDispatch(err) } } impl From<io::Error> for GetDevicePoolError { fn from(err: io::Error) -> GetDevicePoolError { GetDevicePoolError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetDevicePoolError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetDevicePoolError { fn description(&self) -> &str { match *self { GetDevicePoolError::Argument(ref cause) => cause, GetDevicePoolError::LimitExceeded(ref cause) => cause, GetDevicePoolError::NotFound(ref cause) => cause, GetDevicePoolError::ServiceAccount(ref cause) => cause, GetDevicePoolError::Validation(ref cause) => cause, GetDevicePoolError::Credentials(ref err) => err.description(), GetDevicePoolError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetDevicePoolError::Unknown(ref cause) => cause, } } } /// Errors returned by GetDevicePoolCompatibility #[derive(Debug, PartialEq)] pub enum GetDevicePoolCompatibilityError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetDevicePoolCompatibilityError { pub fn from_body(body: &str) -> GetDevicePoolCompatibilityError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetDevicePoolCompatibilityError::Argument(String::from(error_message)) } "LimitExceededException" => { GetDevicePoolCompatibilityError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetDevicePoolCompatibilityError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetDevicePoolCompatibilityError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetDevicePoolCompatibilityError::Validation(error_message.to_string()) } _ => GetDevicePoolCompatibilityError::Unknown(String::from(body)), } } Err(_) => GetDevicePoolCompatibilityError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetDevicePoolCompatibilityError { fn from(err: serde_json::error::Error) -> GetDevicePoolCompatibilityError { GetDevicePoolCompatibilityError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetDevicePoolCompatibilityError { fn from(err: CredentialsError) -> GetDevicePoolCompatibilityError { GetDevicePoolCompatibilityError::Credentials(err) } } impl From<HttpDispatchError> for GetDevicePoolCompatibilityError { fn from(err: HttpDispatchError) -> GetDevicePoolCompatibilityError { GetDevicePoolCompatibilityError::HttpDispatch(err) } } impl From<io::Error> for GetDevicePoolCompatibilityError { fn from(err: io::Error) -> GetDevicePoolCompatibilityError { GetDevicePoolCompatibilityError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetDevicePoolCompatibilityError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetDevicePoolCompatibilityError { fn description(&self) -> &str { match *self { GetDevicePoolCompatibilityError::Argument(ref cause) => cause, GetDevicePoolCompatibilityError::LimitExceeded(ref cause) => cause, GetDevicePoolCompatibilityError::NotFound(ref cause) => cause, GetDevicePoolCompatibilityError::ServiceAccount(ref cause) => cause, GetDevicePoolCompatibilityError::Validation(ref cause) => cause, GetDevicePoolCompatibilityError::Credentials(ref err) => err.description(), GetDevicePoolCompatibilityError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetDevicePoolCompatibilityError::Unknown(ref cause) => cause, } } } /// Errors returned by GetInstanceProfile #[derive(Debug, PartialEq)] pub enum GetInstanceProfileError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetInstanceProfileError { pub fn from_body(body: &str) -> GetInstanceProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetInstanceProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { GetInstanceProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetInstanceProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetInstanceProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetInstanceProfileError::Validation(error_message.to_string()) } _ => GetInstanceProfileError::Unknown(String::from(body)), } } Err(_) => GetInstanceProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetInstanceProfileError { fn from(err: serde_json::error::Error) -> GetInstanceProfileError { GetInstanceProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetInstanceProfileError { fn from(err: CredentialsError) -> GetInstanceProfileError { GetInstanceProfileError::Credentials(err) } } impl From<HttpDispatchError> for GetInstanceProfileError { fn from(err: HttpDispatchError) -> GetInstanceProfileError { GetInstanceProfileError::HttpDispatch(err) } } impl From<io::Error> for GetInstanceProfileError { fn from(err: io::Error) -> GetInstanceProfileError { GetInstanceProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetInstanceProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetInstanceProfileError { fn description(&self) -> &str { match *self { GetInstanceProfileError::Argument(ref cause) => cause, GetInstanceProfileError::LimitExceeded(ref cause) => cause, GetInstanceProfileError::NotFound(ref cause) => cause, GetInstanceProfileError::ServiceAccount(ref cause) => cause, GetInstanceProfileError::Validation(ref cause) => cause, GetInstanceProfileError::Credentials(ref err) => err.description(), GetInstanceProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetInstanceProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by GetJob #[derive(Debug, PartialEq)] pub enum GetJobError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetJobError { pub fn from_body(body: &str) -> GetJobError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetJobError::Argument(String::from(error_message)), "LimitExceededException" => { GetJobError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetJobError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetJobError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetJobError::Validation(error_message.to_string()), _ => GetJobError::Unknown(String::from(body)), } } Err(_) => GetJobError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetJobError { fn from(err: serde_json::error::Error) -> GetJobError { GetJobError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetJobError { fn from(err: CredentialsError) -> GetJobError { GetJobError::Credentials(err) } } impl From<HttpDispatchError> for GetJobError { fn from(err: HttpDispatchError) -> GetJobError { GetJobError::HttpDispatch(err) } } impl From<io::Error> for GetJobError { fn from(err: io::Error) -> GetJobError { GetJobError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetJobError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetJobError { fn description(&self) -> &str { match *self { GetJobError::Argument(ref cause) => cause, GetJobError::LimitExceeded(ref cause) => cause, GetJobError::NotFound(ref cause) => cause, GetJobError::ServiceAccount(ref cause) => cause, GetJobError::Validation(ref cause) => cause, GetJobError::Credentials(ref err) => err.description(), GetJobError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetJobError::Unknown(ref cause) => cause, } } } /// Errors returned by GetNetworkProfile #[derive(Debug, PartialEq)] pub enum GetNetworkProfileError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetNetworkProfileError { pub fn from_body(body: &str) -> GetNetworkProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetNetworkProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { GetNetworkProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetNetworkProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetNetworkProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetNetworkProfileError::Validation(error_message.to_string()) } _ => GetNetworkProfileError::Unknown(String::from(body)), } } Err(_) => GetNetworkProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetNetworkProfileError { fn from(err: serde_json::error::Error) -> GetNetworkProfileError { GetNetworkProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetNetworkProfileError { fn from(err: CredentialsError) -> GetNetworkProfileError { GetNetworkProfileError::Credentials(err) } } impl From<HttpDispatchError> for GetNetworkProfileError { fn from(err: HttpDispatchError) -> GetNetworkProfileError { GetNetworkProfileError::HttpDispatch(err) } } impl From<io::Error> for GetNetworkProfileError { fn from(err: io::Error) -> GetNetworkProfileError { GetNetworkProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetNetworkProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetNetworkProfileError { fn description(&self) -> &str { match *self { GetNetworkProfileError::Argument(ref cause) => cause, GetNetworkProfileError::LimitExceeded(ref cause) => cause, GetNetworkProfileError::NotFound(ref cause) => cause, GetNetworkProfileError::ServiceAccount(ref cause) => cause, GetNetworkProfileError::Validation(ref cause) => cause, GetNetworkProfileError::Credentials(ref err) => err.description(), GetNetworkProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetNetworkProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by GetOfferingStatus #[derive(Debug, PartialEq)] pub enum GetOfferingStatusError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>Thrown when a user is not eligible to perform the specified transaction.</p> NotEligible(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided.
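// The offering-related operations carry one coded exception the other
// operations do not: `NotEligibleException`, matched in the `from_body`
// implementation below and mapped to `GetOfferingStatusError::NotEligible`,
// alongside the five arms shared with every other operation in this file.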
Unknown(String), } impl GetOfferingStatusError { pub fn from_body(body: &str) -> GetOfferingStatusError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetOfferingStatusError::Argument(String::from(error_message)) } "LimitExceededException" => { GetOfferingStatusError::LimitExceeded(String::from(error_message)) } "NotEligibleException" => { GetOfferingStatusError::NotEligible(String::from(error_message)) } "NotFoundException" => { GetOfferingStatusError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetOfferingStatusError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetOfferingStatusError::Validation(error_message.to_string()) } _ => GetOfferingStatusError::Unknown(String::from(body)), } } Err(_) => GetOfferingStatusError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetOfferingStatusError { fn from(err: serde_json::error::Error) -> GetOfferingStatusError { GetOfferingStatusError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetOfferingStatusError { fn from(err: CredentialsError) -> GetOfferingStatusError { GetOfferingStatusError::Credentials(err) } } impl From<HttpDispatchError> for GetOfferingStatusError { fn from(err: HttpDispatchError) -> GetOfferingStatusError { GetOfferingStatusError::HttpDispatch(err) } } impl From<io::Error> for GetOfferingStatusError { fn from(err: io::Error) -> GetOfferingStatusError { GetOfferingStatusError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetOfferingStatusError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetOfferingStatusError { fn description(&self) -> &str { match *self { GetOfferingStatusError::Argument(ref cause) => cause, GetOfferingStatusError::LimitExceeded(ref cause) => cause, GetOfferingStatusError::NotEligible(ref cause) => cause, GetOfferingStatusError::NotFound(ref cause) => cause, GetOfferingStatusError::ServiceAccount(ref cause) => cause, GetOfferingStatusError::Validation(ref cause) => cause, GetOfferingStatusError::Credentials(ref err) => err.description(), GetOfferingStatusError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetOfferingStatusError::Unknown(ref cause) => cause, } } } /// Errors returned by GetProject #[derive(Debug, PartialEq)] pub enum GetProjectError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetProjectError { pub fn from_body(body: &str) -> GetProjectError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetProjectError::Argument(String::from(error_message)), "LimitExceededException" => { GetProjectError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetProjectError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetProjectError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetProjectError::Validation(error_message.to_string()), _ => GetProjectError::Unknown(String::from(body)), } } Err(_) => GetProjectError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetProjectError { fn from(err: serde_json::error::Error) -> GetProjectError { GetProjectError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetProjectError { fn from(err: CredentialsError) -> GetProjectError { GetProjectError::Credentials(err) } } impl From<HttpDispatchError> for GetProjectError { fn from(err: HttpDispatchError) -> GetProjectError { GetProjectError::HttpDispatch(err) } } impl From<io::Error> for GetProjectError { fn from(err: io::Error) -> GetProjectError { GetProjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetProjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetProjectError { fn description(&self) -> &str { match *self { GetProjectError::Argument(ref cause) => cause, GetProjectError::LimitExceeded(ref cause) => cause, GetProjectError::NotFound(ref cause) => cause, GetProjectError::ServiceAccount(ref cause) => cause, GetProjectError::Validation(ref cause) => cause, GetProjectError::Credentials(ref err) => err.description(), GetProjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetProjectError::Unknown(ref cause) => cause, } } } /// Errors returned by GetRemoteAccessSession #[derive(Debug, PartialEq)] pub enum GetRemoteAccessSessionError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
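// Caller-side, these error enums are usually consumed with a `match`. A
// minimal sketch, assuming a `client` whose `get_remote_access_session`
// returns `Result<_, GetRemoteAccessSessionError>` (names and signature
// illustrative):
//
//     match client.get_remote_access_session(&request) {
//         Ok(output) => println!("session: {:?}", output),
//         Err(GetRemoteAccessSessionError::NotFound(msg)) => eprintln!("no such session: {}", msg),
//         Err(other) => eprintln!("request failed: {}", other),
//     }
//
// The `fmt::Display` impl below is what lets the final arm print the
// underlying cause directly.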
Unknown(String), } impl GetRemoteAccessSessionError { pub fn from_body(body: &str) -> GetRemoteAccessSessionError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { GetRemoteAccessSessionError::Argument(String::from(error_message)) } "LimitExceededException" => { GetRemoteAccessSessionError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { GetRemoteAccessSessionError::NotFound(String::from(error_message)) } "ServiceAccountException" => { GetRemoteAccessSessionError::ServiceAccount(String::from(error_message)) } "ValidationException" => { GetRemoteAccessSessionError::Validation(error_message.to_string()) } _ => GetRemoteAccessSessionError::Unknown(String::from(body)), } } Err(_) => GetRemoteAccessSessionError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetRemoteAccessSessionError { fn from(err: serde_json::error::Error) -> GetRemoteAccessSessionError { GetRemoteAccessSessionError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetRemoteAccessSessionError { fn from(err: CredentialsError) -> GetRemoteAccessSessionError { GetRemoteAccessSessionError::Credentials(err) } } impl From<HttpDispatchError> for GetRemoteAccessSessionError { fn from(err: HttpDispatchError) -> GetRemoteAccessSessionError { GetRemoteAccessSessionError::HttpDispatch(err) } } impl From<io::Error> for GetRemoteAccessSessionError { fn from(err: io::Error) -> GetRemoteAccessSessionError { GetRemoteAccessSessionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetRemoteAccessSessionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetRemoteAccessSessionError { fn description(&self) -> &str { match *self { GetRemoteAccessSessionError::Argument(ref cause) => cause, GetRemoteAccessSessionError::LimitExceeded(ref cause) => cause, GetRemoteAccessSessionError::NotFound(ref cause) => cause, GetRemoteAccessSessionError::ServiceAccount(ref cause) => cause, GetRemoteAccessSessionError::Validation(ref cause) => cause, GetRemoteAccessSessionError::Credentials(ref err) => err.description(), GetRemoteAccessSessionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetRemoteAccessSessionError::Unknown(ref cause) => cause, } } } /// Errors returned by GetRun #[derive(Debug, PartialEq)] pub enum GetRunError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetRunError { pub fn from_body(body: &str) -> GetRunError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetRunError::Argument(String::from(error_message)), "LimitExceededException" => { GetRunError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetRunError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetRunError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetRunError::Validation(error_message.to_string()), _ => GetRunError::Unknown(String::from(body)), } } Err(_) => GetRunError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetRunError { fn from(err: serde_json::error::Error) -> GetRunError { GetRunError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetRunError { fn from(err: CredentialsError) -> GetRunError { GetRunError::Credentials(err) } } impl From<HttpDispatchError> for GetRunError { fn from(err: HttpDispatchError) -> GetRunError { GetRunError::HttpDispatch(err) } } impl From<io::Error> for GetRunError { fn from(err: io::Error) -> GetRunError { GetRunError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetRunError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetRunError { fn description(&self) -> &str { match *self { GetRunError::Argument(ref cause) => cause, GetRunError::LimitExceeded(ref cause) => cause, GetRunError::NotFound(ref cause) => cause, GetRunError::ServiceAccount(ref cause) => cause, GetRunError::Validation(ref cause) => cause, GetRunError::Credentials(ref err) => err.description(), GetRunError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetRunError::Unknown(ref cause) => cause, } } } /// Errors returned by GetSuite #[derive(Debug, PartialEq)] pub enum GetSuiteError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetSuiteError { pub fn from_body(body: &str) -> GetSuiteError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetSuiteError::Argument(String::from(error_message)), "LimitExceededException" => { GetSuiteError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetSuiteError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetSuiteError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetSuiteError::Validation(error_message.to_string()), _ => GetSuiteError::Unknown(String::from(body)), } } Err(_) => GetSuiteError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetSuiteError { fn from(err: serde_json::error::Error) -> GetSuiteError { GetSuiteError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetSuiteError { fn from(err: CredentialsError) -> GetSuiteError { GetSuiteError::Credentials(err) } } impl From<HttpDispatchError> for GetSuiteError { fn from(err: HttpDispatchError) -> GetSuiteError { GetSuiteError::HttpDispatch(err) } } impl From<io::Error> for GetSuiteError { fn from(err: io::Error) -> GetSuiteError { GetSuiteError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetSuiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetSuiteError { fn description(&self) -> &str { match *self { GetSuiteError::Argument(ref cause) => cause, GetSuiteError::LimitExceeded(ref cause) => cause, GetSuiteError::NotFound(ref cause) => cause, GetSuiteError::ServiceAccount(ref cause) => cause, GetSuiteError::Validation(ref cause) => cause, GetSuiteError::Credentials(ref err) => err.description(), GetSuiteError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetSuiteError::Unknown(ref cause) => cause, } } } /// Errors returned by GetTest #[derive(Debug, PartialEq)] pub enum GetTestError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetTestError { pub fn from_body(body: &str) -> GetTestError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetTestError::Argument(String::from(error_message)), "LimitExceededException" => { GetTestError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetTestError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetTestError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetTestError::Validation(error_message.to_string()), _ => GetTestError::Unknown(String::from(body)), } } Err(_) => GetTestError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetTestError { fn from(err: serde_json::error::Error) -> GetTestError { GetTestError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetTestError { fn from(err: CredentialsError) -> GetTestError { GetTestError::Credentials(err) } } impl From<HttpDispatchError> for GetTestError { fn from(err: HttpDispatchError) -> GetTestError { GetTestError::HttpDispatch(err) } } impl From<io::Error> for GetTestError { fn from(err: io::Error) -> GetTestError { GetTestError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetTestError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetTestError { fn description(&self) -> &str { match *self { GetTestError::Argument(ref cause) => cause, GetTestError::LimitExceeded(ref cause) => cause, GetTestError::NotFound(ref cause) => cause, GetTestError::ServiceAccount(ref cause) => cause, GetTestError::Validation(ref cause) => cause, GetTestError::Credentials(ref err) => err.description(), GetTestError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetTestError::Unknown(ref cause) => cause, } } } /// Errors returned by GetUpload #[derive(Debug, PartialEq)] pub enum GetUploadError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetUploadError { pub fn from_body(body: &str) -> GetUploadError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => GetUploadError::Argument(String::from(error_message)), "LimitExceededException" => { GetUploadError::LimitExceeded(String::from(error_message)) } "NotFoundException" => GetUploadError::NotFound(String::from(error_message)), "ServiceAccountException" => { GetUploadError::ServiceAccount(String::from(error_message)) } "ValidationException" => GetUploadError::Validation(error_message.to_string()), _ => GetUploadError::Unknown(String::from(body)), } } Err(_) => GetUploadError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for GetUploadError { fn from(err: serde_json::error::Error) -> GetUploadError { GetUploadError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for GetUploadError { fn from(err: CredentialsError) -> GetUploadError { GetUploadError::Credentials(err) } } impl From<HttpDispatchError> for GetUploadError { fn from(err: HttpDispatchError) -> GetUploadError { GetUploadError::HttpDispatch(err) } } impl From<io::Error> for GetUploadError { fn from(err: io::Error) -> GetUploadError { GetUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetUploadError { fn description(&self) -> &str { match *self { GetUploadError::Argument(ref cause) => cause, GetUploadError::LimitExceeded(ref cause) => cause, GetUploadError::NotFound(ref cause) => cause, GetUploadError::ServiceAccount(ref cause) => cause, GetUploadError::Validation(ref cause) => cause, GetUploadError::Credentials(ref err) => err.description(), GetUploadError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by InstallToRemoteAccessSession #[derive(Debug, PartialEq)] pub enum InstallToRemoteAccessSessionError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
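// The four `From` impls repeated for each error type exist so that `?` can
// promote lower-level failures into the operation's error. A sketch, assuming
// a hypothetical helper that loads an app binary from disk:
//
//     fn read_app_bytes(path: &str) -> Result<Vec<u8>, InstallToRemoteAccessSessionError> {
//         let bytes = std::fs::read(path)?; // io::Error -> HttpDispatch via From
//         Ok(bytes)
//     }
//
// Note that `io::Error` has no variant of its own; it is funneled through
// `HttpDispatchError` into the `HttpDispatch` variant.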
Unknown(String), } impl InstallToRemoteAccessSessionError { pub fn from_body(body: &str) -> InstallToRemoteAccessSessionError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { InstallToRemoteAccessSessionError::Argument(String::from(error_message)) } "LimitExceededException" => InstallToRemoteAccessSessionError::LimitExceeded( String::from(error_message), ), "NotFoundException" => { InstallToRemoteAccessSessionError::NotFound(String::from(error_message)) } "ServiceAccountException" => InstallToRemoteAccessSessionError::ServiceAccount( String::from(error_message), ), "ValidationException" => { InstallToRemoteAccessSessionError::Validation(error_message.to_string()) } _ => InstallToRemoteAccessSessionError::Unknown(String::from(body)), } } Err(_) => InstallToRemoteAccessSessionError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for InstallToRemoteAccessSessionError { fn from(err: serde_json::error::Error) -> InstallToRemoteAccessSessionError { InstallToRemoteAccessSessionError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for InstallToRemoteAccessSessionError { fn from(err: CredentialsError) -> InstallToRemoteAccessSessionError { InstallToRemoteAccessSessionError::Credentials(err) } } impl From<HttpDispatchError> for InstallToRemoteAccessSessionError { fn from(err: HttpDispatchError) -> InstallToRemoteAccessSessionError { InstallToRemoteAccessSessionError::HttpDispatch(err) } } impl From<io::Error> for InstallToRemoteAccessSessionError { fn from(err: io::Error) -> InstallToRemoteAccessSessionError { InstallToRemoteAccessSessionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for InstallToRemoteAccessSessionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for InstallToRemoteAccessSessionError { fn description(&self) -> &str { match *self { InstallToRemoteAccessSessionError::Argument(ref cause) => cause, InstallToRemoteAccessSessionError::LimitExceeded(ref cause) => cause, InstallToRemoteAccessSessionError::NotFound(ref cause) => cause, InstallToRemoteAccessSessionError::ServiceAccount(ref cause) => cause, InstallToRemoteAccessSessionError::Validation(ref cause) => cause, InstallToRemoteAccessSessionError::Credentials(ref err) => err.description(), InstallToRemoteAccessSessionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } InstallToRemoteAccessSessionError::Unknown(ref cause) => cause, } } } /// Errors returned by ListArtifacts #[derive(Debug, PartialEq)] pub enum ListArtifactsError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListArtifactsError { pub fn from_body(body: &str) -> ListArtifactsError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { ListArtifactsError::Argument(String::from(error_message)) } "LimitExceededException" => { ListArtifactsError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { ListArtifactsError::NotFound(String::from(error_message)) } "ServiceAccountException" => { ListArtifactsError::ServiceAccount(String::from(error_message)) } "ValidationException" => { ListArtifactsError::Validation(error_message.to_string()) } _ => ListArtifactsError::Unknown(String::from(body)), } } Err(_) => ListArtifactsError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for ListArtifactsError { fn from(err: serde_json::error::Error) -> ListArtifactsError { ListArtifactsError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for ListArtifactsError { fn from(err: CredentialsError) -> ListArtifactsError { ListArtifactsError::Credentials(err) } } impl From<HttpDispatchError> for ListArtifactsError { fn from(err: HttpDispatchError) -> ListArtifactsError { ListArtifactsError::HttpDispatch(err) } } impl From<io::Error> for ListArtifactsError { fn from(err: io::Error) -> ListArtifactsError { ListArtifactsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListArtifactsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListArtifactsError { fn description(&self) -> &str { match *self { ListArtifactsError::Argument(ref cause) => cause, ListArtifactsError::LimitExceeded(ref cause) => cause, ListArtifactsError::NotFound(ref cause) => cause, ListArtifactsError::ServiceAccount(ref cause) => cause, ListArtifactsError::Validation(ref cause) => cause, ListArtifactsError::Credentials(ref err) => err.description(), ListArtifactsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListArtifactsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListDeviceInstances #[derive(Debug, PartialEq)] pub enum ListDeviceInstancesError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListDeviceInstancesError { pub fn from_body(body: &str) -> ListDeviceInstancesError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { ListDeviceInstancesError::Argument(String::from(error_message)) } "LimitExceededException" => { ListDeviceInstancesError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { ListDeviceInstancesError::NotFound(String::from(error_message)) } "ServiceAccountException" => { ListDeviceInstancesError::ServiceAccount(String::from(error_message)) } "ValidationException" => { ListDeviceInstancesError::Validation(error_message.to_string()) } _ => ListDeviceInstancesError::Unknown(String::from(body)), } } Err(_) => ListDeviceInstancesError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for ListDeviceInstancesError { fn from(err: serde_json::error::Error) -> ListDeviceInstancesError { ListDeviceInstancesError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for ListDeviceInstancesError { fn from(err: CredentialsError) -> ListDeviceInstancesError { ListDeviceInstancesError::Credentials(err) } } impl From<HttpDispatchError> for ListDeviceInstancesError { fn from(err: HttpDispatchError) -> ListDeviceInstancesError { ListDeviceInstancesError::HttpDispatch(err) } } impl From<io::Error> for ListDeviceInstancesError { fn from(err: io::Error) -> ListDeviceInstancesError { ListDeviceInstancesError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListDeviceInstancesError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListDeviceInstancesError { fn description(&self) -> &str { match *self { ListDeviceInstancesError::Argument(ref cause) => cause, ListDeviceInstancesError::LimitExceeded(ref cause) => cause, ListDeviceInstancesError::NotFound(ref cause) => cause, ListDeviceInstancesError::ServiceAccount(ref cause) => cause, ListDeviceInstancesError::Validation(ref cause) => cause, ListDeviceInstancesError::Credentials(ref err) => err.description(), ListDeviceInstancesError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListDeviceInstancesError::Unknown(ref cause) => cause, } } } /// Errors returned by ListDevicePools #[derive(Debug, PartialEq)] pub enum ListDevicePoolsError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListDevicePoolsError { pub fn from_body(body: &str) -> ListDevicePoolsError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { ListDevicePoolsError::Argument(String::from(error_message)) } "LimitExceededException" => { ListDevicePoolsError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { ListDevicePoolsError::NotFound(String::from(error_message)) } "ServiceAccountException" => { ListDevicePoolsError::ServiceAccount(String::from(error_message)) } "ValidationException" => { ListDevicePoolsError::Validation(error_message.to_string()) } _ => ListDevicePoolsError::Unknown(String::from(body)), } } Err(_) => ListDevicePoolsError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for ListDevicePoolsError { fn from(err: serde_json::error::Error) -> ListDevicePoolsError { ListDevicePoolsError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for ListDevicePoolsError { fn from(err: CredentialsError) -> ListDevicePoolsError { ListDevicePoolsError::Credentials(err) } } impl From<HttpDispatchError> for ListDevicePoolsError { fn from(err: HttpDispatchError) -> ListDevicePoolsError { ListDevicePoolsError::HttpDispatch(err) } } impl From<io::Error> for ListDevicePoolsError { fn from(err: io::Error) -> ListDevicePoolsError { ListDevicePoolsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListDevicePoolsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListDevicePoolsError { fn description(&self) -> &str { match *self { ListDevicePoolsError::Argument(ref cause) => cause, ListDevicePoolsError::LimitExceeded(ref cause) => cause, ListDevicePoolsError::NotFound(ref cause) => cause, ListDevicePoolsError::ServiceAccount(ref cause) => cause, ListDevicePoolsError::Validation(ref cause) => cause, ListDevicePoolsError::Credentials(ref err) => err.description(), ListDevicePoolsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListDevicePoolsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListDevices #[derive(Debug, PartialEq)] pub enum ListDevicesError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListDevicesError { pub fn from_body(body: &str) -> ListDevicesError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => ListDevicesError::Argument(String::from(error_message)), "LimitExceededException" => { ListDevicesError::LimitExceeded(String::from(error_message)) } "NotFoundException" => ListDevicesError::NotFound(String::from(error_message)), "ServiceAccountException" => { ListDevicesError::ServiceAccount(String::from(error_message)) } "ValidationException" => { ListDevicesError::Validation(error_message.to_string()) } _ => ListDevicesError::Unknown(String::from(body)), } } Err(_) => ListDevicesError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for ListDevicesError { fn from(err: serde_json::error::Error) -> ListDevicesError { ListDevicesError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for ListDevicesError { fn from(err: CredentialsError) -> ListDevicesError { ListDevicesError::Credentials(err) } } impl From<HttpDispatchError> for ListDevicesError { fn from(err: HttpDispatchError) -> ListDevicesError { ListDevicesError::HttpDispatch(err) } } impl From<io::Error> for ListDevicesError { fn from(err: io::Error) -> ListDevicesError { ListDevicesError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListDevicesError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListDevicesError { fn description(&self) -> &str { match *self { ListDevicesError::Argument(ref cause) => cause, ListDevicesError::LimitExceeded(ref cause) => cause, ListDevicesError::NotFound(ref cause) => cause, ListDevicesError::ServiceAccount(ref cause) => cause, ListDevicesError::Validation(ref cause) => cause, ListDevicesError::Credentials(ref err) => err.description(), ListDevicesError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListDevicesError::Unknown(ref cause) => cause, } } } /// Errors returned by ListInstanceProfiles #[derive(Debug, PartialEq)] pub enum ListInstanceProfilesError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
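// `fmt::Display` and `std::error::Error` are wired together throughout this
// file: `description` returns the stored cause string (delegating for the
// credentials and dispatch wrappers), and `Display` prints whatever
// `description` returns. For example (values illustrative):
//
//     let err = ListInstanceProfilesError::Validation("maxResults out of range".to_string());
//     assert_eq!(format!("{}", err), "maxResults out of range");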
Unknown(String), } impl ListInstanceProfilesError { pub fn from_body(body: &str) -> ListInstanceProfilesError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { ListInstanceProfilesError::Argument(String::from(error_message)) } "LimitExceededException" => { ListInstanceProfilesError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { ListInstanceProfilesError::NotFound(String::from(error_message)) } "ServiceAccountException" => { ListInstanceProfilesError::ServiceAccount(String::from(error_message)) } "ValidationException" => { ListInstanceProfilesError::Validation(error_message.to_string()) } _ => ListInstanceProfilesError::Unknown(String::from(body)), } } Err(_) => ListInstanceProfilesError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for ListInstanceProfilesError { fn from(err: serde_json::error::Error) -> ListInstanceProfilesError { ListInstanceProfilesError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for ListInstanceProfilesError { fn from(err: CredentialsError) -> ListInstanceProfilesError { ListInstanceProfilesError::Credentials(err) } } impl From<HttpDispatchError> for ListInstanceProfilesError { fn from(err: HttpDispatchError) -> ListInstanceProfilesError { ListInstanceProfilesError::HttpDispatch(err) } } impl From<io::Error> for ListInstanceProfilesError { fn from(err: io::Error) -> ListInstanceProfilesError { ListInstanceProfilesError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListInstanceProfilesError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListInstanceProfilesError { fn description(&self) -> &str { match *self { ListInstanceProfilesError::Argument(ref cause) => cause, ListInstanceProfilesError::LimitExceeded(ref cause) => cause, ListInstanceProfilesError::NotFound(ref cause) => cause, ListInstanceProfilesError::ServiceAccount(ref cause) => cause, ListInstanceProfilesError::Validation(ref cause) => cause, ListInstanceProfilesError::Credentials(ref err) => err.description(), ListInstanceProfilesError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListInstanceProfilesError::Unknown(ref cause) => cause, } } } /// Errors returned by ListJobs #[derive(Debug, PartialEq)] pub enum ListJobsError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
/// Errors returned by ListJobs
#[derive(Debug, PartialEq)]
pub enum ListJobsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListJobsError {
    pub fn from_body(body: &str) -> ListJobsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListJobsError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListJobsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListJobsError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListJobsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => ListJobsError::Validation(error_message.to_string()),
                    _ => ListJobsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListJobsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListJobsError {
    fn from(err: serde_json::error::Error) -> ListJobsError {
        ListJobsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListJobsError {
    fn from(err: CredentialsError) -> ListJobsError {
        ListJobsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListJobsError {
    fn from(err: HttpDispatchError) -> ListJobsError {
        ListJobsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListJobsError {
    fn from(err: io::Error) -> ListJobsError {
        ListJobsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListJobsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListJobsError {
    fn description(&self) -> &str {
        match *self {
            ListJobsError::Argument(ref cause) => cause,
            ListJobsError::LimitExceeded(ref cause) => cause,
            ListJobsError::NotFound(ref cause) => cause,
            ListJobsError::ServiceAccount(ref cause) => cause,
            ListJobsError::Validation(ref cause) => cause,
            ListJobsError::Credentials(ref err) => err.description(),
            ListJobsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListJobsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListNetworkProfiles
#[derive(Debug, PartialEq)]
pub enum ListNetworkProfilesError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListNetworkProfilesError {
    pub fn from_body(body: &str) -> ListNetworkProfilesError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        ListNetworkProfilesError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ListNetworkProfilesError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        ListNetworkProfilesError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        ListNetworkProfilesError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListNetworkProfilesError::Validation(error_message.to_string())
                    }
                    _ => ListNetworkProfilesError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListNetworkProfilesError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListNetworkProfilesError {
    fn from(err: serde_json::error::Error) -> ListNetworkProfilesError {
        ListNetworkProfilesError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListNetworkProfilesError {
    fn from(err: CredentialsError) -> ListNetworkProfilesError {
        ListNetworkProfilesError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListNetworkProfilesError {
    fn from(err: HttpDispatchError) -> ListNetworkProfilesError {
        ListNetworkProfilesError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListNetworkProfilesError {
    fn from(err: io::Error) -> ListNetworkProfilesError {
        ListNetworkProfilesError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListNetworkProfilesError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListNetworkProfilesError {
    fn description(&self) -> &str {
        match *self {
            ListNetworkProfilesError::Argument(ref cause) => cause,
            ListNetworkProfilesError::LimitExceeded(ref cause) => cause,
            ListNetworkProfilesError::NotFound(ref cause) => cause,
            ListNetworkProfilesError::ServiceAccount(ref cause) => cause,
            ListNetworkProfilesError::Validation(ref cause) => cause,
            ListNetworkProfilesError::Credentials(ref err) => err.description(),
            ListNetworkProfilesError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListNetworkProfilesError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListOfferingPromotions
#[derive(Debug, PartialEq)]
pub enum ListOfferingPromotionsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>Exception gets thrown when a user is not eligible to perform the specified transaction.</p>
    NotEligible(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListOfferingPromotionsError {
    pub fn from_body(body: &str) -> ListOfferingPromotionsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        ListOfferingPromotionsError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ListOfferingPromotionsError::LimitExceeded(String::from(error_message))
                    }
                    "NotEligibleException" => {
                        ListOfferingPromotionsError::NotEligible(String::from(error_message))
                    }
                    "NotFoundException" => {
                        ListOfferingPromotionsError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        ListOfferingPromotionsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListOfferingPromotionsError::Validation(error_message.to_string())
                    }
                    _ => ListOfferingPromotionsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListOfferingPromotionsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListOfferingPromotionsError {
    fn from(err: serde_json::error::Error) -> ListOfferingPromotionsError {
        ListOfferingPromotionsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListOfferingPromotionsError {
    fn from(err: CredentialsError) -> ListOfferingPromotionsError {
        ListOfferingPromotionsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListOfferingPromotionsError {
    fn from(err: HttpDispatchError) -> ListOfferingPromotionsError {
        ListOfferingPromotionsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListOfferingPromotionsError {
    fn from(err: io::Error) -> ListOfferingPromotionsError {
        ListOfferingPromotionsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListOfferingPromotionsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListOfferingPromotionsError {
    fn description(&self) -> &str {
        match *self {
            ListOfferingPromotionsError::Argument(ref cause) => cause,
            ListOfferingPromotionsError::LimitExceeded(ref cause) => cause,
            ListOfferingPromotionsError::NotEligible(ref cause) => cause,
            ListOfferingPromotionsError::NotFound(ref cause) => cause,
            ListOfferingPromotionsError::ServiceAccount(ref cause) => cause,
            ListOfferingPromotionsError::Validation(ref cause) => cause,
            ListOfferingPromotionsError::Credentials(ref err) => err.description(),
            ListOfferingPromotionsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListOfferingPromotionsError::Unknown(ref cause) => cause,
        }
    }
}
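// --- Illustrative sketch (editor's addition, not part of the generated
// service code). A bare `__type` with no `#` namespace also resolves:
// splitting on `#` yields one piece and `pieces.last()` returns the whole
// string. When `message` is absent, the full raw body is used as the cause.
// The sample body below is an assumption for demonstration only.
#[cfg(test)]
mod list_offering_promotions_from_body_sketch {
    use super::ListOfferingPromotionsError;

    #[test]
    fn bare_type_and_missing_message() {
        let body = r#"{"__type":"NotEligibleException"}"#;
        assert_eq!(
            ListOfferingPromotionsError::from_body(body),
            ListOfferingPromotionsError::NotEligible(body.to_string())
        );
    }
}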
/// Errors returned by ListOfferingTransactions
#[derive(Debug, PartialEq)]
pub enum ListOfferingTransactionsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>Exception gets thrown when a user is not eligible to perform the specified transaction.</p>
    NotEligible(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListOfferingTransactionsError {
    pub fn from_body(body: &str) -> ListOfferingTransactionsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        ListOfferingTransactionsError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ListOfferingTransactionsError::LimitExceeded(String::from(error_message))
                    }
                    "NotEligibleException" => {
                        ListOfferingTransactionsError::NotEligible(String::from(error_message))
                    }
                    "NotFoundException" => {
                        ListOfferingTransactionsError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        ListOfferingTransactionsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListOfferingTransactionsError::Validation(error_message.to_string())
                    }
                    _ => ListOfferingTransactionsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListOfferingTransactionsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListOfferingTransactionsError {
    fn from(err: serde_json::error::Error) -> ListOfferingTransactionsError {
        ListOfferingTransactionsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListOfferingTransactionsError {
    fn from(err: CredentialsError) -> ListOfferingTransactionsError {
        ListOfferingTransactionsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListOfferingTransactionsError {
    fn from(err: HttpDispatchError) -> ListOfferingTransactionsError {
        ListOfferingTransactionsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListOfferingTransactionsError {
    fn from(err: io::Error) -> ListOfferingTransactionsError {
        ListOfferingTransactionsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListOfferingTransactionsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListOfferingTransactionsError {
    fn description(&self) -> &str {
        match *self {
            ListOfferingTransactionsError::Argument(ref cause) => cause,
            ListOfferingTransactionsError::LimitExceeded(ref cause) => cause,
            ListOfferingTransactionsError::NotEligible(ref cause) => cause,
            ListOfferingTransactionsError::NotFound(ref cause) => cause,
            ListOfferingTransactionsError::ServiceAccount(ref cause) => cause,
            ListOfferingTransactionsError::Validation(ref cause) => cause,
            ListOfferingTransactionsError::Credentials(ref err) => err.description(),
            ListOfferingTransactionsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListOfferingTransactionsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListOfferings
#[derive(Debug, PartialEq)]
pub enum ListOfferingsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>Exception gets thrown when a user is not eligible to perform the specified transaction.</p>
    NotEligible(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListOfferingsError {
    pub fn from_body(body: &str) -> ListOfferingsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        ListOfferingsError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ListOfferingsError::LimitExceeded(String::from(error_message))
                    }
                    "NotEligibleException" => {
                        ListOfferingsError::NotEligible(String::from(error_message))
                    }
                    "NotFoundException" => {
                        ListOfferingsError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        ListOfferingsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListOfferingsError::Validation(error_message.to_string())
                    }
                    _ => ListOfferingsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListOfferingsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListOfferingsError {
    fn from(err: serde_json::error::Error) -> ListOfferingsError {
        ListOfferingsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListOfferingsError {
    fn from(err: CredentialsError) -> ListOfferingsError {
        ListOfferingsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListOfferingsError {
    fn from(err: HttpDispatchError) -> ListOfferingsError {
        ListOfferingsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListOfferingsError {
    fn from(err: io::Error) -> ListOfferingsError {
        ListOfferingsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListOfferingsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListOfferingsError {
    fn description(&self) -> &str {
        match *self {
            ListOfferingsError::Argument(ref cause) => cause,
            ListOfferingsError::LimitExceeded(ref cause) => cause,
            ListOfferingsError::NotEligible(ref cause) => cause,
            ListOfferingsError::NotFound(ref cause) => cause,
            ListOfferingsError::ServiceAccount(ref cause) => cause,
            ListOfferingsError::Validation(ref cause) => cause,
            ListOfferingsError::Credentials(ref err) => err.description(),
            ListOfferingsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListOfferingsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListProjects
#[derive(Debug, PartialEq)]
pub enum ListProjectsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListProjectsError {
    pub fn from_body(body: &str) -> ListProjectsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListProjectsError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListProjectsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListProjectsError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListProjectsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListProjectsError::Validation(error_message.to_string())
                    }
                    _ => ListProjectsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListProjectsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListProjectsError {
    fn from(err: serde_json::error::Error) -> ListProjectsError {
        ListProjectsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListProjectsError {
    fn from(err: CredentialsError) -> ListProjectsError {
        ListProjectsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListProjectsError {
    fn from(err: HttpDispatchError) -> ListProjectsError {
        ListProjectsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListProjectsError {
    fn from(err: io::Error) -> ListProjectsError {
        ListProjectsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListProjectsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListProjectsError {
    fn description(&self) -> &str {
        match *self {
            ListProjectsError::Argument(ref cause) => cause,
            ListProjectsError::LimitExceeded(ref cause) => cause,
            ListProjectsError::NotFound(ref cause) => cause,
            ListProjectsError::ServiceAccount(ref cause) => cause,
            ListProjectsError::Validation(ref cause) => cause,
            ListProjectsError::Credentials(ref err) => err.description(),
            ListProjectsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListProjectsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListRemoteAccessSessions
#[derive(Debug, PartialEq)]
pub enum ListRemoteAccessSessionsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListRemoteAccessSessionsError {
    pub fn from_body(body: &str) -> ListRemoteAccessSessionsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        ListRemoteAccessSessionsError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ListRemoteAccessSessionsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        ListRemoteAccessSessionsError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        ListRemoteAccessSessionsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListRemoteAccessSessionsError::Validation(error_message.to_string())
                    }
                    _ => ListRemoteAccessSessionsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListRemoteAccessSessionsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListRemoteAccessSessionsError {
    fn from(err: serde_json::error::Error) -> ListRemoteAccessSessionsError {
        ListRemoteAccessSessionsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListRemoteAccessSessionsError {
    fn from(err: CredentialsError) -> ListRemoteAccessSessionsError {
        ListRemoteAccessSessionsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListRemoteAccessSessionsError {
    fn from(err: HttpDispatchError) -> ListRemoteAccessSessionsError {
        ListRemoteAccessSessionsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListRemoteAccessSessionsError {
    fn from(err: io::Error) -> ListRemoteAccessSessionsError {
        ListRemoteAccessSessionsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListRemoteAccessSessionsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListRemoteAccessSessionsError {
    fn description(&self) -> &str {
        match *self {
            ListRemoteAccessSessionsError::Argument(ref cause) => cause,
            ListRemoteAccessSessionsError::LimitExceeded(ref cause) => cause,
            ListRemoteAccessSessionsError::NotFound(ref cause) => cause,
            ListRemoteAccessSessionsError::ServiceAccount(ref cause) => cause,
            ListRemoteAccessSessionsError::Validation(ref cause) => cause,
            ListRemoteAccessSessionsError::Credentials(ref err) => err.description(),
            ListRemoteAccessSessionsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListRemoteAccessSessionsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListRuns
#[derive(Debug, PartialEq)]
pub enum ListRunsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListRunsError {
    pub fn from_body(body: &str) -> ListRunsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListRunsError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListRunsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListRunsError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListRunsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => ListRunsError::Validation(error_message.to_string()),
                    _ => ListRunsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListRunsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListRunsError {
    fn from(err: serde_json::error::Error) -> ListRunsError {
        ListRunsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListRunsError {
    fn from(err: CredentialsError) -> ListRunsError {
        ListRunsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListRunsError {
    fn from(err: HttpDispatchError) -> ListRunsError {
        ListRunsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListRunsError {
    fn from(err: io::Error) -> ListRunsError {
        ListRunsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListRunsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListRunsError {
    fn description(&self) -> &str {
        match *self {
            ListRunsError::Argument(ref cause) => cause,
            ListRunsError::LimitExceeded(ref cause) => cause,
            ListRunsError::NotFound(ref cause) => cause,
            ListRunsError::ServiceAccount(ref cause) => cause,
            ListRunsError::Validation(ref cause) => cause,
            ListRunsError::Credentials(ref err) => err.description(),
            ListRunsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListRunsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListSamples
#[derive(Debug, PartialEq)]
pub enum ListSamplesError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListSamplesError {
    pub fn from_body(body: &str) -> ListSamplesError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListSamplesError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListSamplesError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListSamplesError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListSamplesError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListSamplesError::Validation(error_message.to_string())
                    }
                    _ => ListSamplesError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListSamplesError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListSamplesError {
    fn from(err: serde_json::error::Error) -> ListSamplesError {
        ListSamplesError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListSamplesError {
    fn from(err: CredentialsError) -> ListSamplesError {
        ListSamplesError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListSamplesError {
    fn from(err: HttpDispatchError) -> ListSamplesError {
        ListSamplesError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListSamplesError {
    fn from(err: io::Error) -> ListSamplesError {
        ListSamplesError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListSamplesError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListSamplesError {
    fn description(&self) -> &str {
        match *self {
            ListSamplesError::Argument(ref cause) => cause,
            ListSamplesError::LimitExceeded(ref cause) => cause,
            ListSamplesError::NotFound(ref cause) => cause,
            ListSamplesError::ServiceAccount(ref cause) => cause,
            ListSamplesError::Validation(ref cause) => cause,
            ListSamplesError::Credentials(ref err) => err.description(),
            ListSamplesError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListSamplesError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListSuites
#[derive(Debug, PartialEq)]
pub enum ListSuitesError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListSuitesError {
    pub fn from_body(body: &str) -> ListSuitesError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListSuitesError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListSuitesError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListSuitesError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListSuitesError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => ListSuitesError::Validation(error_message.to_string()),
                    _ => ListSuitesError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListSuitesError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListSuitesError {
    fn from(err: serde_json::error::Error) -> ListSuitesError {
        ListSuitesError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListSuitesError {
    fn from(err: CredentialsError) -> ListSuitesError {
        ListSuitesError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListSuitesError {
    fn from(err: HttpDispatchError) -> ListSuitesError {
        ListSuitesError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListSuitesError {
    fn from(err: io::Error) -> ListSuitesError {
        ListSuitesError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListSuitesError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListSuitesError {
    fn description(&self) -> &str {
        match *self {
            ListSuitesError::Argument(ref cause) => cause,
            ListSuitesError::LimitExceeded(ref cause) => cause,
            ListSuitesError::NotFound(ref cause) => cause,
            ListSuitesError::ServiceAccount(ref cause) => cause,
            ListSuitesError::Validation(ref cause) => cause,
            ListSuitesError::Credentials(ref err) => err.description(),
            ListSuitesError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListSuitesError::Unknown(ref cause) => cause,
        }
    }
}
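// --- Illustrative sketch (editor's addition, not part of the generated
// service code). `Display` is implemented in terms of `Error::description`,
// so formatting an error prints the stored cause string directly. A minimal
// check using an assumed cause message:
#[cfg(test)]
mod list_suites_display_sketch {
    use super::ListSuitesError;

    #[test]
    fn display_prints_the_cause() {
        let err = ListSuitesError::LimitExceeded("too many suites requested".to_string());
        assert_eq!(format!("{}", err), "too many suites requested");
    }
}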
/// Errors returned by ListTests
#[derive(Debug, PartialEq)]
pub enum ListTestsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListTestsError {
    pub fn from_body(body: &str) -> ListTestsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListTestsError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListTestsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListTestsError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListTestsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => ListTestsError::Validation(error_message.to_string()),
                    _ => ListTestsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListTestsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListTestsError {
    fn from(err: serde_json::error::Error) -> ListTestsError {
        ListTestsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListTestsError {
    fn from(err: CredentialsError) -> ListTestsError {
        ListTestsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListTestsError {
    fn from(err: HttpDispatchError) -> ListTestsError {
        ListTestsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListTestsError {
    fn from(err: io::Error) -> ListTestsError {
        ListTestsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListTestsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListTestsError {
    fn description(&self) -> &str {
        match *self {
            ListTestsError::Argument(ref cause) => cause,
            ListTestsError::LimitExceeded(ref cause) => cause,
            ListTestsError::NotFound(ref cause) => cause,
            ListTestsError::ServiceAccount(ref cause) => cause,
            ListTestsError::Validation(ref cause) => cause,
            ListTestsError::Credentials(ref err) => err.description(),
            ListTestsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListTestsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListUniqueProblems
#[derive(Debug, PartialEq)]
pub enum ListUniqueProblemsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListUniqueProblemsError {
    pub fn from_body(body: &str) -> ListUniqueProblemsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        ListUniqueProblemsError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ListUniqueProblemsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        ListUniqueProblemsError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        ListUniqueProblemsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListUniqueProblemsError::Validation(error_message.to_string())
                    }
                    _ => ListUniqueProblemsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListUniqueProblemsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListUniqueProblemsError {
    fn from(err: serde_json::error::Error) -> ListUniqueProblemsError {
        ListUniqueProblemsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListUniqueProblemsError {
    fn from(err: CredentialsError) -> ListUniqueProblemsError {
        ListUniqueProblemsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListUniqueProblemsError {
    fn from(err: HttpDispatchError) -> ListUniqueProblemsError {
        ListUniqueProblemsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListUniqueProblemsError {
    fn from(err: io::Error) -> ListUniqueProblemsError {
        ListUniqueProblemsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListUniqueProblemsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListUniqueProblemsError {
    fn description(&self) -> &str {
        match *self {
            ListUniqueProblemsError::Argument(ref cause) => cause,
            ListUniqueProblemsError::LimitExceeded(ref cause) => cause,
            ListUniqueProblemsError::NotFound(ref cause) => cause,
            ListUniqueProblemsError::ServiceAccount(ref cause) => cause,
            ListUniqueProblemsError::Validation(ref cause) => cause,
            ListUniqueProblemsError::Credentials(ref err) => err.description(),
            ListUniqueProblemsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListUniqueProblemsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListUploads
#[derive(Debug, PartialEq)]
pub enum ListUploadsError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListUploadsError {
    pub fn from_body(body: &str) -> ListUploadsError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ListUploadsError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        ListUploadsError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ListUploadsError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ListUploadsError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ListUploadsError::Validation(error_message.to_string())
                    }
                    _ => ListUploadsError::Unknown(String::from(body)),
                }
            }
            Err(_) => ListUploadsError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ListUploadsError {
    fn from(err: serde_json::error::Error) -> ListUploadsError {
        ListUploadsError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ListUploadsError {
    fn from(err: CredentialsError) -> ListUploadsError {
        ListUploadsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListUploadsError {
    fn from(err: HttpDispatchError) -> ListUploadsError {
        ListUploadsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListUploadsError {
    fn from(err: io::Error) -> ListUploadsError {
        ListUploadsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListUploadsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListUploadsError {
    fn description(&self) -> &str {
        match *self {
            ListUploadsError::Argument(ref cause) => cause,
            ListUploadsError::LimitExceeded(ref cause) => cause,
            ListUploadsError::NotFound(ref cause) => cause,
            ListUploadsError::ServiceAccount(ref cause) => cause,
            ListUploadsError::Validation(ref cause) => cause,
            ListUploadsError::Credentials(ref err) => err.description(),
            ListUploadsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListUploadsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PurchaseOffering
#[derive(Debug, PartialEq)]
pub enum PurchaseOfferingError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>Exception gets thrown when a user is not eligible to perform the specified transaction.</p>
    NotEligible(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PurchaseOfferingError {
    pub fn from_body(body: &str) -> PurchaseOfferingError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        PurchaseOfferingError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        PurchaseOfferingError::LimitExceeded(String::from(error_message))
                    }
                    "NotEligibleException" => {
                        PurchaseOfferingError::NotEligible(String::from(error_message))
                    }
                    "NotFoundException" => {
                        PurchaseOfferingError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        PurchaseOfferingError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        PurchaseOfferingError::Validation(error_message.to_string())
                    }
                    _ => PurchaseOfferingError::Unknown(String::from(body)),
                }
            }
            Err(_) => PurchaseOfferingError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for PurchaseOfferingError {
    fn from(err: serde_json::error::Error) -> PurchaseOfferingError {
        PurchaseOfferingError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for PurchaseOfferingError {
    fn from(err: CredentialsError) -> PurchaseOfferingError {
        PurchaseOfferingError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PurchaseOfferingError {
    fn from(err: HttpDispatchError) -> PurchaseOfferingError {
        PurchaseOfferingError::HttpDispatch(err)
    }
}

impl From<io::Error> for PurchaseOfferingError {
    fn from(err: io::Error) -> PurchaseOfferingError {
        PurchaseOfferingError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PurchaseOfferingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PurchaseOfferingError {
    fn description(&self) -> &str {
        match *self {
            PurchaseOfferingError::Argument(ref cause) => cause,
            PurchaseOfferingError::LimitExceeded(ref cause) => cause,
            PurchaseOfferingError::NotEligible(ref cause) => cause,
            PurchaseOfferingError::NotFound(ref cause) => cause,
            PurchaseOfferingError::ServiceAccount(ref cause) => cause,
            PurchaseOfferingError::Validation(ref cause) => cause,
            PurchaseOfferingError::Credentials(ref err) => err.description(),
            PurchaseOfferingError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PurchaseOfferingError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by RenewOffering
#[derive(Debug, PartialEq)]
pub enum RenewOfferingError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>Exception gets thrown when a user is not eligible to perform the specified transaction.</p>
    NotEligible(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl RenewOfferingError {
    pub fn from_body(body: &str) -> RenewOfferingError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        RenewOfferingError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        RenewOfferingError::LimitExceeded(String::from(error_message))
                    }
                    "NotEligibleException" => {
                        RenewOfferingError::NotEligible(String::from(error_message))
                    }
                    "NotFoundException" => {
                        RenewOfferingError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        RenewOfferingError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        RenewOfferingError::Validation(error_message.to_string())
                    }
                    _ => RenewOfferingError::Unknown(String::from(body)),
                }
            }
            Err(_) => RenewOfferingError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for RenewOfferingError {
    fn from(err: serde_json::error::Error) -> RenewOfferingError {
        RenewOfferingError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for RenewOfferingError {
    fn from(err: CredentialsError) -> RenewOfferingError {
        RenewOfferingError::Credentials(err)
    }
}

impl From<HttpDispatchError> for RenewOfferingError {
    fn from(err: HttpDispatchError) -> RenewOfferingError {
        RenewOfferingError::HttpDispatch(err)
    }
}

impl From<io::Error> for RenewOfferingError {
    fn from(err: io::Error) -> RenewOfferingError {
        RenewOfferingError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for RenewOfferingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for RenewOfferingError {
    fn description(&self) -> &str {
        match *self {
            RenewOfferingError::Argument(ref cause) => cause,
            RenewOfferingError::LimitExceeded(ref cause) => cause,
            RenewOfferingError::NotEligible(ref cause) => cause,
            RenewOfferingError::NotFound(ref cause) => cause,
            RenewOfferingError::ServiceAccount(ref cause) => cause,
            RenewOfferingError::Validation(ref cause) => cause,
            RenewOfferingError::Credentials(ref err) => err.description(),
            RenewOfferingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            RenewOfferingError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ScheduleRun
#[derive(Debug, PartialEq)]
pub enum ScheduleRunError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>An entity with the same name already exists.</p>
    Idempotency(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ScheduleRunError {
    pub fn from_body(body: &str) -> ScheduleRunError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => ScheduleRunError::Argument(String::from(error_message)),
                    "IdempotencyException" => {
                        ScheduleRunError::Idempotency(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        ScheduleRunError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => ScheduleRunError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        ScheduleRunError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        ScheduleRunError::Validation(error_message.to_string())
                    }
                    _ => ScheduleRunError::Unknown(String::from(body)),
                }
            }
            Err(_) => ScheduleRunError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for ScheduleRunError {
    fn from(err: serde_json::error::Error) -> ScheduleRunError {
        ScheduleRunError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for ScheduleRunError {
    fn from(err: CredentialsError) -> ScheduleRunError {
        ScheduleRunError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ScheduleRunError {
    fn from(err: HttpDispatchError) -> ScheduleRunError {
        ScheduleRunError::HttpDispatch(err)
    }
}

impl From<io::Error> for ScheduleRunError {
    fn from(err: io::Error) -> ScheduleRunError {
        ScheduleRunError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ScheduleRunError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ScheduleRunError {
    fn description(&self) -> &str {
        match *self {
            ScheduleRunError::Argument(ref cause) => cause,
            ScheduleRunError::Idempotency(ref cause) => cause,
            ScheduleRunError::LimitExceeded(ref cause) => cause,
            ScheduleRunError::NotFound(ref cause) => cause,
            ScheduleRunError::ServiceAccount(ref cause) => cause,
            ScheduleRunError::Validation(ref cause) => cause,
            ScheduleRunError::Credentials(ref err) => err.description(),
            ScheduleRunError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ScheduleRunError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by StopRemoteAccessSession
#[derive(Debug, PartialEq)]
pub enum StopRemoteAccessSessionError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl StopRemoteAccessSessionError {
    pub fn from_body(body: &str) -> StopRemoteAccessSessionError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        StopRemoteAccessSessionError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        StopRemoteAccessSessionError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        StopRemoteAccessSessionError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        StopRemoteAccessSessionError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        StopRemoteAccessSessionError::Validation(error_message.to_string())
                    }
                    _ => StopRemoteAccessSessionError::Unknown(String::from(body)),
                }
            }
            Err(_) => StopRemoteAccessSessionError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for StopRemoteAccessSessionError {
    fn from(err: serde_json::error::Error) -> StopRemoteAccessSessionError {
        StopRemoteAccessSessionError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for StopRemoteAccessSessionError {
    fn from(err: CredentialsError) -> StopRemoteAccessSessionError {
        StopRemoteAccessSessionError::Credentials(err)
    }
}

impl From<HttpDispatchError> for StopRemoteAccessSessionError {
    fn from(err: HttpDispatchError) -> StopRemoteAccessSessionError {
        StopRemoteAccessSessionError::HttpDispatch(err)
    }
}

impl From<io::Error> for StopRemoteAccessSessionError {
    fn from(err: io::Error) -> StopRemoteAccessSessionError {
        StopRemoteAccessSessionError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for StopRemoteAccessSessionError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for StopRemoteAccessSessionError {
    fn description(&self) -> &str {
        match *self {
            StopRemoteAccessSessionError::Argument(ref cause) => cause,
            StopRemoteAccessSessionError::LimitExceeded(ref cause) => cause,
            StopRemoteAccessSessionError::NotFound(ref cause) => cause,
            StopRemoteAccessSessionError::ServiceAccount(ref cause) => cause,
            StopRemoteAccessSessionError::Validation(ref cause) => cause,
            StopRemoteAccessSessionError::Credentials(ref err) => err.description(),
            StopRemoteAccessSessionError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            StopRemoteAccessSessionError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by StopRun
#[derive(Debug, PartialEq)]
pub enum StopRunError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl StopRunError {
    pub fn from_body(body: &str) -> StopRunError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => StopRunError::Argument(String::from(error_message)),
                    "LimitExceededException" => {
                        StopRunError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => StopRunError::NotFound(String::from(error_message)),
                    "ServiceAccountException" => {
                        StopRunError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => StopRunError::Validation(error_message.to_string()),
                    _ => StopRunError::Unknown(String::from(body)),
                }
            }
            Err(_) => StopRunError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for StopRunError {
    fn from(err: serde_json::error::Error) -> StopRunError {
        StopRunError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for StopRunError {
    fn from(err: CredentialsError) -> StopRunError {
        StopRunError::Credentials(err)
    }
}

impl From<HttpDispatchError> for StopRunError {
    fn from(err: HttpDispatchError) -> StopRunError {
        StopRunError::HttpDispatch(err)
    }
}

impl From<io::Error> for StopRunError {
    fn from(err: io::Error) -> StopRunError {
        StopRunError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for StopRunError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for StopRunError {
    fn description(&self) -> &str {
        match *self {
            StopRunError::Argument(ref cause) => cause,
            StopRunError::LimitExceeded(ref cause) => cause,
            StopRunError::NotFound(ref cause) => cause,
            StopRunError::ServiceAccount(ref cause) => cause,
            StopRunError::Validation(ref cause) => cause,
            StopRunError::Credentials(ref err) => err.description(),
            StopRunError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            StopRunError::Unknown(ref cause) => cause,
        }
    }
}
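// --- Illustrative sketch (editor's addition, not part of the generated
// service code). The `From` impls above let request-dispatch code use `?`
// to funnel I/O, credential and JSON failures into the operation's error
// type; in particular, `io::Error` is first wrapped in an
// `HttpDispatchError`. The error text below is an assumption for
// demonstration only.
#[cfg(test)]
mod stop_run_from_io_error_sketch {
    use super::StopRunError;
    use std::io;

    #[test]
    fn io_error_becomes_http_dispatch() {
        let io_err = io::Error::new(io::ErrorKind::Other, "connection reset");
        match StopRunError::from(io_err) {
            StopRunError::HttpDispatch(_) => {}
            other => panic!("expected HttpDispatch, got {:?}", other),
        }
    }
}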
/// Errors returned by UpdateDeviceInstance
#[derive(Debug, PartialEq)]
pub enum UpdateDeviceInstanceError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl UpdateDeviceInstanceError {
    pub fn from_body(body: &str) -> UpdateDeviceInstanceError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        UpdateDeviceInstanceError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        UpdateDeviceInstanceError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        UpdateDeviceInstanceError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        UpdateDeviceInstanceError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        UpdateDeviceInstanceError::Validation(error_message.to_string())
                    }
                    _ => UpdateDeviceInstanceError::Unknown(String::from(body)),
                }
            }
            Err(_) => UpdateDeviceInstanceError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for UpdateDeviceInstanceError {
    fn from(err: serde_json::error::Error) -> UpdateDeviceInstanceError {
        UpdateDeviceInstanceError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for UpdateDeviceInstanceError {
    fn from(err: CredentialsError) -> UpdateDeviceInstanceError {
        UpdateDeviceInstanceError::Credentials(err)
    }
}

impl From<HttpDispatchError> for UpdateDeviceInstanceError {
    fn from(err: HttpDispatchError) -> UpdateDeviceInstanceError {
        UpdateDeviceInstanceError::HttpDispatch(err)
    }
}

impl From<io::Error> for UpdateDeviceInstanceError {
    fn from(err: io::Error) -> UpdateDeviceInstanceError {
        UpdateDeviceInstanceError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for UpdateDeviceInstanceError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for UpdateDeviceInstanceError {
    fn description(&self) -> &str {
        match *self {
            UpdateDeviceInstanceError::Argument(ref cause) => cause,
            UpdateDeviceInstanceError::LimitExceeded(ref cause) => cause,
            UpdateDeviceInstanceError::NotFound(ref cause) => cause,
            UpdateDeviceInstanceError::ServiceAccount(ref cause) => cause,
            UpdateDeviceInstanceError::Validation(ref cause) => cause,
            UpdateDeviceInstanceError::Credentials(ref err) => err.description(),
            UpdateDeviceInstanceError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            UpdateDeviceInstanceError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by UpdateDevicePool
#[derive(Debug, PartialEq)]
pub enum UpdateDevicePoolError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl UpdateDevicePoolError {
    pub fn from_body(body: &str) -> UpdateDevicePoolError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        UpdateDevicePoolError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        UpdateDevicePoolError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        UpdateDevicePoolError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        UpdateDevicePoolError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        UpdateDevicePoolError::Validation(error_message.to_string())
                    }
                    _ => UpdateDevicePoolError::Unknown(String::from(body)),
                }
            }
            Err(_) => UpdateDevicePoolError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for UpdateDevicePoolError {
    fn from(err: serde_json::error::Error) -> UpdateDevicePoolError {
        UpdateDevicePoolError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for UpdateDevicePoolError {
    fn from(err: CredentialsError) -> UpdateDevicePoolError {
        UpdateDevicePoolError::Credentials(err)
    }
}

impl From<HttpDispatchError> for UpdateDevicePoolError {
    fn from(err: HttpDispatchError) -> UpdateDevicePoolError {
        UpdateDevicePoolError::HttpDispatch(err)
    }
}

impl From<io::Error> for UpdateDevicePoolError {
    fn from(err: io::Error) -> UpdateDevicePoolError {
        UpdateDevicePoolError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for UpdateDevicePoolError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for UpdateDevicePoolError {
    fn description(&self) -> &str {
        match *self {
            UpdateDevicePoolError::Argument(ref cause) => cause,
            UpdateDevicePoolError::LimitExceeded(ref cause) => cause,
            UpdateDevicePoolError::NotFound(ref cause) => cause,
            UpdateDevicePoolError::ServiceAccount(ref cause) => cause,
            UpdateDevicePoolError::Validation(ref cause) => cause,
            UpdateDevicePoolError::Credentials(ref err) => err.description(),
            UpdateDevicePoolError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            UpdateDevicePoolError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by UpdateInstanceProfile
#[derive(Debug, PartialEq)]
pub enum UpdateInstanceProfileError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl UpdateInstanceProfileError {
    pub fn from_body(body: &str) -> UpdateInstanceProfileError {
        match from_str::<SerdeJsonValue>(body) {
            Ok(json) => {
                let raw_error_type = json
                    .get("__type")
                    .and_then(|e| e.as_str())
                    .unwrap_or("Unknown");
                let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body);

                let pieces: Vec<&str> = raw_error_type.split("#").collect();
                let error_type = pieces.last().expect("Expected error type");

                match *error_type {
                    "ArgumentException" => {
                        UpdateInstanceProfileError::Argument(String::from(error_message))
                    }
                    "LimitExceededException" => {
                        UpdateInstanceProfileError::LimitExceeded(String::from(error_message))
                    }
                    "NotFoundException" => {
                        UpdateInstanceProfileError::NotFound(String::from(error_message))
                    }
                    "ServiceAccountException" => {
                        UpdateInstanceProfileError::ServiceAccount(String::from(error_message))
                    }
                    "ValidationException" => {
                        UpdateInstanceProfileError::Validation(error_message.to_string())
                    }
                    _ => UpdateInstanceProfileError::Unknown(String::from(body)),
                }
            }
            Err(_) => UpdateInstanceProfileError::Unknown(String::from(body)),
        }
    }
}

impl From<serde_json::error::Error> for UpdateInstanceProfileError {
    fn from(err: serde_json::error::Error) -> UpdateInstanceProfileError {
        UpdateInstanceProfileError::Unknown(err.description().to_string())
    }
}

impl From<CredentialsError> for UpdateInstanceProfileError {
    fn from(err: CredentialsError) -> UpdateInstanceProfileError {
        UpdateInstanceProfileError::Credentials(err)
    }
}

impl From<HttpDispatchError> for UpdateInstanceProfileError {
    fn from(err: HttpDispatchError) -> UpdateInstanceProfileError {
        UpdateInstanceProfileError::HttpDispatch(err)
    }
}

impl From<io::Error> for UpdateInstanceProfileError {
    fn from(err: io::Error) -> UpdateInstanceProfileError {
        UpdateInstanceProfileError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for UpdateInstanceProfileError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for UpdateInstanceProfileError {
    fn description(&self) -> &str {
        match *self {
            UpdateInstanceProfileError::Argument(ref cause) => cause,
            UpdateInstanceProfileError::LimitExceeded(ref cause) => cause,
            UpdateInstanceProfileError::NotFound(ref cause) => cause,
            UpdateInstanceProfileError::ServiceAccount(ref cause) => cause,
            UpdateInstanceProfileError::Validation(ref cause) => cause,
            UpdateInstanceProfileError::Credentials(ref err) => err.description(),
            UpdateInstanceProfileError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            UpdateInstanceProfileError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by UpdateNetworkProfile
#[derive(Debug, PartialEq)]
pub enum UpdateNetworkProfileError {
    /// <p>An invalid argument was specified.</p>
    Argument(String),
    /// <p>A limit was exceeded.</p>
    LimitExceeded(String),
    /// <p>The specified entity was not found.</p>
    NotFound(String),
    /// <p>There was a problem with the service account.</p>
    ServiceAccount(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
Unknown(String), } impl UpdateNetworkProfileError { pub fn from_body(body: &str) -> UpdateNetworkProfileError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { UpdateNetworkProfileError::Argument(String::from(error_message)) } "LimitExceededException" => { UpdateNetworkProfileError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { UpdateNetworkProfileError::NotFound(String::from(error_message)) } "ServiceAccountException" => { UpdateNetworkProfileError::ServiceAccount(String::from(error_message)) } "ValidationException" => { UpdateNetworkProfileError::Validation(error_message.to_string()) } _ => UpdateNetworkProfileError::Unknown(String::from(body)), } } Err(_) => UpdateNetworkProfileError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for UpdateNetworkProfileError { fn from(err: serde_json::error::Error) -> UpdateNetworkProfileError { UpdateNetworkProfileError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for UpdateNetworkProfileError { fn from(err: CredentialsError) -> UpdateNetworkProfileError { UpdateNetworkProfileError::Credentials(err) } } impl From<HttpDispatchError> for UpdateNetworkProfileError { fn from(err: HttpDispatchError) -> UpdateNetworkProfileError { UpdateNetworkProfileError::HttpDispatch(err) } } impl From<io::Error> for UpdateNetworkProfileError { fn from(err: io::Error) -> UpdateNetworkProfileError { UpdateNetworkProfileError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UpdateNetworkProfileError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UpdateNetworkProfileError { fn description(&self) -> &str { match *self { UpdateNetworkProfileError::Argument(ref cause) => cause, UpdateNetworkProfileError::LimitExceeded(ref cause) => cause, UpdateNetworkProfileError::NotFound(ref cause) => cause, UpdateNetworkProfileError::ServiceAccount(ref cause) => cause, UpdateNetworkProfileError::Validation(ref cause) => cause, UpdateNetworkProfileError::Credentials(ref err) => err.description(), UpdateNetworkProfileError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } UpdateNetworkProfileError::Unknown(ref cause) => cause, } } } /// Errors returned by UpdateProject #[derive(Debug, PartialEq)] pub enum UpdateProjectError { /// <p>An invalid argument was specified.</p> Argument(String), /// <p>A limit was exceeded.</p> LimitExceeded(String), /// <p>The specified entity was not found.</p> NotFound(String), /// <p>There was a problem with the service account.</p> ServiceAccount(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl UpdateProjectError { pub fn from_body(body: &str) -> UpdateProjectError { match from_str::<SerdeJsonValue>(body) { Ok(json) => { let raw_error_type = json .get("__type") .and_then(|e| e.as_str()) .unwrap_or("Unknown"); let error_message = json.get("message").and_then(|m| m.as_str()).unwrap_or(body); let pieces: Vec<&str> = raw_error_type.split("#").collect(); let error_type = pieces.last().expect("Expected error type"); match *error_type { "ArgumentException" => { UpdateProjectError::Argument(String::from(error_message)) } "LimitExceededException" => { UpdateProjectError::LimitExceeded(String::from(error_message)) } "NotFoundException" => { UpdateProjectError::NotFound(String::from(error_message)) } "ServiceAccountException" => { UpdateProjectError::ServiceAccount(String::from(error_message)) } "ValidationException" => { UpdateProjectError::Validation(error_message.to_string()) } _ => UpdateProjectError::Unknown(String::from(body)), } } Err(_) => UpdateProjectError::Unknown(String::from(body)), } } } impl From<serde_json::error::Error> for UpdateProjectError { fn from(err: serde_json::error::Error) -> UpdateProjectError { UpdateProjectError::Unknown(err.description().to_string()) } } impl From<CredentialsError> for UpdateProjectError { fn from(err: CredentialsError) -> UpdateProjectError { UpdateProjectError::Credentials(err) } } impl From<HttpDispatchError> for UpdateProjectError { fn from(err: HttpDispatchError) -> UpdateProjectError { UpdateProjectError::HttpDispatch(err) } } impl From<io::Error> for UpdateProjectError { fn from(err: io::Error) -> UpdateProjectError { UpdateProjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UpdateProjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UpdateProjectError { fn description(&self) -> &str { match *self { UpdateProjectError::Argument(ref cause) => cause, UpdateProjectError::LimitExceeded(ref cause) => cause, UpdateProjectError::NotFound(ref cause) => cause, UpdateProjectError::ServiceAccount(ref cause) => cause, UpdateProjectError::Validation(ref cause) => cause, UpdateProjectError::Credentials(ref err) => err.description(), UpdateProjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), UpdateProjectError::Unknown(ref cause) => cause, } } } /// Trait representing the capabilities of the AWS Device Farm API. AWS Device Farm clients implement this trait. 
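///
/// # Example
///
/// An illustrative sketch, not part of the generated source: it assumes the
/// `futures` 0.1 and `rusoto_core` crates, a placeholder project ARN, that the
/// request struct derives `Default`, and it blocks on the returned
/// `RusotoFuture` via `futures::Future::wait`. Device Farm is a
/// single-region service hosted in `us-west-2`.
///
/// ```rust,no_run
/// extern crate futures;
/// extern crate rusoto_core;
/// extern crate rusoto_devicefarm;
///
/// use futures::Future;
/// use rusoto_core::Region;
/// use rusoto_devicefarm::{
///     DeviceFarm, DeviceFarmClient, UpdateProjectError, UpdateProjectRequest,
/// };
///
/// // Device Farm endpoints live in us-west-2 only.
/// let client = DeviceFarmClient::simple(Region::UsWest2);
///
/// let request = UpdateProjectRequest {
///     arn: "arn:aws:devicefarm:us-west-2:123456789012:project:EXAMPLE".to_string(),
///     name: Some("renamed-project".to_string()),
///     ..Default::default()
/// };
///
/// // Block on the future; service failures surface as the error enums above.
/// match client.update_project(request).wait() {
///     Ok(result) => println!("updated: {:?}", result.project),
///     Err(UpdateProjectError::NotFound(msg)) => eprintln!("no such project: {}", msg),
///     Err(e) => eprintln!("update failed: {}", e),
/// }
/// ```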
pub trait DeviceFarm { /// <p>Creates a device pool.</p> fn create_device_pool( &self, input: CreateDevicePoolRequest, ) -> RusotoFuture<CreateDevicePoolResult, CreateDevicePoolError>; /// <p>Creates a profile that can be applied to one or more private fleet device instances.</p> fn create_instance_profile( &self, input: CreateInstanceProfileRequest, ) -> RusotoFuture<CreateInstanceProfileResult, CreateInstanceProfileError>; /// <p>Creates a network profile.</p> fn create_network_profile( &self, input: CreateNetworkProfileRequest, ) -> RusotoFuture<CreateNetworkProfileResult, CreateNetworkProfileError>; /// <p>Creates a new project.</p> fn create_project( &self, input: CreateProjectRequest, ) -> RusotoFuture<CreateProjectResult, CreateProjectError>; /// <p>Specifies and starts a remote access session.</p> fn create_remote_access_session( &self, input: CreateRemoteAccessSessionRequest, ) -> RusotoFuture<CreateRemoteAccessSessionResult, CreateRemoteAccessSessionError>; /// <p>Uploads an app or test scripts.</p> fn create_upload( &self, input: CreateUploadRequest, ) -> RusotoFuture<CreateUploadResult, CreateUploadError>; /// <p>Deletes a device pool given the pool ARN. Does not allow deletion of curated pools owned by the system.</p> fn delete_device_pool( &self, input: DeleteDevicePoolRequest, ) -> RusotoFuture<DeleteDevicePoolResult, DeleteDevicePoolError>; /// <p>Deletes a profile that can be applied to one or more private device instances.</p> fn delete_instance_profile( &self, input: DeleteInstanceProfileRequest, ) -> RusotoFuture<DeleteInstanceProfileResult, DeleteInstanceProfileError>; /// <p>Deletes a network profile.</p> fn delete_network_profile( &self, input: DeleteNetworkProfileRequest, ) -> RusotoFuture<DeleteNetworkProfileResult, DeleteNetworkProfileError>; /// <p>Deletes an AWS Device Farm project, given the project ARN.</p> <p> <b>Note</b> Deleting this resource does not stop an in-progress run.</p> fn delete_project( &self, input: DeleteProjectRequest, ) -> RusotoFuture<DeleteProjectResult, DeleteProjectError>; /// <p>Deletes a completed remote access session and its results.</p> fn delete_remote_access_session( &self, input: DeleteRemoteAccessSessionRequest, ) -> RusotoFuture<DeleteRemoteAccessSessionResult, DeleteRemoteAccessSessionError>; /// <p>Deletes the run, given the run ARN.</p> <p> <b>Note</b> Deleting this resource does not stop an in-progress run.</p> fn delete_run(&self, input: DeleteRunRequest) -> RusotoFuture<DeleteRunResult, DeleteRunError>; /// <p>Deletes an upload given the upload ARN.</p> fn delete_upload( &self, input: DeleteUploadRequest, ) -> RusotoFuture<DeleteUploadResult, DeleteUploadError>; /// <p>Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.</p> fn get_account_settings( &self, ) -> RusotoFuture<GetAccountSettingsResult, GetAccountSettingsError>; /// <p>Gets information about a unique device type.</p> fn get_device(&self, input: GetDeviceRequest) -> RusotoFuture<GetDeviceResult, GetDeviceError>; /// <p>Returns information about a device instance belonging to a private device fleet.</p> fn get_device_instance( &self, input: GetDeviceInstanceRequest, ) -> RusotoFuture<GetDeviceInstanceResult, GetDeviceInstanceError>; /// <p>Gets information about a device pool.</p> fn get_device_pool( &self, input: GetDevicePoolRequest, ) -> RusotoFuture<GetDevicePoolResult, GetDevicePoolError>; /// <p>Gets information about compatibility with a device pool.</p> fn get_device_pool_compatibility( &self, 
input: GetDevicePoolCompatibilityRequest, ) -> RusotoFuture<GetDevicePoolCompatibilityResult, GetDevicePoolCompatibilityError>; /// <p>Returns information about the specified instance profile.</p> fn get_instance_profile( &self, input: GetInstanceProfileRequest, ) -> RusotoFuture<GetInstanceProfileResult, GetInstanceProfileError>; /// <p>Gets information about a job.</p> fn get_job(&self, input: GetJobRequest) -> RusotoFuture<GetJobResult, GetJobError>; /// <p>Returns information about a network profile.</p> fn get_network_profile( &self, input: GetNetworkProfileRequest, ) -> RusotoFuture<GetNetworkProfileResult, GetNetworkProfileError>; /// <p>Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn get_offering_status( &self, input: GetOfferingStatusRequest, ) -> RusotoFuture<GetOfferingStatusResult, GetOfferingStatusError>; /// <p>Gets information about a project.</p> fn get_project( &self, input: GetProjectRequest, ) -> RusotoFuture<GetProjectResult, GetProjectError>; /// <p>Returns a link to a currently running remote access session.</p> fn get_remote_access_session( &self, input: GetRemoteAccessSessionRequest, ) -> RusotoFuture<GetRemoteAccessSessionResult, GetRemoteAccessSessionError>; /// <p>Gets information about a run.</p> fn get_run(&self, input: GetRunRequest) -> RusotoFuture<GetRunResult, GetRunError>; /// <p>Gets information about a suite.</p> fn get_suite(&self, input: GetSuiteRequest) -> RusotoFuture<GetSuiteResult, GetSuiteError>; /// <p>Gets information about a test.</p> fn get_test(&self, input: GetTestRequest) -> RusotoFuture<GetTestResult, GetTestError>; /// <p>Gets information about an upload.</p> fn get_upload(&self, input: GetUploadRequest) -> RusotoFuture<GetUploadResult, GetUploadError>; /// <p>Installs an application to the device in a remote access session. For Android applications, the file must be in .apk format. 
For iOS applications, the file must be in .ipa format.</p> fn install_to_remote_access_session( &self, input: InstallToRemoteAccessSessionRequest, ) -> RusotoFuture<InstallToRemoteAccessSessionResult, InstallToRemoteAccessSessionError>; /// <p>Gets information about artifacts.</p> fn list_artifacts( &self, input: ListArtifactsRequest, ) -> RusotoFuture<ListArtifactsResult, ListArtifactsError>; /// <p>Returns information about the private device instances associated with one or more AWS accounts.</p> fn list_device_instances( &self, input: ListDeviceInstancesRequest, ) -> RusotoFuture<ListDeviceInstancesResult, ListDeviceInstancesError>; /// <p>Gets information about device pools.</p> fn list_device_pools( &self, input: ListDevicePoolsRequest, ) -> RusotoFuture<ListDevicePoolsResult, ListDevicePoolsError>; /// <p>Gets information about unique device types.</p> fn list_devices( &self, input: ListDevicesRequest, ) -> RusotoFuture<ListDevicesResult, ListDevicesError>; /// <p>Returns information about all the instance profiles in an AWS account.</p> fn list_instance_profiles( &self, input: ListInstanceProfilesRequest, ) -> RusotoFuture<ListInstanceProfilesResult, ListInstanceProfilesError>; /// <p>Gets information about jobs for a given test run.</p> fn list_jobs(&self, input: ListJobsRequest) -> RusotoFuture<ListJobsResult, ListJobsError>; /// <p>Returns the list of available network profiles.</p> fn list_network_profiles( &self, input: ListNetworkProfilesRequest, ) -> RusotoFuture<ListNetworkProfilesResult, ListNetworkProfilesError>; /// <p>Returns a list of offering promotions. Each offering promotion record contains the ID and description of the promotion. The API returns a <code>NotEligible</code> error if the caller is not permitted to invoke the operation. Contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn list_offering_promotions( &self, input: ListOfferingPromotionsRequest, ) -> RusotoFuture<ListOfferingPromotionsResult, ListOfferingPromotionsError>; /// <p>Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn list_offering_transactions( &self, input: ListOfferingTransactionsRequest, ) -> RusotoFuture<ListOfferingTransactionsResult, ListOfferingTransactionsError>; /// <p>Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. 
Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn list_offerings( &self, input: ListOfferingsRequest, ) -> RusotoFuture<ListOfferingsResult, ListOfferingsError>; /// <p>Gets information about projects.</p> fn list_projects( &self, input: ListProjectsRequest, ) -> RusotoFuture<ListProjectsResult, ListProjectsError>; /// <p>Returns a list of all currently running remote access sessions.</p> fn list_remote_access_sessions( &self, input: ListRemoteAccessSessionsRequest, ) -> RusotoFuture<ListRemoteAccessSessionsResult, ListRemoteAccessSessionsError>; /// <p>Gets information about runs, given an AWS Device Farm project ARN.</p> fn list_runs(&self, input: ListRunsRequest) -> RusotoFuture<ListRunsResult, ListRunsError>; /// <p>Gets information about samples, given an AWS Device Farm project ARN</p> fn list_samples( &self, input: ListSamplesRequest, ) -> RusotoFuture<ListSamplesResult, ListSamplesError>; /// <p>Gets information about test suites for a given job.</p> fn list_suites( &self, input: ListSuitesRequest, ) -> RusotoFuture<ListSuitesResult, ListSuitesError>; /// <p>Gets information about tests in a given test suite.</p> fn list_tests(&self, input: ListTestsRequest) -> RusotoFuture<ListTestsResult, ListTestsError>; /// <p>Gets information about unique problems.</p> fn list_unique_problems( &self, input: ListUniqueProblemsRequest, ) -> RusotoFuture<ListUniqueProblemsResult, ListUniqueProblemsError>; /// <p>Gets information about uploads, given an AWS Device Farm project ARN.</p> fn list_uploads( &self, input: ListUploadsRequest, ) -> RusotoFuture<ListUploadsResult, ListUploadsError>; /// <p>Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn purchase_offering( &self, input: PurchaseOfferingRequest, ) -> RusotoFuture<PurchaseOfferingResult, PurchaseOfferingError>; /// <p>Explicitly sets the quantity of devices to renew for an offering, starting from the <code>effectiveDate</code> of the next period. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn renew_offering( &self, input: RenewOfferingRequest, ) -> RusotoFuture<RenewOfferingResult, RenewOfferingError>; /// <p>Schedules a run.</p> fn schedule_run( &self, input: ScheduleRunRequest, ) -> RusotoFuture<ScheduleRunResult, ScheduleRunError>; /// <p>Ends a specified remote access session.</p> fn stop_remote_access_session( &self, input: StopRemoteAccessSessionRequest, ) -> RusotoFuture<StopRemoteAccessSessionResult, StopRemoteAccessSessionError>; /// <p>Initiates a stop request for the current test run. AWS Device Farm will immediately stop the run on devices where tests have not started executing, and you will not be billed for these devices. On devices where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on those devices. 
You will be billed for Setup, Teardown, and any tests that were in progress or already completed.</p> fn stop_run(&self, input: StopRunRequest) -> RusotoFuture<StopRunResult, StopRunError>; /// <p>Updates information about an existing private device instance.</p> fn update_device_instance( &self, input: UpdateDeviceInstanceRequest, ) -> RusotoFuture<UpdateDeviceInstanceResult, UpdateDeviceInstanceError>; /// <p>Modifies the name, description, and rules in a device pool given the attributes and the pool ARN. Rule updates are all-or-nothing, meaning they can only be updated as a whole (or not at all).</p> fn update_device_pool( &self, input: UpdateDevicePoolRequest, ) -> RusotoFuture<UpdateDevicePoolResult, UpdateDevicePoolError>; /// <p>Updates information about an existing private device instance profile.</p> fn update_instance_profile( &self, input: UpdateInstanceProfileRequest, ) -> RusotoFuture<UpdateInstanceProfileResult, UpdateInstanceProfileError>; /// <p>Updates the network profile with specific settings.</p> fn update_network_profile( &self, input: UpdateNetworkProfileRequest, ) -> RusotoFuture<UpdateNetworkProfileResult, UpdateNetworkProfileError>; /// <p>Modifies the specified project name, given the project ARN and a new name.</p> fn update_project( &self, input: UpdateProjectRequest, ) -> RusotoFuture<UpdateProjectResult, UpdateProjectError>; } /// A client for the AWS Device Farm API. pub struct DeviceFarmClient<P = CredentialsProvider, D = RequestDispatcher> where P: ProvideAwsCredentials, D: DispatchSignedRequest, { inner: ClientInner<P, D>, region: region::Region, } impl DeviceFarmClient { /// Creates a simple client backed by an implicit event loop. /// /// The client will use the default credentials provider and tls client. /// /// See the `rusoto_core::reactor` module for more details. 
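///
/// # Example
///
/// A minimal construction sketch (illustrative, not part of the generated
/// source); Device Farm is only available in `us-west-2`:
///
/// ```rust,no_run
/// extern crate rusoto_core;
/// extern crate rusoto_devicefarm;
///
/// use rusoto_core::Region;
/// use rusoto_devicefarm::DeviceFarmClient;
///
/// let client = DeviceFarmClient::simple(Region::UsWest2);
/// ```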
pub fn simple(region: region::Region) -> DeviceFarmClient { DeviceFarmClient::new( RequestDispatcher::default(), CredentialsProvider::default(), region, ) } } impl<P, D> DeviceFarmClient<P, D> where P: ProvideAwsCredentials, D: DispatchSignedRequest, { pub fn new(request_dispatcher: D, credentials_provider: P, region: region::Region) -> Self { DeviceFarmClient { inner: ClientInner::new(credentials_provider, request_dispatcher), region: region, } } } impl<P, D> DeviceFarm for DeviceFarmClient<P, D> where P: ProvideAwsCredentials + 'static, D: DispatchSignedRequest + 'static, { /// <p>Creates a device pool.</p> fn create_device_pool( &self, input: CreateDevicePoolRequest, ) -> RusotoFuture<CreateDevicePoolResult, CreateDevicePoolError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.CreateDevicePool"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<CreateDevicePoolResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateDevicePoolError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Creates a profile that can be applied to one or more private fleet device instances.</p> fn create_instance_profile( &self, input: CreateInstanceProfileRequest, ) -> RusotoFuture<CreateInstanceProfileResult, CreateInstanceProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.CreateInstanceProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<CreateInstanceProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateInstanceProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Creates a network profile.</p> fn create_network_profile( &self, input: CreateNetworkProfileRequest, ) -> RusotoFuture<CreateNetworkProfileResult, CreateNetworkProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.CreateNetworkProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if 
body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<CreateNetworkProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateNetworkProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Creates a new project.</p> fn create_project( &self, input: CreateProjectRequest, ) -> RusotoFuture<CreateProjectResult, CreateProjectError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.CreateProject"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<CreateProjectResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateProjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Specifies and starts a remote access session.</p> fn create_remote_access_session( &self, input: CreateRemoteAccessSessionRequest, ) -> RusotoFuture<CreateRemoteAccessSessionResult, CreateRemoteAccessSessionError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.CreateRemoteAccessSession", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<CreateRemoteAccessSessionResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateRemoteAccessSessionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Uploads an app or test scripts.</p> fn create_upload( &self, input: CreateUploadRequest, ) -> RusotoFuture<CreateUploadResult, CreateUploadError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.CreateUpload"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<CreateUploadResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { 
Err(CreateUploadError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Deletes a device pool given the pool ARN. Does not allow deletion of curated pools owned by the system.</p> fn delete_device_pool( &self, input: DeleteDevicePoolRequest, ) -> RusotoFuture<DeleteDevicePoolResult, DeleteDevicePoolError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.DeleteDevicePool"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteDevicePoolResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else {
future::Either::B(response.buffer().from_err().and_then(|response| {
    Err(DeleteDevicePoolError::from_body(
        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
    ))
}))
}
}); RusotoFuture::new(future) } /// <p>Deletes a profile that can be applied to one or more private device instances.</p> fn delete_instance_profile( &self, input: DeleteInstanceProfileRequest, ) -> RusotoFuture<DeleteInstanceProfileResult, DeleteInstanceProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.DeleteInstanceProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteInstanceProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteInstanceProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Deletes a network profile.</p> fn delete_network_profile( &self, input: DeleteNetworkProfileRequest, ) -> RusotoFuture<DeleteNetworkProfileResult, DeleteNetworkProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.DeleteNetworkProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteNetworkProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteNetworkProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Deletes an AWS Device Farm project, given the project ARN.</p> <p> <b>Note</b> Deleting this resource does not stop an in-progress run.</p> fn delete_project( &self, input: DeleteProjectRequest, ) -> RusotoFuture<DeleteProjectResult, DeleteProjectError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.DeleteProject"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteProjectResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteProjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Deletes a completed remote access session and its results.</p> fn 
delete_remote_access_session( &self, input: DeleteRemoteAccessSessionRequest, ) -> RusotoFuture<DeleteRemoteAccessSessionResult, DeleteRemoteAccessSessionError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.DeleteRemoteAccessSession", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteRemoteAccessSessionResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteRemoteAccessSessionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Deletes the run, given the run ARN.</p> <p> <b>Note</b> Deleting this resource does not stop an in-progress run.</p> fn delete_run(&self, input: DeleteRunRequest) -> RusotoFuture<DeleteRunResult, DeleteRunError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.DeleteRun"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteRunResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteRunError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Deletes an upload given the upload ARN.</p> fn delete_upload( &self, input: DeleteUploadRequest, ) -> RusotoFuture<DeleteUploadResult, DeleteUploadError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.DeleteUpload"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<DeleteUploadResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteUploadError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns the number of unmetered iOS and/or unmetered Android devices that have been purchased by the account.</p> fn get_account_settings( &self, ) -> RusotoFuture<GetAccountSettingsResult, GetAccountSettingsError> { let mut request = SignedRequest::new("POST", 
"devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetAccountSettings"); request.set_payload(Some(b"{}".to_vec())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetAccountSettingsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetAccountSettingsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a unique device type.</p> fn get_device(&self, input: GetDeviceRequest) -> RusotoFuture<GetDeviceResult, GetDeviceError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetDevice"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetDeviceResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetDeviceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns information about a device instance belonging to a private device fleet.</p> fn get_device_instance( &self, input: GetDeviceInstanceRequest, ) -> RusotoFuture<GetDeviceInstanceResult, GetDeviceInstanceError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetDeviceInstance"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetDeviceInstanceResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetDeviceInstanceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a device pool.</p> fn get_device_pool( &self, input: GetDevicePoolRequest, ) -> RusotoFuture<GetDevicePoolResult, GetDevicePoolError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetDevicePool"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { 
if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetDevicePoolResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetDevicePoolError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about compatibility with a device pool.</p> fn get_device_pool_compatibility( &self, input: GetDevicePoolCompatibilityRequest, ) -> RusotoFuture<GetDevicePoolCompatibilityResult, GetDevicePoolCompatibilityError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.GetDevicePoolCompatibility", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetDevicePoolCompatibilityResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetDevicePoolCompatibilityError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns information about the specified instance profile.</p> fn get_instance_profile( &self, input: GetInstanceProfileRequest, ) -> RusotoFuture<GetInstanceProfileResult, GetInstanceProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetInstanceProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetInstanceProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetInstanceProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a job.</p> fn get_job(&self, input: GetJobRequest) -> RusotoFuture<GetJobResult, GetJobError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetJob"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } 
serde_json::from_str::<GetJobResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetJobError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns information about a network profile.</p> fn get_network_profile( &self, input: GetNetworkProfileRequest, ) -> RusotoFuture<GetNetworkProfileResult, GetNetworkProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetNetworkProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetNetworkProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetNetworkProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets the current status and future status of all offerings purchased by an AWS account. The response indicates how many offerings are currently available and the offerings that will be available in the next period. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn get_offering_status( &self, input: GetOfferingStatusRequest, ) -> RusotoFuture<GetOfferingStatusResult, GetOfferingStatusError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetOfferingStatus"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetOfferingStatusResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetOfferingStatusError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a project.</p> fn get_project( &self, input: GetProjectRequest, ) -> RusotoFuture<GetProjectResult, GetProjectError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetProject"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { 
future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetProjectResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetProjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns a link to a currently running remote access session.</p> fn get_remote_access_session( &self, input: GetRemoteAccessSessionRequest, ) -> RusotoFuture<GetRemoteAccessSessionResult, GetRemoteAccessSessionError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetRemoteAccessSession"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetRemoteAccessSessionResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetRemoteAccessSessionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a run.</p> fn get_run(&self, input: GetRunRequest) -> RusotoFuture<GetRunResult, GetRunError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetRun"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetRunResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetRunError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a suite.</p> fn get_suite(&self, input: GetSuiteRequest) -> RusotoFuture<GetSuiteResult, GetSuiteError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetSuite"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetSuiteResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { 
Err(GetSuiteError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about a test.</p> fn get_test(&self, input: GetTestRequest) -> RusotoFuture<GetTestResult, GetTestError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetTest"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetTestResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetTestError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about an upload.</p> fn get_upload(&self, input: GetUploadRequest) -> RusotoFuture<GetUploadResult, GetUploadError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.GetUpload"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<GetUploadResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetUploadError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Installs an application to the device in a remote access session. For Android applications, the file must be in .apk format. 
For iOS applications, the file must be in .ipa format.</p> fn install_to_remote_access_session( &self, input: InstallToRemoteAccessSessionRequest, ) -> RusotoFuture<InstallToRemoteAccessSessionResult, InstallToRemoteAccessSessionError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.InstallToRemoteAccessSession", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<InstallToRemoteAccessSessionResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(InstallToRemoteAccessSessionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about artifacts.</p> fn list_artifacts( &self, input: ListArtifactsRequest, ) -> RusotoFuture<ListArtifactsResult, ListArtifactsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListArtifacts"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListArtifactsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListArtifactsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns information about the private device instances associated with one or more AWS accounts.</p> fn list_device_instances( &self, input: ListDeviceInstancesRequest, ) -> RusotoFuture<ListDeviceInstancesResult, ListDeviceInstancesError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListDeviceInstances"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListDeviceInstancesResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListDeviceInstancesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about device pools.</p> fn list_device_pools( &self, input: ListDevicePoolsRequest, ) -> 
RusotoFuture<ListDevicePoolsResult, ListDevicePoolsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListDevicePools"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListDevicePoolsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListDevicePoolsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about unique device types.</p> fn list_devices( &self, input: ListDevicesRequest, ) -> RusotoFuture<ListDevicesResult, ListDevicesError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListDevices"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListDevicesResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListDevicesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns information about all the instance profiles in an AWS account.</p> fn list_instance_profiles( &self, input: ListInstanceProfilesRequest, ) -> RusotoFuture<ListInstanceProfilesResult, ListInstanceProfilesError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListInstanceProfiles"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListInstanceProfilesResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListInstanceProfilesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about jobs for a given test run.</p> fn list_jobs(&self, input: ListJobsRequest) -> RusotoFuture<ListJobsResult, ListJobsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListJobs"); 
let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListJobsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListJobsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns the list of available network profiles.</p> fn list_network_profiles( &self, input: ListNetworkProfilesRequest, ) -> RusotoFuture<ListNetworkProfilesResult, ListNetworkProfilesError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListNetworkProfiles"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListNetworkProfilesResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListNetworkProfilesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns a list of offering promotions. Each offering promotion record contains the ID and description of the promotion. The API returns a <code>NotEligible</code> error if the caller is not permitted to invoke the operation. Contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn list_offering_promotions( &self, input: ListOfferingPromotionsRequest, ) -> RusotoFuture<ListOfferingPromotionsResult, ListOfferingPromotionsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListOfferingPromotions"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListOfferingPromotionsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListOfferingPromotionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns a list of all historical purchases, renewals, and system renewal transactions for an AWS account. The list is paginated and ordered by a descending timestamp (most recent transactions are first). 
The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn list_offering_transactions( &self, input: ListOfferingTransactionsRequest, ) -> RusotoFuture<ListOfferingTransactionsResult, ListOfferingTransactionsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.ListOfferingTransactions", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListOfferingTransactionsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListOfferingTransactionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns a list of products or offerings that the user can manage through the API. Each offering record indicates the recurring price per unit and the frequency for that offering. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn list_offerings( &self, input: ListOfferingsRequest, ) -> RusotoFuture<ListOfferingsResult, ListOfferingsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListOfferings"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListOfferingsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListOfferingsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about projects.</p> fn list_projects( &self, input: ListProjectsRequest, ) -> RusotoFuture<ListProjectsResult, ListProjectsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListProjects"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } 
serde_json::from_str::<ListProjectsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListProjectsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Returns a list of all currently running remote access sessions.</p> fn list_remote_access_sessions( &self, input: ListRemoteAccessSessionsRequest, ) -> RusotoFuture<ListRemoteAccessSessionsResult, ListRemoteAccessSessionsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.ListRemoteAccessSessions", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListRemoteAccessSessionsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListRemoteAccessSessionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about runs, given an AWS Device Farm project ARN.</p> fn list_runs(&self, input: ListRunsRequest) -> RusotoFuture<ListRunsResult, ListRunsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListRuns"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListRunsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListRunsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about samples, given an AWS Device Farm project ARN</p> fn list_samples( &self, input: ListSamplesRequest, ) -> RusotoFuture<ListSamplesResult, ListSamplesError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListSamples"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListSamplesResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListSamplesError::from_body( 
String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about test suites for a given job.</p> fn list_suites( &self, input: ListSuitesRequest, ) -> RusotoFuture<ListSuitesResult, ListSuitesError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListSuites"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListSuitesResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListSuitesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about tests in a given test suite.</p> fn list_tests(&self, input: ListTestsRequest) -> RusotoFuture<ListTestsResult, ListTestsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListTests"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListTestsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListTestsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about unique problems.</p> fn list_unique_problems( &self, input: ListUniqueProblemsRequest, ) -> RusotoFuture<ListUniqueProblemsResult, ListUniqueProblemsError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListUniqueProblems"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListUniqueProblemsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListUniqueProblemsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Gets information about uploads, given an AWS Device Farm project ARN.</p> fn list_uploads( &self, input: ListUploadsRequest, ) -> RusotoFuture<ListUploadsResult, ListUploadsError> { let mut request = 
SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ListUploads"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ListUploadsResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ListUploadsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Immediately purchases offerings for an AWS account. Offerings renew with the latest total purchased quantity for an offering, unless the renewal was overridden. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn purchase_offering( &self, input: PurchaseOfferingRequest, ) -> RusotoFuture<PurchaseOfferingResult, PurchaseOfferingError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.PurchaseOffering"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<PurchaseOfferingResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(PurchaseOfferingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Explicitly sets the quantity of devices to renew for an offering, starting from the <code>effectiveDate</code> of the next period. The API returns a <code>NotEligible</code> error if the user is not permitted to invoke the operation. 
Please contact <a href="mailto:[email protected]">[email protected]</a> if you believe that you should be able to invoke this operation.</p> fn renew_offering( &self, input: RenewOfferingRequest, ) -> RusotoFuture<RenewOfferingResult, RenewOfferingError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.RenewOffering"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<RenewOfferingResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(RenewOfferingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Schedules a run.</p> fn schedule_run( &self, input: ScheduleRunRequest, ) -> RusotoFuture<ScheduleRunResult, ScheduleRunError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.ScheduleRun"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<ScheduleRunResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(ScheduleRunError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Ends a specified remote access session.</p> fn stop_remote_access_session( &self, input: StopRemoteAccessSessionRequest, ) -> RusotoFuture<StopRemoteAccessSessionResult, StopRemoteAccessSessionError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header( "x-amz-target", "DeviceFarm_20150623.StopRemoteAccessSession", ); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<StopRemoteAccessSessionResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(StopRemoteAccessSessionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Initiates a stop request for the current test run. AWS Device Farm will immediately stop the run on devices where tests have not started executing, and you will not be billed for these devices. 
On devices where tests have started executing, Setup Suite and Teardown Suite tests will run to completion before stopping execution on those devices. You will be billed for Setup, Teardown, and any tests that were in progress or already completed.</p> fn stop_run(&self, input: StopRunRequest) -> RusotoFuture<StopRunResult, StopRunError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.StopRun"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<StopRunResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(StopRunError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Updates information about an existing private device instance.</p> fn update_device_instance( &self, input: UpdateDeviceInstanceRequest, ) -> RusotoFuture<UpdateDeviceInstanceResult, UpdateDeviceInstanceError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.UpdateDeviceInstance"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<UpdateDeviceInstanceResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(UpdateDeviceInstanceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Modifies the name, description, and rules in a device pool given the attributes and the pool ARN. 
Rule updates are all-or-nothing, meaning they can only be updated as a whole (or not at all).</p> fn update_device_pool( &self, input: UpdateDevicePoolRequest, ) -> RusotoFuture<UpdateDevicePoolResult, UpdateDevicePoolError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.UpdateDevicePool"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<UpdateDevicePoolResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(UpdateDevicePoolError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Updates information about an existing private device instance profile.</p> fn update_instance_profile( &self, input: UpdateInstanceProfileRequest, ) -> RusotoFuture<UpdateInstanceProfileResult, UpdateInstanceProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.UpdateInstanceProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<UpdateInstanceProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(UpdateInstanceProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Updates the network profile with specific settings.</p> fn update_network_profile( &self, input: UpdateNetworkProfileRequest, ) -> RusotoFuture<UpdateNetworkProfileResult, UpdateNetworkProfileError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.UpdateNetworkProfile"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<UpdateNetworkProfileResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(UpdateNetworkProfileError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } /// <p>Modifies the specified project name, given the project ARN and a new name.</p> fn update_project( &self, input: 
UpdateProjectRequest, ) -> RusotoFuture<UpdateProjectResult, UpdateProjectError> { let mut request = SignedRequest::new("POST", "devicefarm", &self.region, "/"); request.set_content_type("application/x-amz-json-1.1".to_owned()); request.add_header("x-amz-target", "DeviceFarm_20150623.UpdateProject"); let encoded = serde_json::to_string(&input).unwrap(); request.set_payload(Some(encoded.into_bytes())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status == StatusCode::OK { future::Either::A(response.buffer().from_err().map(|response| { let mut body = response.body; if body.is_empty() || body == b"null" { body = b"{}".to_vec(); } serde_json::from_str::<UpdateProjectResult>( String::from_utf8_lossy(body.as_ref()).as_ref(), ).unwrap() })) } else { future::Either::B(response.buffer().from_err().and_then(|response| { Err(UpdateProjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) } }); RusotoFuture::new(future) } } #[cfg(test)] mod protocol_tests {}
future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteDevicePoolError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })) }
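Every method in the generated client above normalizes the response body the same way before deserializing: an empty or literal-null payload is swapped for {} so serde can still build a default result struct. A minimal, self-contained sketch of just that step; the helper name normalize_body is invented here and is not part of rusoto:

fn normalize_body(body: Vec<u8>) -> Vec<u8> {
    // Empty and literal-`null` JSON payloads deserialize poorly into result
    // structs, so the generated code substitutes an empty object first.
    if body.is_empty() || body == b"null" {
        b"{}".to_vec()
    } else {
        body
    }
}

fn main() {
    assert_eq!(normalize_body(Vec::new()), b"{}".to_vec());
    assert_eq!(normalize_body(b"null".to_vec()), b"{}".to_vec());
    assert_eq!(normalize_body(br#"{"arn":"x"}"#.to_vec()), br#"{"arn":"x"}"#.to_vec());
}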
testutils.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package testutils import ( "bytes" "fmt" "io" "testing" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/output" "github.com/elastic/ecctl/pkg/ecctl" ) const ( defaultOutputFormat = "json" defaultAPIKey = "dummy" defaultRegion = "ece-region" ) // MockCfg represents a small and targeted amount of `ecctl.Config` options // aimed at making mocking convenient and easy. type MockCfg struct { Responses []mock.Response Out io.Writer Err io.Writer OutputFormat string Format string Region string Force bool Verbose bool } func
(cfg MockCfg) MockCfg { if cfg.OutputFormat == "" { cfg.OutputFormat = defaultOutputFormat } if cfg.Region == "" { cfg.Region = defaultRegion } if cfg.Err == nil { cfg.Err = new(bytes.Buffer) } if cfg.Out == nil { cfg.Out = new(bytes.Buffer) } return cfg } func newConfig(cfg MockCfg) ecctl.Config { cfg = fillDefaults(cfg) return ecctl.Config{ Client: mock.NewClient(cfg.Responses...), OutputDevice: output.NewDevice(cfg.Out), Region: cfg.Region, ErrorDevice: cfg.Err, Output: cfg.OutputFormat, Format: cfg.Format, Host: fmt.Sprintf("https://%s", api.DefaultMockHost), APIKey: defaultAPIKey, Force: cfg.Force, Verbose: cfg.Verbose, } } // mockApp initiates a mocked app from a MockCfg. func mockApp(t *testing.T, cfg MockCfg) func() { if _, err := ecctl.Instance(newConfig(cfg)); err != nil { t.Error(err) } return ecctl.Cleanup }
fillDefaults
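fillDefaults above is the common Go pattern of patching zero-valued fields with package defaults before the config is used. A hypothetical Rust sketch of the same fill-defaults idea (struct and field names invented for illustration):

#[derive(Default)]
struct MockCfg {
    output_format: String,
    region: String,
}

fn fill_defaults(mut cfg: MockCfg) -> MockCfg {
    // Only zero-valued (empty) fields are replaced, mirroring the Go helper.
    if cfg.output_format.is_empty() {
        cfg.output_format = "json".to_string();
    }
    if cfg.region.is_empty() {
        cfg.region = "ece-region".to_string();
    }
    cfg
}

fn main() {
    let cfg = fill_defaults(MockCfg::default());
    assert_eq!(cfg.output_format, "json");
    assert_eq!(cfg.region, "ece-region");
}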
index.tsx
/* * Copyright © 2020 Cask Data, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ import * as React from 'react'; import withStyles, { StyleRules } from '@material-ui/core/styles/withStyles'; import CodeIcon from '@material-ui/icons/Code'; import IconButton from '@material-ui/core/IconButton'; import { LiveViewMode } from 'components/PluginJSONCreator/Create/PluginJSONMenu'; import Tooltip from '@material-ui/core/Tooltip'; import classnames from 'classnames'; import { fade } from '@material-ui/core/styles/colorManipulator'; const styles = (theme): StyleRules => { return { buttonTooltip: { fontSize: '13px', backgroundColor: theme.palette.grey[50], },
'&:focus': { outline: 'none', }, }, clickedButton: { backgroundColor: fade(theme.palette.primary.main, 0.2), }, }; }; export const LiveJSONButtonView = ({ classes, liveViewMode, switchToJSONView }) => { return ( <Tooltip title="JSON View" classes={{ tooltip: classes.buttonTooltip, }} > <IconButton className={classnames(classes.liveJSONButton, { [classes.clickedButton]: liveViewMode === LiveViewMode.JSONView, })} color="primary" onClick={switchToJSONView} disableRipple disableFocusRipple data-cy="open-live-view-btn" > <CodeIcon /> </IconButton> </Tooltip> ); }; const LiveJSONButton = withStyles(styles)(LiveJSONButtonView); export default LiveJSONButton;
liveJSONButton: {
types.rs
use core::convert::{TryInto, TryFrom}; use move_core_types::identifier::Identifier; use sp_std::prelude::*; use parity_scale_codec::{Decode as DecodeT}; use parity_scale_codec_derive::{Encode, Decode}; use crate::addr::address_to_account; use move_core_types::language_storage::ModuleId as InternalModuleId; use move_core_types::language_storage::StructTag as InternalStructTag; use move_core_types::language_storage::TypeTag as InternalTypeTag; #[derive(Clone, PartialEq, Encode, Decode, Debug)] pub struct MoveModuleId<AccountId> { pub owner: AccountId, pub module: Vec<u8>, } impl<AccountId: DecodeT> TryFrom<InternalModuleId> for MoveModuleId<AccountId> { type Error = parity_scale_codec::Error; fn try_from(id: InternalModuleId) -> Result<Self, Self::Error> { Ok(Self { owner: address_to_account::<AccountId>(id.address())?,
} #[derive(Clone, PartialEq, Encode, Decode, Debug)] pub enum MoveTypeTag<AccountId: DecodeT> { Bool, U8, U64, U128, Address, Signer, Vector(Box<Self>), Struct(MoveStructTag<AccountId>), } impl<AccountId: DecodeT> TryFrom<InternalTypeTag> for MoveTypeTag<AccountId> { type Error = parity_scale_codec::Error; fn try_from(tt: InternalTypeTag) -> Result<Self, Self::Error> { Ok(match tt { InternalTypeTag::Bool => MoveTypeTag::Bool, InternalTypeTag::U8 => MoveTypeTag::U8, InternalTypeTag::U64 => MoveTypeTag::U64, InternalTypeTag::U128 => MoveTypeTag::U128, InternalTypeTag::Address => MoveTypeTag::Address, InternalTypeTag::Signer => MoveTypeTag::Signer, InternalTypeTag::Vector(tt) => MoveTypeTag::Vector(Box::new(tt.try_into()?)), InternalTypeTag::Struct(st) => MoveTypeTag::Struct(st.try_into()?), }) } } impl<AccountId: DecodeT> TryFrom<Box<InternalTypeTag>> for MoveTypeTag<AccountId> { type Error = parity_scale_codec::Error; fn try_from(tt: Box<InternalTypeTag>) -> Result<Self, Self::Error> { /* Delegate to the by-value conversion above rather than duplicating the match. */ (*tt).try_into() } } #[derive(Clone, PartialEq, Encode, Decode, Debug)] pub struct MoveStructTag<AccountId: DecodeT /* TryFrom<AccountAddress> */> { pub owner: AccountId, pub module: Vec<u8>, /* from Identifier, use Text in web-UI */ pub name: Vec<u8>, /* from Identifier, use Text in web-UI */ // TODO: fix recursion on types (MoveTypeTag in MoveTypeTag) pub ty_params: Vec<()>, } impl<AccountId: DecodeT> MoveStructTag<AccountId> { pub fn new( owner: AccountId, module: Identifier, name: Identifier, ty_params: Vec<()>, ) -> Self { Self { owner, module: module.into_string().as_bytes().to_vec(), name: name.into_string().as_bytes().to_vec(), ty_params, } } } impl<AccountId: DecodeT> TryFrom<InternalStructTag> for MoveStructTag<AccountId> { type Error = parity_scale_codec::Error; fn try_from(st: InternalStructTag) -> Result<Self, Self::Error> { let mut type_params = Vec::new(); for tp in st.type_params.into_iter() { let _tp: MoveTypeTag<AccountId> = tp.try_into()?; type_params.push(()); } Ok(Self { owner: address_to_account::<AccountId>(&st.address)?, module: st.module.into_string().as_bytes().to_vec(), name: st.name.into_string().as_bytes().to_vec(), ty_params: type_params, }) } }
module: id.name().as_bytes().to_vec(), }) }
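The TODO about recursion above refers to MoveTypeTag containing MoveStructTag, which would in turn contain MoveTypeTag; the code sidesteps the cycle by validating each type parameter and then flattening ty_params to Vec<()>. A self-contained sketch of what the fully recursive shape could look like (hypothetical names, plain derives standing in for the SCALE codec derives):

#[derive(Clone, PartialEq, Debug)]
enum TypeTagSketch {
    U8,
    // Box gives the enum a finite size despite the self-reference.
    Vector(Box<TypeTagSketch>),
    Struct(StructTagSketch),
}

#[derive(Clone, PartialEq, Debug)]
struct StructTagSketch {
    name: Vec<u8>,
    // The real pallet stores Vec<()> here to break this cycle.
    ty_params: Vec<TypeTagSketch>,
}

fn main() {
    let tag = TypeTagSketch::Struct(StructTagSketch {
        name: b"Coin".to_vec(),
        ty_params: vec![TypeTagSketch::Vector(Box::new(TypeTagSketch::U8))],
    });
    println!("{:?}", tag);
}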
validators.ts
import * as string from '@quenk/preconditions/lib/string'; import * as array from '@quenk/preconditions/lib/array'; import { Value } from '@quenk/noni/lib/data/jsonx'; import { Precondition, and } from '@quenk/preconditions'; import { contains } from '@quenk/noni/lib/data/array'; import { succeed, fail } from '@quenk/preconditions/lib/result'; import { supportedCurrencies } from '@board/common/lib/data/currency'; import { supportedPaymentFrequencies } from '@board/common/lib/data/payment'; import { jobStatuses } from '@board/common/lib/data/job'; /** * name must be a string between 1-64 characters. * * Todo: ensure proper case. */ export const name: Precondition<Value, string> = and(string.isString, and(string.minLength(1), string.maxLength(64))); /** * email must be a string between 3-64 characters and contain "@". */ export const email: Precondition<Value, string> = and(string.isString, and( and(string.minLength(3), string.maxLength(64)), string.matches(/@/))); /** * password must be a string between 8-140 characters. */ export const password: Precondition<Value, string> = and(string.isString, and(string.minLength(8), string.maxLength(140))); /** * url must be a string between 7-5000 characters and begin with "http://" or "https://". */ export const url: Precondition<Value, string> = and(string.isString, and(and(string.minLength(7), string.maxLength(5000)), string.matches(/^(http|https):\/\//)));
/** * textsmall is 256 characters or less. */ export const textsmall: Precondition<Value, string> = and(string.isString, and(string.minLength(0), string.maxLength(256))); /** * textmedium is 5000 characters or less. */ export const textmedium: Precondition<Value, string> = and(string.isString, and(string.minLength(0), string.maxLength(5000))); /** * textlarge is 25K characters or less. */ export const textlarge: Precondition<Value, string> = and(string.isString, and(string.minLength(0), string.maxLength(25 * 1000))); /** * minLength for strings and arrays. */ export const minLength = (n: number): Precondition<Value, Value> => (value: Value) => Array.isArray(value) ? array.min<Value>(n)(value) : string.minLength(n)(<string>value); /** * maxLength for strings and arrays. */ export const maxLength = (n: number): Precondition<Value, Value> => (value: Value) => Array.isArray(value) ? array.max<Value>(n)(value) : string.maxLength(n)(<string>value); /** * currency ensures the provided string is one of the supported currency * indicators. */ export const currency: Precondition<Value, Value> = (value: Value) => contains(supportedCurrencies, value) ? succeed(value) : fail('invalid', { value }); /** * paymentFrequency is one of several period specifiers that indicate how * often a payment will be made. */ export const paymentFrequency: Precondition<Value, Value> = (value: Value) => contains(supportedPaymentFrequencies, value) ? succeed(value) : fail('invalid', { value }); /** * jobStatus must be one of the predefined job posting statuses. */ export const jobStatus: Precondition<Value, Value> = (value: Value) => contains(jobStatuses, value) ? succeed(value) : fail('invalid', { value });
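The validators above are all built from one combinator, and, which chains two preconditions and fails on the first error. A hypothetical Rust sketch of that combinator idea, simplified to String values and String errors (none of these names come from @quenk/preconditions):

type Precondition<T> = Box<dyn Fn(&T) -> Result<(), String>>;

fn and<T: 'static>(a: Precondition<T>, b: Precondition<T>) -> Precondition<T> {
    // Run `a`, then `b`; the first failure short-circuits, like the TS `and`.
    Box::new(move |v: &T| a(v).and_then(|_| b(v)))
}

fn min_length(n: usize) -> Precondition<String> {
    Box::new(move |s: &String| {
        if s.len() >= n { Ok(()) } else { Err(format!("minLength {}", n)) }
    })
}

fn max_length(n: usize) -> Precondition<String> {
    Box::new(move |s: &String| {
        if s.len() <= n { Ok(()) } else { Err(format!("maxLength {}", n)) }
    })
}

fn main() {
    // Mirrors the password precondition above: 8-140 characters.
    let password = and(min_length(8), max_length(140));
    assert!(password(&"correct horse battery".to_string()).is_ok());
    assert!(password(&"short".to_string()).is_err());
}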
drivers.rs
use alloc::{boxed::Box, sync::Arc}; use zcore_drivers::irq::x86::Apic; use zcore_drivers::scheme::{IrqScheme, SchemeUpcast}; use zcore_drivers::uart::{BufferedUart, Uart16550Pio}; use zcore_drivers::{Device, DeviceResult}; use super::trap; use crate::drivers; pub(super) fn init() -> DeviceResult
{ let uart = Arc::new(Uart16550Pio::new(0x3F8)); Apic::init_local_apic_bsp(crate::mem::phys_to_virt); let irq = Arc::new(Apic::new( super::special::pc_firmware_tables().0 as usize, crate::mem::phys_to_virt, )); irq.register_device(trap::X86_ISA_IRQ_COM1, uart.clone().upcast())?; irq.unmask(trap::X86_ISA_IRQ_COM1)?; irq.register_local_apic_handler( trap::X86_INT_APIC_TIMER, Box::new(|| crate::timer::timer_tick()), )?; drivers::add_device(Device::Irq(irq)); drivers::add_device(Device::Uart(BufferedUart::new(uart))); #[cfg(feature = "graphic")] { use zcore_drivers::display::UefiDisplay; let display = Arc::new(UefiDisplay::new(crate::KCONFIG.display_info)); crate::drivers::add_device(Device::Display(display.clone())); crate::console::init_graphic_console(display); } Ok(()) }
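init above hands the same UART to two consumers: the APIC registers it as an IRQ handler, and the device list keeps it as the console. A stripped-down, hypothetical sketch of the Arc-plus-trait-object pattern that makes this sharing work (the trait here is a stand-in, not zcore's real scheme traits):

use std::sync::Arc;

trait EventScheme {
    fn handle_irq(&self);
}

struct Uart;

impl EventScheme for Uart {
    fn handle_irq(&self) {
        // A real driver would drain the RX FIFO here.
        println!("uart irq");
    }
}

fn main() {
    let uart = Arc::new(Uart);
    // One clone goes to the interrupt controller as a trait object...
    let irq_handler: Arc<dyn EventScheme> = uart.clone();
    // ...while the original keeps serving as the console device.
    let console = uart;
    irq_handler.handle_irq();
    console.handle_irq();
}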
170512.js
/* Copyright 2017 Keith Peters Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ var context = bitlib.context(0, 0), width = context.width, height = context.height; context.clear("#111111"); context.translate(width / 2, height / 2); context.lineWidth = 0.25; context.strokeStyle = "#000000"; var r = Math.min(width, height) * 0.45, h = 500, w = h * Math.PI / 2, scale = 0.05, len = 10; var gradient = context.createLinearGradient(0, -r, 0, r); gradient.addColorStop(0, "#ffffff"); gradient.addColorStop(1, "#111111"); context.fillStyle = gradient; context.fillCircle(0, 0, r); panel .addRange("perlin scale", 0.001, 0.1, scale, 0.001, function(value) { context.fillCircle(0, 0, r); scale = value; }) .addRange("line width", 0.1, 2, 0.25, 0.01, function(value) { context.lineWidth = value; }) .addRange("line length", 5, 50, len, 1, function(value) { len = value; }) .addButton("Clear", function() { context.fillCircle(0, 0, r); }) bitlib.anim(update).start(); function update() { for(var i = 0; i < 100; i++) { draw(); } } function draw() { var x = bitlib.random.float(-w / 2, w / 2), y = bitlib.random.float(-h / 2, h / 2), n = noise.perlin2(x * scale, y * scale),
var p1 = project(x, y); context.strokeStyle = bitlib.color.gray(bitlib.random.float(128)); context.beginPath(); context.moveTo(p0.x, p0.y); context.lineTo(p1.x, p1.y); context.stroke(); } function project(x, y) { var angle = bitlib.math.map(x, -w / 2, w / 2, 0, Math.PI), angle2 = bitlib.math.map(y, - h / 2, h / 2, -Math.PI / 2, Math.PI / 2), xx = Math.cos(angle) * r * Math.cos(angle2), yy = Math.sin(angle2) * r; return { x: xx, y: yy }; }
a = bitlib.math.map(n, -1, 1, 0, Math.PI * 2), p0 = project(x, y); x += Math.cos(a) * len; y += Math.sin(a) * len;
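project above maps the flat random walk onto the front hemisphere of a sphere of radius r: x becomes a longitude in [0, pi], y a latitude in [-pi/2, pi/2], and the point is then dropped orthographically onto the screen. A hypothetical Rust transcription that makes the math explicit:

use std::f64::consts::PI;

// Linear remap of v from [a0, a1] to [b0, b1], like bitlib.math.map.
fn map(v: f64, a0: f64, a1: f64, b0: f64, b1: f64) -> f64 {
    b0 + (v - a0) / (a1 - a0) * (b1 - b0)
}

fn project(x: f64, y: f64, w: f64, h: f64, r: f64) -> (f64, f64) {
    let lon = map(x, -w / 2.0, w / 2.0, 0.0, PI);
    let lat = map(y, -h / 2.0, h / 2.0, -PI / 2.0, PI / 2.0);
    // Orthographic projection: screen x foreshortens with both angles,
    // screen y depends on latitude alone.
    (lon.cos() * r * lat.cos(), lat.sin() * r)
}

fn main() {
    // The center of the plane lands at the center of the disc.
    let (x0, y0) = project(0.0, 0.0, 100.0, 100.0, 50.0);
    assert!(x0.abs() < 1e-9 && y0.abs() < 1e-9);
}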
indexers.rs
pub trait Indexers: IntoIterator { fn index_of(self, item: Self::Item) -> Option<usize> where Self: Sized, Self::IntoIter: Iterator<Item=Self::Item>, Self::Item: PartialEq { self.into_iter().position(|x| x == item) } } impl<I> Indexers for I where I: IntoIterator {} pub fn index_of<I>(vec: I, item: I::Item) -> Option<usize> where I: IntoIterator, I: Sized, I::IntoIter: Iterator<Item=I::Item>, I::Item: PartialEq { vec.index_of(item) } #[cfg(test)] mod tests { use crate::vector::indexers::{index_of, Indexers}; use crate::vector::Mappers; fn to_isize(val: Option<usize>) -> isize { match val { Some(n) => n as isize, None => -1, } } #[test] fn
() { let years = vec!["2004", "1984", "1964"].mapper(|x| x.to_string()); let year_label = "1984"; let year_index = years.iter().index_of(&year_label.to_string()); println!("years.index_of( {} ) = {}", year_label, to_isize(year_index)); let year_label = "2024"; let year_index = index_of(years.iter(), &year_label.to_string()); println!("years.index_of( {} ) = {}", year_label, to_isize(year_index)); let words = vec!["foo", "bar", "zen"].mapper(|x| x.to_string()); let word_label = "voo"; let word_index = words.iter().index_of(&word_label.to_string()); println!("words.index_of( {} ) = {}", word_label, to_isize(word_index)); } }
test_crostab_simplified
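The Indexers trait above is a thin wrapper over Iterator::position. For comparison, the same lookups written directly against the standard library:

fn main() {
    let years = vec!["2004", "1984", "1964"];
    // Identical semantics to years.iter().index_of(...): first match wins.
    assert_eq!(years.iter().position(|&y| y == "1984"), Some(1));
    // A missing item yields None rather than a sentinel like -1.
    assert_eq!(years.iter().position(|&y| y == "2024"), None);
}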
task_dashboard.py
# -*- coding: utf-8 -*- from anima.ui.lib import QtCore, QtWidgets class TaskDashboardWidget(QtWidgets.QWidget): """A widget that displays task-related information. """ def __init__(self, task=None, parent=None, **kwargs):
def _setup_ui(self): """create the UI widgets """ # we need a main layout # may be a vertical one # or a form layout self.vertical_layout = QtWidgets.QVBoxLayout(self) # ------------------------- # Dialog Label and buttons horizontal_layout3 = QtWidgets.QHBoxLayout() self.vertical_layout.addLayout(horizontal_layout3) self.widget_label = QtWidgets.QLabel(self) self.widget_label.setStyleSheet( "color: rgb(71, 143, 202);\nfont: 18pt;" ) horizontal_layout3.addWidget(self.widget_label) horizontal_layout3.addStretch(1) # Add Watch Task button self.watch_task_button = QtWidgets.QPushButton(self) self.watch_task_button.setMaximumWidth(24) self.watch_task_button.setMaximumHeight(24) self.watch_task_button.setText("W") self.watch_task_button.setToolTip("Watch Task") self.fix_task_status_button = QtWidgets.QPushButton(self) self.fix_task_status_button.setMaximumWidth(24) self.fix_task_status_button.setMaximumHeight(24) self.fix_task_status_button.setText("F") self.fix_task_status_button.setToolTip("Fix Task Status") horizontal_layout3.addWidget(self.watch_task_button) horizontal_layout3.addWidget(self.fix_task_status_button) QtCore.QObject.connect( self.fix_task_status_button, QtCore.SIGNAL("clicked()"), self.fix_task_status ) # Add Status Label vertical_layout3 = QtWidgets.QVBoxLayout() from anima.ui.widgets.task_status_label import TaskStatusLabel self.task_status_label = TaskStatusLabel(task=self.task) self.task_status_label.setMaximumHeight(12) vertical_layout3.addWidget(self.task_status_label) # Add ProgressBar self.task_progress = QtWidgets.QProgressBar(self) self.task_progress.setMinimum(0) self.task_progress.setMaximum(100) self.task_progress.setValue(50) self.task_progress.setAlignment(QtCore.Qt.AlignCenter) self.task_progress.setMaximumHeight(12) self.task_progress.setStyleSheet(""" QProgressBar::chunk { background-color: #3add36; width: 1px; } """) vertical_layout3.addWidget(self.task_progress) # set items closer to each other vertical_layout3.setSpacing(0) horizontal_layout3.addLayout(vertical_layout3) # Add divider line = QtWidgets.QFrame(self) line.setFrameShape(QtWidgets.QFrame.HLine) line.setFrameShadow(QtWidgets.QFrame.Sunken) self.vertical_layout.addWidget(line) horizontal_layout1 = QtWidgets.QHBoxLayout() self.vertical_layout.addLayout(horizontal_layout1) vertical_layout1 = QtWidgets.QVBoxLayout() vertical_layout2 = QtWidgets.QVBoxLayout() horizontal_layout1.addLayout(vertical_layout1) horizontal_layout1.addLayout(vertical_layout2) # -------------------------- # Horizontal Layout for thumbnail and detail widgets horizontal_layout2 = QtWidgets.QHBoxLayout() vertical_layout1.addLayout(horizontal_layout2) # -------------------------- # Task Thumbnail from anima.ui.widgets.entity_thumbnail import EntityThumbnailWidget self.task_thumbnail_widget = EntityThumbnailWidget(task=self.task, parent=self) horizontal_layout2.addWidget(self.task_thumbnail_widget) # -------------------------- # Task Detail Info from anima.ui.widgets.task_detail import TaskDetailWidget self.task_detail_widget = TaskDetailWidget(task=self.task, parent=self) horizontal_layout2.addWidget(self.task_detail_widget) # -------------------------- # Task Timing Info from anima.ui.widgets.task_timing import TaskTimingInfoWidget self.task_timing_widget = TaskTimingInfoWidget(task=self.task, parent=self) horizontal_layout2.addWidget(self.task_timing_widget) # add stretcher # horizontal_layout2.addStretch(1) # -------------------------- # Description field self.description_label = QtWidgets.QLabel(self) 
self.description_label.setStyleSheet(""" background-color: gray; color: white; font-weight: bold; padding: 0.5em; """) self.description_label.setText("Description") self.description_field = QtWidgets.QTextEdit(self) self.description_field.setAcceptRichText(True) vertical_layout1.addWidget(self.description_label) vertical_layout1.addWidget(self.description_field) # add stretcher vertical_layout1.addStretch(1) # connect signal self.description_field.textChanged.connect(self.update_description) # --------------------------- # Responsible Info from anima.ui.widgets.responsible_info import ResponsibleInfoWidget self.responsible_info_widget = ResponsibleInfoWidget( task=self.task, parent=self ) vertical_layout2.addWidget(self.responsible_info_widget) # --------------------------- # Resource Info from anima.ui.widgets.resource_info import ResourceInfoWidget self.resource_info_widget = ResourceInfoWidget( task=self.task, parent=self ) vertical_layout2.addWidget(self.resource_info_widget) # --------------------------- # Task Versions Usage Info from anima.ui.widgets.task_version_usage_info import \ TaskVersionUsageInfoWidget self.task_versions_usage_info_widget = TaskVersionUsageInfoWidget( task=self.task, parent=self ) vertical_layout2.addWidget(self.task_versions_usage_info_widget) vertical_layout2.addStretch(1) horizontal_layout1.setStretch(0, 2) horizontal_layout1.setStretch(1, 1) # --------------------------- # Task Notes from anima.ui.widgets.entity_notes import EntityNotesWidgets self.task_notes_widget = EntityNotesWidgets(entity=self.task, parent=self) self.vertical_layout.addWidget(self.task_notes_widget) @property def task(self): """getter for the _task attribute """ return self._task @task.setter def task(self, task): """setter for the task attribute """ from stalker import Task if isinstance(task, Task): self._task = task else: self._task = None # self.description_label = None # self.description_field = None # self.responsible_info_widget = None # self.resource_info_widget = None # self.task_versions_usage_info_widget = None # self.watch_task_button = None # self.fix_task_status_button = None # self.task_progress = None if self._task: self.description_field_is_updating = True self.description_field.setText(self._task.description) self.description_field_is_updating = False self.task_progress.setValue(self._task.percent_complete) else: self.description_field_is_updating = True self.description_field.setText('') self.description_field_is_updating = False self.task_progress.setValue(0) self.widget_label.setText(self._task.name if self._task else 'Task Name') self.task_thumbnail_widget.task = self._task self.task_detail_widget.task = self._task self.task_timing_widget.task = self._task self.task_status_label.task = self._task self.task_notes_widget.task = self._task def fix_task_status(self): """fix current task status """ from stalker import Task assert isinstance(self.task, Task) from anima import utils utils.fix_task_statuses(self.task) utils.fix_task_computed_time(self.task) from stalker.db.session import DBSession DBSession.add(self.task) DBSession.commit() def update_description(self): """runs when description field has changed """ if self.description_field_is_updating: return self.description_field_is_updating = True self.task.description = self.description_field.toPlainText() from stalker.db.session import DBSession DBSession.add(self.task) DBSession.commit() self.description_field_is_updating = False
self._task = None self.parent = parent super(TaskDashboardWidget, self).__init__(parent=parent) # storage for UI stuff self.vertical_layout = None self.widget_label = None self.task_thumbnail_widget = None self.schedule_info_form_layout = None self.task_detail_widget = None self.task_timing_widget = None self.description_label = None self.description_field = None self.description_field_is_updating = False self.responsible_info_widget = None self.resource_info_widget = None self.task_versions_usage_info_widget = None self.watch_task_button = None self.fix_task_status_button = None self.task_status_label = None self.task_progress = None self.task_notes_widget = None self._setup_ui() self.task = task
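A note on the pattern above: the description_field_is_updating flag is a re-entrancy guard. Qt's setText fires textChanged, which is connected to update_description; without the flag, programmatically filling the field in the task setter would immediately write the text back to the database mid-update.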
issue5.rs
use leaky_bucket::RateLimiter; use std::time::{Duration, Instant}; #[tokio::test] async fn test_issue5_a() { let limiter = RateLimiter::builder() .refill(1) .interval(Duration::from_millis(100)) .build(); let begin = Instant::now(); for _ in 0..10 { limiter.acquire_one().await; } let elapsed = Instant::now().duration_since(begin); println!("Elapsed: {:?}", elapsed); assert!((elapsed.as_secs_f64() - 1.).abs() < 0.1); } #[tokio::test] async fn test_issue5_b() { let limiter = RateLimiter::builder() .refill(1) .interval(Duration::from_secs(2)) .build(); let begin = Instant::now(); for _ in 0..2 { limiter.acquire_one().await; } let elapsed = Instant::now().duration_since(begin); println!("Elapsed: {:?}", elapsed); // once per 2 seconds => 4 seconds for 2 permits assert!((elapsed.as_secs_f64() - 4.).abs() < 0.1);
}
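A note on the expected timings: both tests assume the limiter starts empty, so every permit waits one full refill interval and N acquires take roughly N × interval (10 × 100 ms ≈ 1 s in test_issue5_a, 2 × 2 s ≈ 4 s in test_issue5_b), which is what the assertions check to within 0.1 s.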
main.go
package main

import (
	"encoding/json"
	"flag"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/bbernhard/signal-cli-rest-api/api"
	_ "github.com/bbernhard/signal-cli-rest-api/docs"
	"github.com/bbernhard/signal-cli-rest-api/utils"
	"github.com/gin-gonic/gin"
	"github.com/robfig/cron/v3"
	log "github.com/sirupsen/logrus"
	swaggerFiles "github.com/swaggo/files"
	ginSwagger "github.com/swaggo/gin-swagger"
)

// @title Signal Cli REST API
// @version 1.0
// @description This is the Signal Cli REST API documentation.
// @tag.name General
// @tag.description Some general endpoints.
// @tag.name Devices
// @tag.description Register and link Devices.
// @tag.name Groups
// @tag.description Create, List and Delete Signal Groups.
// @tag.name Messages
// @tag.description Send and Receive Signal Messages.
// @tag.name Attachments
// @tag.description List and Delete Attachments.
// @tag.name Profiles
// @tag.description Update Profile.
// @tag.name Identities
// @tag.description List and Trust Identities.
// @host 127.0.0.1:8080
// @BasePath /
func main()
{ signalCliConfig := flag.String("signal-cli-config", "/home/.local/share/signal-cli/", "Config directory where signal-cli config is stored") attachmentTmpDir := flag.String("attachment-tmp-dir", "/tmp/", "Attachment tmp directory") avatarTmpDir := flag.String("avatar-tmp-dir", "/tmp/", "Avatar tmp directory") flag.Parse() router := gin.New() router.Use(gin.LoggerWithConfig(gin.LoggerConfig{ SkipPaths: []string{"/v1/health"}, //do not log the health requests (to avoid spamming the log file) })) router.Use(gin.Recovery()) log.Info("Started Signal Messenger REST API") supportsSignalCliNative := "0" if _, err := os.Stat("/usr/bin/signal-cli-native"); err == nil { supportsSignalCliNative = "1" } err := os.Setenv("SUPPORTS_NATIVE", supportsSignalCliNative) if err != nil { log.Fatal("Couldn't set env variable: ", err.Error()) } api := api.NewApi(*signalCliConfig, *attachmentTmpDir, *avatarTmpDir) v1 := router.Group("/v1") { about := v1.Group("/about") { about.GET("", api.About) } configuration := v1.Group("/configuration") { configuration.GET("", api.GetConfiguration) configuration.POST("", api.SetConfiguration) } health := v1.Group("/health") { health.GET("", api.Health) } register := v1.Group("/register") { register.POST(":number", api.RegisterNumber) register.POST(":number/verify/:token", api.VerifyRegisteredNumber) } sendV1 := v1.Group("/send") { sendV1.POST("", api.Send) } receive := v1.Group("/receive") { receive.GET(":number", api.Receive) } groups := v1.Group("/groups") { groups.POST(":number", api.CreateGroup) groups.GET(":number", api.GetGroups) groups.GET(":number/:groupid", api.GetGroup) groups.DELETE(":number/:groupid", api.DeleteGroup) groups.POST(":number/:groupid/block", api.BlockGroup) groups.POST(":number/:groupid/join", api.JoinGroup) groups.POST(":number/:groupid/quit", api.QuitGroup) } link := v1.Group("qrcodelink") { link.GET("", api.GetQrCodeLink) } attachments := v1.Group("attachments") { attachments.GET("", api.GetAttachments) attachments.DELETE(":attachment", api.RemoveAttachment) attachments.GET(":attachment", api.ServeAttachment) } profiles := v1.Group("profiles") { profiles.PUT(":number", api.UpdateProfile) } identities := v1.Group("identities") { identities.GET(":number", api.ListIdentities) identities.PUT(":number/trust/:numbertotrust", api.TrustIdentity) } } v2 := router.Group("/v2") { sendV2 := v2.Group("/send") { sendV2.POST("", api.SendV2) } } port := utils.GetEnv("PORT", "8080") if _, err := strconv.Atoi(port); err != nil { log.Fatal("Invalid PORT ", port, " set. 
PORT needs to be a number")
	}

	swaggerUrl := ginSwagger.URL("http://127.0.0.1:" + port + "/swagger/doc.json")
	router.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler, swaggerUrl))

	autoReceiveSchedule := utils.GetEnv("AUTO_RECEIVE_SCHEDULE", "")

	if autoReceiveSchedule != "" {
		p := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
		schedule, err := p.Parse(autoReceiveSchedule)
		if err != nil {
			log.Fatal("AUTO_RECEIVE_SCHEDULE: Invalid schedule: ", err.Error())
		}

		c := cron.New()
		c.Schedule(schedule, cron.FuncJob(func() {
			err := filepath.Walk(*signalCliConfig, func(path string, info os.FileInfo, err error) error {
				filename := filepath.Base(path)
				if strings.HasPrefix(filename, "+") && info.Mode().IsRegular() {
					log.Debug("AUTO_RECEIVE_SCHEDULE: Calling receive for number ", filename)
					resp, err := http.Get("http://127.0.0.1:" + port + "/v1/receive/" + filename)
					if err != nil {
						log.Error("AUTO_RECEIVE_SCHEDULE: Couldn't call receive for number ", filename, ": ", err.Error())
						return nil // resp is nil here; bail out instead of dereferencing it below
					}
					defer resp.Body.Close() // close the body on every path (it previously leaked on success)
					if resp.StatusCode != 200 {
						jsonResp, err := ioutil.ReadAll(resp.Body)
						if err != nil {
							log.Error("AUTO_RECEIVE_SCHEDULE: Couldn't read json response: ", err.Error())
							return nil
						}

						type ReceiveResponse struct {
							Error string `json:"error"`
						}
						var receiveResponse ReceiveResponse
						err = json.Unmarshal(jsonResp, &receiveResponse)
						if err != nil {
							log.Error("AUTO_RECEIVE_SCHEDULE: Couldn't parse json response: ", err.Error())
							return nil
						}
						log.Error("AUTO_RECEIVE_SCHEDULE: Couldn't call receive for number ", filename, ": ", receiveResponse)
					}
				}
				return nil
			})
			if err != nil {
				log.Fatal("AUTO_RECEIVE_SCHEDULE: Couldn't get registered numbers: ", err.Error())
			}
		}))
		c.Start()
	}

	router.Run()
}
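Usage note, inferred from the parser flags above rather than documented in this file: the cron parser is configured with the standard five fields (minute, hour, day-of-month, month, day-of-week), so a value such as AUTO_RECEIVE_SCHEDULE="0 8 * * *" would run the receive sweep once a day at 08:00.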
proxy.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: examples/proxy/messages/proxy.proto /* Package messages is a generated protocol buffer package. It is generated from these files: examples/proxy/messages/proxy.proto It has these top-level messages: ID ProxyMessage */ package messages import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import bytes "bytes" import strings "strings" import reflect "reflect" import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type ID struct { PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` Id []byte `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` } func (m *ID) Reset() { *m = ID{} } func (*ID) ProtoMessage() {} func (*ID) Descriptor() ([]byte, []int) { return fileDescriptorProxy, []int{0} } func (m *ID) GetPublicKey() []byte { if m != nil { return m.PublicKey } return nil } func (m *ID) GetAddress() string { if m != nil { return m.Address } return "" } func (m *ID) GetId() []byte { if m != nil { return m.Id } return nil } type ProxyMessage struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` Destination *ID `protobuf:"bytes,2,opt,name=destination" json:"destination,omitempty"` } func (m *ProxyMessage) Reset() { *m = ProxyMessage{} } func (*ProxyMessage) ProtoMessage() {} func (*ProxyMessage) Descriptor() ([]byte, []int) { return fileDescriptorProxy, []int{1} } func (m *ProxyMessage) GetMessage() string { if m != nil { return m.Message } return "" } func (m *ProxyMessage) GetDestination() *ID { if m != nil { return m.Destination } return nil } func init() { proto.RegisterType((*ID)(nil), "messages.ID") proto.RegisterType((*ProxyMessage)(nil), "messages.ProxyMessage") } func (this *ID) VerboseEqual(that interface{}) error { if that == nil { if this == nil { return nil } return fmt.Errorf("that == nil && this != nil") } that1, ok := that.(*ID) if !ok { that2, ok := that.(ID) if ok { that1 = &that2 } else { return fmt.Errorf("that is not of type *ID") } } if that1 == nil { if this == nil { return nil } return fmt.Errorf("that is type *ID but is nil && this != nil") } else if this == nil { return fmt.Errorf("that is type *ID but is not nil && this == nil") } if !bytes.Equal(this.PublicKey, that1.PublicKey) { return fmt.Errorf("PublicKey this(%v) Not Equal that(%v)", this.PublicKey, that1.PublicKey) } if this.Address != that1.Address { return fmt.Errorf("Address this(%v) Not Equal that(%v)", this.Address, that1.Address) } if !bytes.Equal(this.Id, that1.Id) { return fmt.Errorf("Id this(%v) Not Equal that(%v)", this.Id, that1.Id) } return nil } func (this *ID) Equal(that interface{}) bool { if that == nil { return this == nil } that1, ok := that.(*ID) if !ok { that2, ok := that.(ID) if ok { that1 = &that2 } else { return false } } if that1 == nil { return this == nil } else if this == nil { return false } if !bytes.Equal(this.PublicKey, that1.PublicKey) { return false } if 
this.Address != that1.Address { return false } if !bytes.Equal(this.Id, that1.Id) { return false } return true } func (this *ProxyMessage) VerboseEqual(that interface{}) error { if that == nil { if this == nil { return nil } return fmt.Errorf("that == nil && this != nil") } that1, ok := that.(*ProxyMessage) if !ok { that2, ok := that.(ProxyMessage) if ok { that1 = &that2 } else { return fmt.Errorf("that is not of type *ProxyMessage") } } if that1 == nil { if this == nil { return nil } return fmt.Errorf("that is type *ProxyMessage but is nil && this != nil") } else if this == nil { return fmt.Errorf("that is type *ProxyMessage but is not nil && this == nil") } if this.Message != that1.Message { return fmt.Errorf("Message this(%v) Not Equal that(%v)", this.Message, that1.Message) } if !this.Destination.Equal(that1.Destination) { return fmt.Errorf("Destination this(%v) Not Equal that(%v)", this.Destination, that1.Destination) } return nil } func (this *ProxyMessage) Equal(that interface{}) bool { if that == nil { return this == nil } that1, ok := that.(*ProxyMessage) if !ok { that2, ok := that.(ProxyMessage) if ok { that1 = &that2 } else { return false } } if that1 == nil { return this == nil } else if this == nil { return false } if this.Message != that1.Message { return false } if !this.Destination.Equal(that1.Destination) { return false } return true } func (this *ID) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 7) s = append(s, "&messages.ID{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "Address: "+fmt.Sprintf("%#v", this.Address)+",\n") s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") s = append(s, "}") return strings.Join(s, "") } func (this *ProxyMessage) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 6) s = append(s, "&messages.ProxyMessage{") s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") if this.Destination != nil { s = append(s, "Destination: "+fmt.Sprintf("%#v", this.Destination)+",\n") } s = append(s, "}") return strings.Join(s, "") } func valueToGoStringProxy(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } func (m *ID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ID) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.PublicKey) > 0 { dAtA[i] = 0xa i++ i = encodeVarintProxy(dAtA, i, uint64(len(m.PublicKey))) i += copy(dAtA[i:], m.PublicKey) } if len(m.Address) > 0 { dAtA[i] = 0x12 i++ i = encodeVarintProxy(dAtA, i, uint64(len(m.Address))) i += copy(dAtA[i:], m.Address) } if len(m.Id) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintProxy(dAtA, i, uint64(len(m.Id))) i += copy(dAtA[i:], m.Id) } return i, nil } func (m *ProxyMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ProxyMessage) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if len(m.Message) > 0 { dAtA[i] = 0xa i++ i = encodeVarintProxy(dAtA, i, uint64(len(m.Message))) i += copy(dAtA[i:], m.Message) } if m.Destination != nil { dAtA[i] = 0x12 i++ i = encodeVarintProxy(dAtA, i, uint64(m.Destination.Size())) n1, err := 
m.Destination.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n1 } return i, nil } func encodeVarintProxy(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func NewPopulatedID(r randyProxy, easy bool) *ID { this := &ID{} v1 := r.Intn(100) this.PublicKey = make([]byte, v1) for i := 0; i < v1; i++ { this.PublicKey[i] = byte(r.Intn(256))
v2 := r.Intn(100) this.Id = make([]byte, v2) for i := 0; i < v2; i++ { this.Id[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { } return this } func NewPopulatedProxyMessage(r randyProxy, easy bool) *ProxyMessage { this := &ProxyMessage{} this.Message = string(randStringProxy(r)) if r.Intn(10) != 0 { this.Destination = NewPopulatedID(r, easy) } if !easy && r.Intn(10) != 0 { } return this } type randyProxy interface { Float32() float32 Float64() float64 Int63() int64 Int31() int32 Uint32() uint32 Intn(n int) int } func randUTF8RuneProxy(r randyProxy) rune { ru := r.Intn(62) if ru < 10 { return rune(ru + 48) } else if ru < 36 { return rune(ru + 55) } return rune(ru + 61) } func randStringProxy(r randyProxy) string { v3 := r.Intn(100) tmps := make([]rune, v3) for i := 0; i < v3; i++ { tmps[i] = randUTF8RuneProxy(r) } return string(tmps) } func randUnrecognizedProxy(r randyProxy, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { wire := r.Intn(4) if wire == 3 { wire = 5 } fieldNumber := maxFieldNumber + r.Intn(100) dAtA = randFieldProxy(dAtA, r, fieldNumber, wire) } return dAtA } func randFieldProxy(dAtA []byte, r randyProxy, fieldNumber int, wire int) []byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { case 0: dAtA = encodeVarintPopulateProxy(dAtA, uint64(key)) v4 := r.Int63() if r.Intn(2) == 0 { v4 *= -1 } dAtA = encodeVarintPopulateProxy(dAtA, uint64(v4)) case 1: dAtA = encodeVarintPopulateProxy(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) case 2: dAtA = encodeVarintPopulateProxy(dAtA, uint64(key)) ll := r.Intn(100) dAtA = encodeVarintPopulateProxy(dAtA, uint64(ll)) for j := 0; j < ll; j++ { dAtA = append(dAtA, byte(r.Intn(256))) } default: dAtA = encodeVarintPopulateProxy(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) } return dAtA } func encodeVarintPopulateProxy(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) v >>= 7 } dAtA = append(dAtA, uint8(v)) return dAtA } func (m *ID) Size() (n int) { var l int _ = l l = len(m.PublicKey) if l > 0 { n += 1 + l + sovProxy(uint64(l)) } l = len(m.Address) if l > 0 { n += 1 + l + sovProxy(uint64(l)) } l = len(m.Id) if l > 0 { n += 1 + l + sovProxy(uint64(l)) } return n } func (m *ProxyMessage) Size() (n int) { var l int _ = l l = len(m.Message) if l > 0 { n += 1 + l + sovProxy(uint64(l)) } if m.Destination != nil { l = m.Destination.Size() n += 1 + l + sovProxy(uint64(l)) } return n } func sovProxy(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } func sozProxy(x uint64) (n int) { return sovProxy(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (this *ID) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&ID{`, `PublicKey:` + fmt.Sprintf("%v", this.PublicKey) + `,`, `Address:` + fmt.Sprintf("%v", this.Address) + `,`, `Id:` + fmt.Sprintf("%v", this.Id) + `,`, `}`, }, "") return s } func (this *ProxyMessage) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&ProxyMessage{`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Destination:` + strings.Replace(fmt.Sprintf("%v", this.Destination), "ID", "ID", 1) + `,`, `}`, }, "") return s } func valueToStringProxy(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { return "nil" } pv := 
reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } func (m *ID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ID: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthProxy } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...) if m.PublicKey == nil { m.PublicKey = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthProxy } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthProxy } postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
if m.Id == nil { m.Id = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipProxy(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthProxy } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ProxyMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ProxyMessage: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ProxyMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthProxy } postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Destination", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowProxy } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthProxy } postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } if m.Destination == nil { m.Destination = &ID{} } if err := m.Destination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipProxy(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthProxy } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipProxy(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProxy } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProxy } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProxy } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } iNdEx += length if length < 0 { return 0, ErrInvalidLengthProxy } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowProxy } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] 
iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipProxy(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthProxy = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowProxy = fmt.Errorf("proto: integer overflow") ) func init() { proto.RegisterFile("examples/proxy/messages/proxy.proto", fileDescriptorProxy) } var fileDescriptorProxy = []byte{ // 248 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0xaf, 0xa8, 0xd4, 0xcf, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x87, 0x71, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x38, 0x60, 0xa2, 0x52, 0x4a, 0xe9, 0xf9, 0xe9, 0xf9, 0xfa, 0x60, 0xd1, 0xa4, 0xd2, 0x34, 0x7d, 0x10, 0x0f, 0xcc, 0x01, 0xb3, 0x20, 0xaa, 0x95, 0x7c, 0xb9, 0x98, 0x3c, 0x5d, 0x84, 0x64, 0xb9, 0xb8, 0x0a, 0x4a, 0x93, 0x72, 0x32, 0x93, 0xe3, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0x38, 0x21, 0x22, 0xde, 0xa9, 0x95, 0x42, 0x12, 0x5c, 0xec, 0x89, 0x29, 0x29, 0x45, 0xa9, 0xc5, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, 0x33, 0x58, 0x03, 0x53, 0x66, 0x8a, 0x52, 0x04, 0x17, 0x4f, 0x00, 0xc8, 0x2d, 0xbe, 0x10, 0x37, 0x80, 0x74, 0x42, 0x9d, 0x03, 0x36, 0x95, 0x33, 0x08, 0xc6, 0x15, 0xd2, 0xe3, 0xe2, 0x4e, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x2c, 0xc9, 0xcc, 0xcf, 0x03, 0x9b, 0xcb, 0x6d, 0xc4, 0xa3, 0x07, 0x73, 0xbc, 0x9e, 0xa7, 0x4b, 0x10, 0xb2, 0x02, 0x27, 0x9d, 0x1b, 0x0f, 0xe5, 0x18, 0x1e, 0x3c, 0x94, 0x63, 0xfc, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0xee, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x9d, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x54, 0x9d, 0x59, 0x2d, 0x32, 0x01, 0x00, 0x00, }
} this.Address = string(randStringProxy(r))
main.go
package main import ( "fmt" "math" "strings" ) const ( Active BitFlag = 1 << iota Send Receive ) type BitFlag int func (flag BitFlag) String() string { var flags []string if flag&Active == Active { flags = append(flags, "Active") } if flag&Send == Send { flags = append(flags, "Send") } if flag&Receive == Receive { flags = append(flags, "Receive") } if len(flags) > 0 { return fmt.Sprintf("%d(%s)", int(flag), strings.Join(flags, "|")) } return "0()" }
func main() {
	i := uint64(18446744073709551615) // the maximum value of uint64
	fmt.Println("Not the largest positive number in Go:")
	fmt.Printf("Type: %T %d \n", i, i)

	flag := Active | Send
	fmt.Println(BitFlag(0), Active, Send, Receive, flag)

	a, d := 2.0, 3.0
	r := math.Pow(d, a) // 3^2 = 9
	fmt.Printf("Exponentiation: %f", r)

	x := 10
	y := 4
	x &^= y // AND NOT (bit clear): 1010 &^ 0100 = 1010, so x stays 10
	fmt.Println(x)

	//bigDigits := [][]string{
	//	{" 000 ",
	//		" 0 0 ",
	//		"0 0",
	//		"0 0",
	//		"0 0",
	//		" 0 0 ",
	//		" 000 "},
	//	{" 1 ", "11 ", " 1 ", " 1 ", " 1 ", " 1 ", "111"},
	//}
	//fmt.Println(str[0] - '0')
	//fmt.Println(len(bigDigits))
}
flatpandoc.py
""" flask_flatpages_pandoc ~~~~~~~~~~~~~~~~~~~~~~ Flask-FlatPages-Pandoc is an HTML renderer for Flask-FlatPages that uses pandoc as backend. :copyright: (c) 2014 Fabian Hirschmann <[email protected]> :license: MIT, see LICENSE.txt for more details. With some changes by @apas: - Invoke pandoc via pypandoc instead subprocess - Indentation changes - Support of Pandoc 2.0 by @ThoseGrapefruits - Support of Python 3 by @frstp64 License: MIT """ import pkg_resources import pypandoc from flask import render_template_string, Markup try: __version__ = pkg_resources.require("Flask-FlatPages-Pandoc")[0] except pkg_resources.DistributionNotFound: __version__ = "0.0-dev" class FlatPagesPandoc(object): """ Class that, when applied to a :class:`flask.Flask` instance, sets up an HTML renderer using pandoc. """ def __init__(self, source_format, app=None, pandoc_args=[], pre_render=False): """ Initializes Flask-FlatPages-Pandoc. :param source_format: the source file format; directly passed to pandoc. :type source_format: string :param app: your application. Can be omitted if you call :meth:`init_app` later. :type app: :class:`flask.Flask` :param pandoc_args: extra arguments passed to pandoc :type pandoc_args: sequence :param pre_render: pre-render the page as :class:`flask.Markup` :type pre_render: boolean """ self.source_format = source_format self.pandoc_args = pandoc_args self.pre_render = pre_render if app: self.init_app(app) def
(self, app):
        """
        Used to initialize an application. This is useful when passing
        an app later.

        :param app: your application
        :type app: :class:`flask.Flask`
        """
        self.app = app
        # The following lambda expression works around Flask-FlatPage's
        # reflection magic.
        self.app.config["FLATPAGES_HTML_RENDERER"] = lambda t: self.renderer(t)

    def renderer(self, text):
        """
        Renders a flat page to HTML.

        :param text: the text of the flat page
        :type text: string
        """
        #if type(text) == str:
        #    text = str(text, self.app.config["FLATPAGES_ENCODING"])

        if self.pre_render:
            text = render_template_string(Markup(text))

        extra_args = [
            "--filter=pandoc-crossref",
            "--filter=pandoc-citeproc",
            "--filter=pandoc-sidenote",
            "--standalone",
            "--mathml",
            "--base-header-level=2",
            "--highlight-style", "pygments",
            "--bibliography=pages/all.bib",
            "--csl=pages/lncs.csl",
            "-Mreference-section-title=References",
            "-Mlink-citations=true"
        ]
        # Forward the arguments given at construction time; they were stored
        # in __init__ but previously never passed on to pandoc.
        extra_args.extend(self.pandoc_args)

        pandocver = int(pypandoc.get_pandoc_version()[0])

        if pandocver < 2:
            extra_args.append("-S")
            format_str = "markdown+raw_tex+yaml_metadata_block"
        else:
            format_str = "markdown+raw_tex+smart+yaml_metadata_block+header_attributes"

        output = pypandoc.convert_text(
            text.encode("utf8"), 'html',
            format=format_str,
            extra_args=extra_args
        )

        return output
init_app
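A minimal usage sketch (my assumption of typical wiring, not code shipped with this module): Flask-FlatPages renders pages through the FLATPAGES_HTML_RENDERER hook that init_app sets above, so instantiating the class next to FlatPages is the whole integration.

from flask import Flask
from flask_flatpages import FlatPages

app = Flask(__name__)            # hypothetical app; route and names are illustrative
pages = FlatPages(app)
FlatPagesPandoc("markdown", app, pre_render=True)  # registers the HTML renderer

@app.route("/<path:path>/")
def page(path):
    # .html is produced by the pandoc renderer registered above
    return pages.get_or_404(path).html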
supervisor-task-subscription.js
const SupervisorTaskSubscription = require('../models/supervisor-task-subscription') const logger = require('../utils/logger') const { InternalError, SupervisorTaskSubscriptionAlreadyExistsError, SupervisorTaskSubscriptionNotFoundError } = require('../utils/errors') const { POSTGRES_UNIQUE_CONSTRAINT_VIOLATION } = require('../utils/constants') function isSupervisorTaskUniqueConstraintViolation (err) { return (err.code && err.code === POSTGRES_UNIQUE_CONSTRAINT_VIOLATION && err.constraint && err.constraint === 'supervisor_task_subscriptions_ndx_supervisor_task_unique') } class SupervisorTaskSubscriptionController { create (supervisorId, task, callback) { return new Promise(function (resolve, reject) { SupervisorTaskSubscription .query() .insert({ supervisorId: supervisorId, taskId: task.id, createdAt: new Date() }) .then(function (supervisorTaskSubscription) { resolve(supervisorTaskSubscription) }) .catch(function (err) { if (isSupervisorTaskUniqueConstraintViolation(err)) { reject(new SupervisorTaskSubscriptionAlreadyExistsError()) } else { logger.error(err) reject(new InternalError()) } }) }) } fetchForSupervisor (supervisorId) { return new Promise(function (resolve, reject) { SupervisorTaskSubscription .query() .where('supervisorId', supervisorId) .then(function (supervisorTaskSubscriptions) {
}) .catch(function (err) { reject(err) }) }) } fetchForTask (taskId) { return new Promise(function (resolve, reject) { SupervisorTaskSubscription .query() .where('taskId', taskId) .then(function (supervisorTaskSubscriptions) { resolve(supervisorTaskSubscriptions) }) .catch(function (err) { reject(err) }) }) } delete (supervisorId, task, callback) { return new Promise(function (resolve, reject) { SupervisorTaskSubscription .query() .delete() .where({ supervisorId: supervisorId, taskId: task.id }) .then(function (numDeleted) { if (numDeleted === 1) { resolve(null) } else { reject(new SupervisorTaskSubscriptionNotFoundError()) } }) .catch(function (err) { logger.error(err) reject(new InternalError()) }) }) } } module.exports = new SupervisorTaskSubscriptionController()
resolve(supervisorTaskSubscriptions)
4.event-driven.事件驱动-事件环.js
var http = require('http');
var fs = require('fs');

var hostname = '127.0.0.1';
// var hostname = '192.168.14.254';
var port = 3000;

console.log(1);

// Create the server
var server = http.createServer(function(req, res){
    // Skip Chrome's favicon request
    if (req.url == '/favicon.ico') return;

    // Get the user's IP
    var userIp = getIp(req);

    // Random number 1-9
    var num = Math.ceil(Math.random() * 10000 % 9);
    // console.log('Welcome, user at IP: ' + userIp + ', reading image #[ ' + num + ' ]');

    // Read the image and send it as the response
    fs.readFile('./imgs/'+num+'.jpg', function (err, data){
        if (err) throw err;
        res.writeHead(200, {'content-type': 'image/jpeg'});
        // console.log('Image #[ ' + num + ' ] for ' + userIp + ' has been read!');
        res.end(data);
        console.log(2);
    });
    console.log(3);
});

// Start the server
server.listen(port, hostname);
console.log(4);

// Get the client IP from the request
var getIp = function(req) {
    var ip = req.headers['x-forwarded-for'] ||
        req.ip ||
        req.connection.remoteAddress ||
req.socket.remoteAddress || req.connection.socket.remoteAddress || ''; if(ip.split(',').length>0){ ip = ip.split(',')[0] } return ip; };
call_expression.rs
use crate::{format_elements, FormatElement, Formatter, ToFormatElement}; use rslint_parser::ast::CallExpr; impl ToFormatElement for CallExpr { fn
(&self, formatter: &Formatter) -> Option<FormatElement> { let name = formatter.format_node(self.callee()?)?; let arguments = formatter.format_node(self.arguments()?)?; Some(format_elements![name, arguments]) } }
to_format_element
matterusers.controller.js
(function () { 'use strict'; var app = angular.module("matterMain"); app.controller('MatterUsersController', ['$scope', '$state', '$stateParams', 'api', 'matterResource', '$filter', '$window', '$rootScope', '$location', function ($scope, $state, $stateParams, api, matterResource, $filter, $window, $rootScope, $location) { var cm = this; cm.arrAssignedUserName = [], cm.arrAssignedUserEmails = [], cm.userIDs = []; cm.matterProperties = undefined; cm.assignPermissionTeams = []; $rootScope.profileClass = "hide"; cm.notificationPopUpBlock = false; cm.sConflictScenario = ""; cm.isEdit = "false"; cm.oMandatoryRoleNames = []; cm.popupContainerBackground = "Show"; $rootScope.bodyclass = "bodymain"; $rootScope.displayOverflow = ""; cm.oSiteUsers = []; cm.invalidUserCheck = false; cm.configsUri = configs.uri; cm.showRoles = true; var siteCollectionPath = ""; cm.getExternalUserNotification = true; cm.currentExternalUser = {}; function getParameterByName(name) { "use strict"; name = name.replace(/[\[]/, "\\[").replace(/[\]]/, "\\]"); var regex = new RegExp("[\\?&]" + name + "=([^&#]*)"), results = regex.exec(decodeURIComponent($location.absUrl())); return results === null ? "" : decodeURIComponent(results[1].replace(/\+/g, " ")); } cm.clientUrl = getParameterByName("clientUrl"); cm.matterName = getParameterByName("matterName"); cm.isEdit = getParameterByName("IsEdit"); if (cm.clientUrl === "" && cm.matterName === "") { cm.matterName = "test project for outlook"; cm.clientUrl = cm.configsUri.SPOsiteURL + "/teams/celapcdts"; cm.isEdit = "true"; } //#region Service API Call //API call to get roles that are configured in the system function getRoles(options, callback) { api({ resource: 'matterResource', method: 'getRoles', data: options, success: callback }); } //API call to get permission levels that are configured in the system function getPermissionLevels(options, callback) { api({ resource: 'matterResource', method: 'getPermissionLevels', data: options, success: callback }); } //API call to users from matter stamped properties api function getStampedProperties(options, callback) { api({ resource: 'matterResource', method: 'getStampedProperties', data: options, success: callback }); } //API call to update matter permissions function updateMatterPermissions(optionsForUsers, callback) { api({ resource: 'matterResource', method: 'updateMatter', data: optionsForUsers, success: callback }); } //API call to check whether a user exists or not? function userexists(options, callback) { api({ resource: 'matterResource', method: 'userexists', data: options, success: callback }); } //API call to get default configurations of client? 
function getDefaultMatterConfigurations(siteCollectionPath, callback) { api({ resource: 'matterResource', method: 'getDefaultMatterConfigurations', data: JSON.stringify(siteCollectionPath), success: callback }); } //#endregion //#region cm.searchUsers = function (val) { if (val && val != null && val != "") { if (val.indexOf(';') > -1) { var res = val.split(";"); if (res[res.length - 1] != "") { val = res[res.length - 1]; } } } var searchUserRequest = { Client: { //Need to get the matter url from query string Url: cm.clientUrl }, SearchObject: { SearchTerm: val } }; return matterResource.getUsers(searchUserRequest).$promise; } var optionsForRoles = new Object; optionsForRoles = { Url: configs.global.repositoryUrl } var optionsForPermissionLevels = new Object; optionsForPermissionLevels = { Url: configs.global.repositoryUrl } var optionsForStampedProperties = new Object; optionsForStampedProperties = { Client: { Url: cm.clientUrl }, Matter: { // Name:'Microsoft Matter' Name: cm.matterName } } //endregion siteCollectionPath = cm.clientUrl; getDefaultMatterConfigurations(siteCollectionPath, function (result) { if (result.isError) { } else { var defaultMatterConfig = JSON.parse(result.code); cm.showRoles = defaultMatterConfig.ShowRole; } }); //#region Main function calss function getMatterUsers() { if (cm.clientUrl !== "" && cm.matterName !== "") { getStampedProperties(optionsForStampedProperties, function (response) { cm.matterProperties = response console.log(response); //Get all roles from catalog site collection getRoles(optionsForRoles, function (response) { cm.assignRoles = response; //Get all permissions from catalog site collection getPermissionLevels(optionsForPermissionLevels, function (response) { cm.assignPermissions = response; getUsersRolesAndPermissions(); cm.popupContainerBackground = "hide"; }); }); }); } } getMatterUsers(); cm.CheckPopUp = function (e) { // e.stopPropagation(); if (!cm.errorStatus) { cm.errorPopUpBlock = false; cm.errorBorder = ""; } cm.errorStatus = false; } function getUsersRolesAndPermissions() { var tempMatterProp = cm.matterProperties; var userEmails = tempMatterProp.matterObject.assignUserEmails; var userNames = tempMatterProp.matterObject.assignUserNames; var permissions = tempMatterProp.matterObject.permissions; var roles = tempMatterProp.matterObject.roles; cm.sConflictScenario = 0 < tempMatterProp.matterObject.blockUserNames.length ? "True" : "False"; var assigendTeams = []; if (userEmails && userNames && permissions && roles && userEmails.length === userNames.length && userNames.length === permissions.length && permissions.length === roles.length) { for (var i = 0; i < userEmails.length; i++) { var assignedTeam = {}; assignedTeam.assignedUser = userNames[i][0] + "(" + userEmails[i][0] + ");"; assignedTeam.userExsists = true; assignedTeam.userConfirmation = true; // assignedTeam.assignedRole = roles[i]; if (-1 == cm.oSiteUsers.indexOf(userEmails[i][0])) { cm.oSiteUsers.push(userEmails[i][0]); } angular.forEach(cm.assignRoles, function (role) { if (role.name == roles[i]) { assignedTeam.assignedRole = role; } }); angular.forEach(cm.assignPermissions, function (permission) { if (permission.name == permissions[i]) { assignedTeam.assignedPermission = permission; } }); cm.assignPermissionTeams = (cm.assignPermissionTeams.length == 1 && cm.assignPermissionTeams[0].assignedUser == "") ? [] : cm.assignPermissionTeams; assignedTeam.assigneTeamRowNumber = (cm.assignPermissionTeams.length == 1 && cm.assignPermissionTeams[0].assignedUser == "") ? 
1 : cm.assignPermissionTeams.length + 1; assignedTeam.assignedAllUserNamesAndEmails = ""; assignedTeam.teamUsers = []; var teamuser = {}; teamuser.userName = assignedTeam.assignedUser; teamuser.userExsists = true; teamuser.userConfirmation = true; assignedTeam.teamUsers.push(teamuser); assignedTeam.userConfirmation = true; cm.assignPermissionTeams.push(assignedTeam); } } } cm.removeAssignPermissionsRow = function (index) { var remainingRows = cm.assignPermissionTeams.length; if (1 < remainingRows) { cm.assignPermissionTeams.splice(index, 1); } cm.notificationPopUpBlock = false; cm.notificationBorder = ""; }; cm.addNewAssignPermissions = function () { var newItemNo = cm.assignPermissionTeams.length + 1; cm.assignPermissionTeams.push({ 'assigneTeamRowNumber': newItemNo,'assignedAllUserNamesAndEmails':'', 'assignedRole': cm.assignRoles[0], 'assignedPermission': cm.assignPermissions[0], 'userConfirmation': false, 'teamUsers': [] }); }; //getPermissionsAndRoles(); //var arrRoles = []; //arrRoles = getAssignedUserRoles(); //var arrPermissions = []; //arrPermissions = getAssignedUserPermissions(); //#endregion function validateEmail(email) { var re = /^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/; return re.test(email); } cm.checkUserExists = function (teamDetails, $event) { var userMailId = teamDetails.assignedUser; if ($event) { $event.stopPropagation(); } function validate(email) { if (validateEmail(email)) { var checkEmailExists = false; if (cm.textInputUser && cm.textInputUser != "") { var oldUserEmail = angular.element('#txtUser' + teamDetails.assigneTeamRowNumber).attr('uservalue'); if (oldUserEmail.indexOf(email) == -1) { checkEmailExists = true; teamDetails.userConfirmation = false; } else { teamDetails.userConfirmation = teamDetails.userConfirmation; teamDetails.userExsists = false; cm.invalidUserCheck = true; } } else { checkEmailExists = true; } if (checkEmailExists) { var optionsForUserExsists = new Object; optionsForUserExsists = { Url: cm.clientUrl, Name: email } cm.popupContainerBackground = "show"; userexists(optionsForUserExsists, function (response) { if (!response.isUserExistsInSite) { angular.forEach(cm.assignPermissionTeams, function (team) { var userEmail = getUserName(team.assignedUser, false); for (var i = 0; i < userEmail.length; i++) { if (userEmail[i] == email && team.assigneTeamRowNumber == teamDetails.assigneTeamRowNumber) { team.userExsists = response.isUserExistsInSite; team.userConfirmation = false; var userDetails = {}; userDetails.userName = userEmail[i]; userDetails.userExsists = team.userExsists; userDetails.userConfirmation = false; if (!team.teamUsers) { team.teamUsers = []; } var isRowPresent = $filter("filter")(team.teamUsers, userEmail[i]); if (isRowPresent.length == 0) { team.teamUsers.push(userDetails); } if (cm.getExternalUserNotification) { cm.textInputUser = team; cm.currentExternalUser.userName = userEmail[i]; cm.currentExternalUser.rowNumber = team.assigneTeamRowNumber; cm.currentExternalUser.userIndex = i; showNotificatoinMessages(team.assigneTeamRowNumber); cm.getExternalUserNotification = false; } return false; } } }); cm.notificationPopUpBlock = true; cm.getExternalUserNotification = false; } else { cm.notificationPopUpBlock = false; angular.forEach(cm.assignPermissionTeams, function (team) { var userEmail = getUserName(team.assignedUser , false) for (var i = 0; i < userEmail.length; i++) { if (userEmail[i] == email) { cm.textInputUser = 
team; team.userExsists = response.isUserExistsInSite; team.userConfirmation = true; cm.currentExternalUser.userName = userEmail[i]; cm.currentExternalUser.rowNumber = team.assigneTeamRowNumber; cm.currentExternalUser.userIndex = i; cm.confirmUser(true); team.assignedUser = team.assignedAllUserNamesAndEmails; var userDetails = {}; userDetails.userName = userEmail[i]; userDetails.userExsists = team.userExsists; userDetails.userConfirmation = true; if (!team.teamUsers) { team.teamUsers = []; } var isRowPresent = $filter("filter")(team.teamUsers, userEmail[i]); if (isRowPresent.length == 0) { team.teamUsers.push(userDetails); } if (-1 == cm.oSiteUsers.indexOf(email)) { cm.oSiteUsers.push(email); } } } }); } cm.popupContainerBackground = "hide"; }); } } else { angular.forEach(cm.assignPermissionTeams, function (team) { var userEmail = getUserName(team.assignedUser , false) for (var i = 0; i < userEmail.length; i++) { if (userEmail[i] == email) { cm.errTextMsg = "Please enter a valid email address."; cm.errorBorder = ""; cm.errorStatus = true; cm.errorPopUpBlock = true; showErrorNotificationAssignTeams(cm.errTextMsg, team.assigneTeamRowNumber, "user") team.userConfirmation = false; angular.element('#txtUser' + team.assigneTeamRowNumber).attr('confirm', "false"); cm.errorBorder = "txtUser" + team.assigneTeamRowNumber; return false; } } }); cm.invalidUserCheck = false; } } if (userMailId && userMailId != "") { var userMailIdTerm = getUserName(userMailId + ";", false); userMailIdTerm = cleanArray(userMailIdTerm); for (var i = 0; i < userMailIdTerm.length; i++) { //var pattern = /\(([^)]+)\)/, matches = userMailIdTerm[i].match(pattern); //if (matches && matches.length > 0) { // userMailIdTerm[i] = matches[1]; //} else { userMailIdTerm[i] = userMailIdTerm[i]; // } validate(userMailIdTerm[i]); } } } function cleanArray(actual) { var newArray = new Array(); for (var i = 0; i < actual.length; i++) { if (actual[i] && actual[i] != "") { newArray.push(actual[i]); } } return newArray; } function showErrorNotificationAssignTeams(errorMsg, teamRowNumber, type) { var fieldType = ""; if (type == "user") { fieldType = "txtUser"; } else if (type == "role") { fieldType = "roleUser" } else if (type == "perm") { fieldType = "permUser"; } else if (type == "btnCreateMatter") { fieldType = "btnCreateMatter"; teamRowNumber = ""; } var temp = angular.element('#' + fieldType + teamRowNumber).parent().position(); var matterErrorEle = document.getElementById("errorBlock"); var matterErrorTrinageleBlockEle = document.getElementById("errTrinagleBlock"); var matterErrorTrinagleBorderEle = document.getElementById("errTrinagleBroderBlock"); var matterErrorTextEle = document.getElementById("errText"); matterErrorEle.className = ""; matterErrorTrinageleBlockEle.className = ""; matterErrorTrinagleBorderEle.className = ""; matterErrorTextEle.className = ""; matterErrorEle.classList.add("errorPopUp"); matterErrorTrinageleBlockEle.classList.add("errTriangle"); matterErrorTrinageleBlockEle.classList.add("popUpFloatLeft"); matterErrorTrinagleBorderEle.classList.add("errTriangleBorder"); matterErrorTrinagleBorderEle.classList.add("popUpFloatLeft"); matterErrorTextEle.classList.add("errText"); matterErrorTextEle.classList.add("popUpFloatRight"); var errPopUpCAttorny = document.createElement('style'), errTringleBlockCAttorny = document.createElement('style'), errTringleBorderCAttorny = document.createElement('style'), errTextMatterCAttorny = document.createElement('style'); errPopUpCAttorny.type = 'text/css'; errTringleBlockCAttorny.type = 
'text/css'; errTringleBorderCAttorny.type = 'text/css'; errTextMatterCAttorny.type = 'text/css'; var width = GetWidth(); var x = 0, y = 0; if (width > 734) { y = temp.top - 85, x = temp.left - 25; } else { y = temp.offsetTop, x = temp.offsetLeft; } //if (width > 734) { // console.log(posEle.x); // console.log(posEle.y); // y = temp.offsetTop-9 , x = temp.offsetLeft +405; //} //else { // y = temp.offsetTop + 57, x = temp.offsetLeft + 10; //} errPopUpCAttorny.innerHTML = ".errPopUpCAttorny{top:" + y + "px;left:" + x + "px;}"; errTringleBlockCAttorny.innerHTML = "{min-height: 40px;top: 17px !important;left: 24px;width:100%}"; errTringleBorderCAttorny.innerHTML = "{min-height: 40px,top: 17px !important;left: 24px;width:100%}"; errTextMatterCAttorny.innerHTML = "{min-height:40px;top:21px !important;left: 24px;width:100%}"; document.getElementsByTagName('head')[0].appendChild(errPopUpCAttorny); document.getElementsByTagName('head')[0].appendChild(errTringleBlockCAttorny); document.getElementsByTagName('head')[0].appendChild(errTringleBorderCAttorny); document.getElementsByTagName('head')[0].appendChild(errTextMatterCAttorny); cm.errTextMsg = errorMsg; cm.errorPopUpBlock = true; matterErrorEle.classList.add("errPopUpCAttorny"); matterErrorTrinageleBlockEle.classList.add("errTringleBlockCAttorny"); matterErrorTrinagleBorderEle.classList.add("errTringleBorderCAttorny"); matterErrorTextEle.classList.add("errTextMatterCAttorny"); } //#region Utilty functions var getUserName = function (sUserEmails, bIsName) { "use strict"; var arrUserNames = [], sEmail = "", oEmailRegex = new RegExp("^[\\s]*\\w+([-+.']\\w+)*@\\w+([-.]\\w+)*\\.\\w+([-.]\\w+)*[\\s]*$"); if (sUserEmails && null !== sUserEmails && "" !== sUserEmails) { arrUserNames = sUserEmails.split(";"); for (var iIterator = 0; iIterator < arrUserNames.length - 1; iIterator++) { if (arrUserNames[iIterator] && null !== arrUserNames[iIterator] && "" !== arrUserNames[iIterator]) { if (-1 !== arrUserNames[iIterator].lastIndexOf("(")) { sEmail = $.trim(arrUserNames[iIterator].substring(arrUserNames[iIterator].lastIndexOf("(") + 1, arrUserNames[iIterator].lastIndexOf(")"))); if (oEmailRegex.test(sEmail)) { arrUserNames[iIterator] = bIsName ? 
$.trim(arrUserNames[iIterator].substring(0, arrUserNames[iIterator].lastIndexOf("("))) : sEmail;
                        }
                    }
                }
            }
        }
        return arrUserNames;
    }

    cm.oMandatoryRoleNames = [];

    function validateTeamAssigmentRole() {
        var oAssignList = cm.assignPermissionTeams, iExpectedCount = 0, iActualCount = 0, iIterator = 0, iLength = cm.assignRoles.length;
        for (iIterator = 0; iIterator < iLength; iIterator++) {
            if (cm.assignRoles[iIterator].mandatory) {
                iExpectedCount++;
                cm.oMandatoryRoleNames.push(cm.assignRoles[iIterator].name);
            }
        }
        angular.forEach(oAssignList, function (oItem) {
            if (true == oItem.assignedRole.mandatory) {
                iActualCount++;
            }
        });
        if (iExpectedCount <= iActualCount) {
            return true;
        }
        return false;
    }

    /* Function to validate Permission */
    function validatePermission() {
        "use strict";
        var oPermissionList = cm.assignPermissionTeams, bIsFullControl = false;
        var Edit_Matter_Mandatory_Permission_Level = "Full Control";
        // angular.forEach passes (value, key) to the iterator; the previous
        // signature shadowed the list and treated the numeric index as the item.
        angular.forEach(oPermissionList, function (oPermissionListItem) {
            if (oPermissionListItem && oPermissionListItem.assignedPermission) {
                if (Edit_Matter_Mandatory_Permission_Level === oPermissionListItem.assignedPermission.name) {
                    bIsFullControl = true;
                }
            }
        });
        return bIsFullControl;
    }

    function getUserEmail(arrUsersEmails) {
        var sEmail = "";
        if (arrUsersEmails && 0 < arrUsersEmails.length) {
            for (var nCount = 0; nCount < arrUsersEmails.length; nCount++) {
                if ("" !== arrUsersEmails[nCount]) {
                    sEmail = arrUsersEmails[nCount];
                }
            }
        }
        return sEmail;
    }

    function getArrAssignedUserNamesAndEmails() {
        cm.arrAssignedUserName = [], cm.arrAssignedUserEmails = [], cm.userIDs = [];
        var count = 1;
        angular.forEach(cm.assignPermissionTeams, function (team) { //For loop
            cm.arrAssignedUserName.push(getUserName(team.assignedUser + ";", true));
            cm.arrAssignedUserEmails.push(getUserName(team.assignedUser + ";", false));
            cm.userIDs.push("txtAssign" + count++);
        });
    }

    function getAssignedUserRoles() {
        "use strict";
        var arrAssigneTeams = cm.assignPermissionTeams, nCount = 0, nlength, arrRoles = [];
        if (arrAssigneTeams) {
            nlength = arrAssigneTeams.length;
            for (nCount = 0; nCount < nlength; nCount++) {
                if (arrAssigneTeams[nCount] && arrAssigneTeams[nCount].assignedRole) {
                    if (arrAssigneTeams[nCount].assignedRole && arrAssigneTeams[nCount].assignedRole.name) {
                        if ("" !== arrAssigneTeams[nCount].assignedRole.name) {
                            arrRoles.push(arrAssigneTeams[nCount].assignedRole.name);
                        }
                    }
                }
            }
        }
        return arrRoles;
    }

    function getAssignedUserPermissions() {
        "use strict";
        var arrAssigneTeams = cm.assignPermissionTeams, nCount = 0, nlength, arrAssignRoles, arrPermissions = [];
        if (arrAssigneTeams) {
            nlength = arrAssigneTeams.length;
            for (nCount = 0; nCount < nlength; nCount++) {
                if (arrAssigneTeams[nCount] && arrAssigneTeams[nCount].assignedPermission) {
                    if (arrAssigneTeams[nCount].assignedPermission && arrAssigneTeams[nCount].assignedPermission.name) {
                        if ("" !== arrAssigneTeams[nCount].assignedPermission.name) {
                            arrPermissions.push(arrAssigneTeams[nCount].assignedPermission.name);
                        }
                    }
                }
            }
        }
        return arrPermissions;
    }

    var validateAttornyUserRolesAndPermissins = function () {
        var responsibleAttorny = 0, fullControl = 0;
        if (!cm.showRoles) {
            assignDefaultRolesToTeamMembers();
        }
        for (var iCount = 0; iCount < cm.assignPermissionTeams.length; iCount++) {
            if ("" !== cm.assignPermissionTeams[iCount].assignedUser) {
                if (cm.assignPermissionTeams[iCount].assignedRole && "" !== cm.assignPermissionTeams[iCount].assignedRole.name) {
                    if (cm.assignPermissionTeams[iCount].assignedPermission && "" != cm.assignPermissionTeams[iCount].assignedPermission.name) {
                        if
(cm.assignPermissionTeams[iCount].assignedRole.mandatory) {
                        responsibleAttorny++;
                    }
                    if (cm.assignPermissionTeams[iCount].assignedPermission.name == "Full Control") {
                        fullControl++;
                    }
                } else {
                    cm.errTextMsg = "Please provide at least one permission on this matter.";
                    cm.errorBorder = "";
                    cm.errorPopUpBlock = true;
                    return false;
                }
            } else {
                cm.errorPopUpBlock = true;
                cm.errTextMsg = "Enter at least one role for this matter.";
                cm.errorBorder = "";
                return false;
            }
        } else {
            cm.errTextMsg = cm.assignPermissionTeams[iCount].assignedRole.name + " cannot be empty.";
            cm.errorBorder = "";
            showErrorNotificationAssignTeams(cm.errTextMsg, cm.assignPermissionTeams[iCount].assigneTeamRowNumber, "user");
            cm.errorPopUpBlock = true;
            return false;
        }
    }
    // Every row passed; the matter still needs at least one mandatory-role user and one Full Control grant.
    if (responsibleAttorny >= 1) {
        if (fullControl >= 1) {
            return true;
        } else {
            cm.errTextMsg = "Please provide at least one user who has Full Control permission on this matter.";
            cm.errorBorder = "permUser1";
            showErrorNotificationAssignTeams(cm.errTextMsg, 1, "perm");
            cm.errorPopUpBlock = true;
            return false;
        }
    } else {
        cm.errTextMsg = "Enter at least one Responsible Attorney for this matter.";
        cm.errorBorder = "roleUser1";
        showErrorNotificationAssignTeams(cm.errTextMsg, 1, "role");
        cm.errorPopUpBlock = true;
        return false;
    }
}

// Set the team roles to the default (i.e. the mandatory Responsible Attorney role) when showRoles is false in the default settings.
function assignDefaultRolesToTeamMembers() {
    if (!cm.showRoles) {
        var arrAssigneTeams = cm.assignPermissionTeams, nCount = 0, nlength;
        if (arrAssigneTeams) {
            nlength = arrAssigneTeams.length;
            for (nCount = 0; nCount < nlength; nCount++) {
                if (arrAssigneTeams[nCount] && arrAssigneTeams[nCount].assignedUser && "" !== arrAssigneTeams[nCount].assignedUser) {
                    angular.forEach(cm.assignRoles, function (role) {
                        if (role.mandatory) {
                            arrAssigneTeams[nCount].assignedRole = role;
                        }
                    });
                }
            }
        }
    }
}

cm.onSelect = function ($item, $model, $label, value, fucnValue, $event, username) {
    var typeheadelelen = angular.element('.dropdown-menu li').length;
    var noresults = true;
    if (typeheadelelen == 1) {
        if (angular.element('.dropdown-menu li a')[0]) {
            if (angular.element('.dropdown-menu li a')[0].innerHTML == "No results found") {
                noresults = false;
                if ($event.keyCode == 9 || $event.keyCode == 13) { // Tab or Enter keeps the typed value
                    cm.user = angular.element('#' + $event.currentTarget.id).val();
                }
            }
        }
    }
    if ($item && $item.name !== "No results found") {
        if (value == "team") {
            // $label.assignedUser = $item.name + '(' + $item.email + ');';
            if ($label.assignedAllUserNamesAndEmails && $label.assignedAllUserNamesAndEmails.indexOf(';') > -1) {
                $label.assignedUser = $item.name + '(' + $item.email + ');';
                if ($label.assignedAllUserNamesAndEmails.indexOf($item.name) == -1) {
                    $label.assignedAllUserNamesAndEmails = $label.assignedAllUserNamesAndEmails + $label.assignedUser;
                    $label.assignedUser = $label.assignedAllUserNamesAndEmails;
                } else {
                    $label.assignedUser = $label.assignedAllUserNamesAndEmails;
                }
            } else {
                $label.assignedUser = $item.name + '(' + $item.email + ');';
                $label.assignedAllUserNamesAndEmails = $item.name + '(' + $item.email + ');';
            }
            cm.typehead = false;
            cm.notificationPopUpBlock = false;
        }
        if (-1 == cm.oSiteUsers.indexOf($item.email)) {
            cm.oSiteUsers.push($item.email);
        }
        $label.userConfirmation = false;
        cm.checkUserExists($label);
    } else {
        if (fucnValue == "on-blurr") {
            cm.user = username;
            $label.assignedAllUserNamesAndEmails = $label.assignedUser;
            var userEmailTxt = "";
            var userNames = getUserName($label.assignedUser, true);
            var userEmails = getUserName($label.assignedUser, false);
            var exsistingTeams = [];
            for (var i = 0; i < userEmails.length; i++) {
                if (userEmails[i] != "" && validateEmail(userEmails[i])) {
                    angular.forEach($label.teamUsers, function (team) {
                        if (team.userName == userEmails[i]) {
                            exsistingTeams.push(team);
                        }
                    });
                    var userNameAndEmailTxt = (userNames[i] == userEmails[i]) ? userEmails[i] : userNames[i] + "(" + userEmails[i] + ")";
                    userEmailTxt = userEmailTxt + userNameAndEmailTxt + ";";
                }
            }
            $label.assignedAllUserNamesAndEmails = userEmailTxt;
            $label.teamUsers = exsistingTeams;
        }
        if (fucnValue == "on-blurr" && typeheadelelen == 0 && noresults) {
            cm.checkUserExists($label, $event);
        }
        if (!noresults) {
            if (value == "team") {
                $label.assignedUser = "";
                $label.assignedUser = cm.user;
            }
        }
    }
};

cm.confirmUser = function (confirmUser) {
    if (confirmUser) {
        cm.notificationPopUpBlock = false;
        cm.notificationBorder = "";
        var userEmail = getUserName(cm.textInputUser.assignedUser, false);
        userEmail = cleanArray(userEmail);
        for (var i = 0; i < userEmail.length; i++) {
            if (i == cm.currentExternalUser.userIndex && userEmail[i] == cm.currentExternalUser.userName && userEmail[i] != "") {
                angular.forEach(cm.textInputUser.teamUsers, function (teamUser) {
                    if (teamUser.userName == userEmail[i]) {
                        teamUser.userConfirmation = true;
                        teamUser.userExsists = teamUser.userExsists;
                    }
                });
            }
        }
        cm.textInputUser.userConfirmation = true;
        cm.getExternalUserNotification = true;
        angular.element('#txtUser' + cm.textInputUser.assigneTeamRowNumber).attr('uservalue', cm.textInputUser.assignedUser);
        angular.element('#txtUser' + cm.textInputUser.assigneTeamRowNumber).attr('confirm', "true");
        angular.element('#txtUser' + cm.textInputUser.assigneTeamRowNumber).css('border-color', '#ccc');
    } else {
        cm.notificationPopUpBlock = false;
        cm.getExternalUserNotification = true;
        var userEmail = getUserName(cm.textInputUser.assignedUser, false);
        var userNames = getUserName(cm.textInputUser.assignedUser, true);
        userEmail = cleanArray(userEmail);
        userNames = cleanArray(userNames);
        var updatedUserEmail = "";
        for (var i = 0; i < userEmail.length; i++) {
            if (i != cm.currentExternalUser.userIndex && userEmail[i] != cm.currentExternalUser.userName && userEmail[i] != "") {
                if (userNames[i] == userEmail[i]) {
                    updatedUserEmail = updatedUserEmail + userEmail[i] + ";";
                } else {
                    updatedUserEmail = updatedUserEmail + userNames[i] + "(" + userEmail[i] + ");";
                }
            }
        }
        cm.textInputUser.assignedUser = updatedUserEmail;
        cm.textInputUser.userExsists = false;
        cm.textInputUser.userConfirmation = false;
        cm.notificationBorder = "";
    }
    angular.forEach(cm.assignPermissionTeams, function (team) {
        var keepGoing = true;
        if (keepGoing) {
            if (team.assignedUser && team.assignedUser != "") { // For loop
                var usersEmails = getUserName(team.assignedUser, false);
                usersEmails = cleanArray(usersEmails);
                for (var j = 0; j < usersEmails.length; j++) {
                    angular.forEach(team.teamUsers, function (teamUser) {
                        if (keepGoing) {
                            if (teamUser.userName == usersEmails[j]) {
                                if (!teamUser.userConfirmation) {
                                    cm.textInputUser = team;
                                    cm.currentExternalUser.rowNumber = team.assigneTeamRowNumber;
                                    cm.currentExternalUser.userIndex = j;
                                    cm.currentExternalUser.userName = teamUser.userName;
                                    showNotificatoinMessages(team.assigneTeamRowNumber);
                                    cm.notificationPopUpBlock = true;
                                    keepGoing = false;
                                    return false;
                                }
                            }
                        }
                    });
                }
            }
        }
    });
};

// Positions and styles the external-user confirmation pop-up next to the row's user textbox.
function showNotificatoinMessages(teamRowNumber) {
    var temp = document.getElementById('txtUser' + teamRowNumber);
    var notificationEle = document.getElementById("notificationBlock");
    var notificationTrinageleBlockEle = document.getElementById("notificatoinTrinagleBlock");
    var notificationTrinagleBorderEle = document.getElementById("notificationTrinagleBroderBlock");
    var notificationTextEle = document.getElementById("notificationText");
    notificationEle.className = "";
    notificationTrinageleBlockEle.className = "";
    notificationTrinagleBorderEle.className = "";
    notificationTextEle.className = "";
    notificationEle.classList.add("notificationPopUp");
    notificationTrinageleBlockEle.classList.add("notificatonTriangle");
    notificationTrinageleBlockEle.classList.add("popUpFloatLeft");
    notificationTrinagleBorderEle.classList.add("notificationTriangleBorder");
    notificationTrinagleBorderEle.classList.add("popUpFloatLeft");
    notificationTextEle.classList.add("notificatonText");
    notificationTextEle.classList.add("popUpFloatRight");
    var notifcationPopUpCAttorny = document.createElement('style'),
        notifcationTringleBlockCAttorny = document.createElement('style'),
        notifcationTringleBorderCAttorny = document.createElement('style'),
        notifcationTextMatterCAttorny = document.createElement('style');
    notifcationPopUpCAttorny.type = 'text/css';
    notifcationTringleBlockCAttorny.type = 'text/css';
    notifcationTringleBorderCAttorny.type = 'text/css';
    notifcationTextMatterCAttorny.type = 'text/css';
    var width = GetWidth();
    var x = 0, y = 0;
    if (width > 734) {
        y = temp.offsetTop + 53;
        x = temp.offsetLeft + 70;
    } else {
        y = temp.offsetTop;
        x = temp.offsetLeft;
    }
    cm.notificationBorder = "txtUser" + teamRowNumber;
    // Selectors match the classes added to the elements below.
    notifcationPopUpCAttorny.innerHTML = ".notifcationPopUpCAttorny{top:" + y + "px;left:" + x + "px;}";
    notifcationTringleBlockCAttorny.innerHTML = ".notifcationTringleBlockCAttorny{min-height: 40px;top: 17px !important;left: 24px;width:100%}";
    notifcationTringleBorderCAttorny.innerHTML = ".notifcationTringleBorderCAttorny{min-height: 40px;top: 17px !important;left: 24px;width:100%}";
    notifcationTextMatterCAttorny.innerHTML = ".notifcationTextCAttorny{min-height:40px;top:21px !important;left: 24px;width:100%}";
    document.getElementsByTagName('head')[0].appendChild(notifcationPopUpCAttorny);
    document.getElementsByTagName('head')[0].appendChild(notifcationTringleBlockCAttorny);
    document.getElementsByTagName('head')[0].appendChild(notifcationTringleBorderCAttorny);
    document.getElementsByTagName('head')[0].appendChild(notifcationTextMatterCAttorny);
    notificationEle.classList.add("notifcationPopUpCAttorny");
    notificationTrinageleBlockEle.classList.add("notifcationTringleBlockCAttorny");
    notificationTrinagleBorderEle.classList.add("notifcationTringleBorderCAttorny");
    notificationTextEle.classList.add("notifcationTextCAttorny");
}

function GetWidth() {
    "use strict";
    var x = 0;
    if (self.innerWidth) {
        x = self.innerWidth;
    } else if (document.documentElement && document.documentElement.clientWidth) {
        x = document.documentElement.clientWidth;
    } else if (document.body) {
        x = document.body.clientWidth;
    }
    return x;
}

// Validates every assigned user: site membership, the conflicted (blocked) user list, and pending external-user confirmations.
function validateUsers() {
    var keepGoing = true;
    var blockedUserEmail = cm.matterProperties.matterObject.blockUserNames;
    angular.forEach(cm.assignPermissionTeams, function (team) {
        if (keepGoing) {
            if (team.assignedUser && team.assignedUser != "") { // For loop
                var usersEmails = getUserName(team.assignedUser, false);
                usersEmails = cleanArray(usersEmails);
                if (usersEmails.length !== team.teamUsers.length) {
                    cm.checkUserExists(team);
                    keepGoing = false;
                    return false;
                } else {
                    for (var j = 0; j < usersEmails.length; j++) {
                        angular.forEach(team.teamUsers, function (teamUser) {
                            if (keepGoing) {
                                if (teamUser.userName == usersEmails[j]) {
                                    if (teamUser.userExsists) {
                                        if (-1 == cm.oSiteUsers.indexOf(usersEmails[j])) {
                                            // cm.blockedUserName.trim()
                                            cm.errTextMsg = "Please enter valid team members.";
                                            cm.errorBorder = "";
                                            cm.errorPopUpBlock = true;
                                            showErrorNotificationAssignTeams(cm.errTextMsg, team.assigneTeamRowNumber, "user");
                                            cm.errorBorder = "txtUser" + team.assigneTeamRowNumber;
                                            keepGoing = false;
                                            return false;
                                        }
                                        if (blockedUserEmail && blockedUserEmail != "") {
                                            blockedUserEmail = cleanArray(blockedUserEmail);
                                            for (var i = 0; i < blockedUserEmail.length; i++) {
                                                if (usersEmails[j] == blockedUserEmail[i]) {
                                                    cm.errTextMsg = "Please enter an individual who is not conflicted.";
                                                    cm.errorBorder = "";
                                                    cm.errorPopUpBlock = true;
                                                    showErrorNotificationAssignTeams(cm.errTextMsg, team.assigneTeamRowNumber, "user");
                                                    cm.errorBorder = "txtUser" + team.assigneTeamRowNumber;
                                                    keepGoing = false;
                                                    return false;
                                                }
                                            }
                                        }
                                    } else {
                                        if (!teamUser.userConfirmation) {
                                            cm.textInputUser = team;
                                            cm.currentExternalUser.rowNumber = team.assigneTeamRowNumber;
                                            cm.currentExternalUser.userIndex = j;
                                            cm.currentExternalUser.userName = teamUser.userName;
                                            showNotificatoinMessages(team.assigneTeamRowNumber);
                                            cm.notificationPopUpBlock = true;
                                            //cm.checkUserExists(team);
                                            //if (!cm.invalidUserCheck) {
                                            keepGoing = false;
                                            return false;
                                            //}
                                        }
                                    }
                                }
                            }
                        });
                    }
                }
            } else {
                showErrorNotificationAssignTeams(team.assignedRole.name + " cannot be empty", team.assigneTeamRowNumber, "user");
                cm.errorBorder = "txtUser" + team.assigneTeamRowNumber;
                keepGoing = false;
                return false;
            }
        }
    });
    if (keepGoing) {
        return true;
    } else {
        return false;
    }
}

// Collects roles and permissions from the assignment rows and pushes the update to the server.
cm.UpdateMatter = function ($event) {
    cm.popupContainerBackground = "Show";
    if ($event) {
        $event.stopPropagation();
    }
    var arrUserNames = [], arrUserEmails = [], arrTeamMembers = [], roleInformation = {},
        arrReadOnlyUsers = [], sResponsibleAttorney = [], sResponsibleAttorneyEmail = [];
    var arrBlockUserNames = cm.matterProperties.matterObject.blockUserNames ? cm.matterProperties.matterObject.blockUserNames : "";
    var attornyCheck = validateAttornyUserRolesAndPermissins($event);
    var validUsersCheck = validateUsers();
    if (validUsersCheck) {
        var checkUserDExists = validateCheckUserExisits();
        if (attornyCheck && validUsersCheck && checkUserDExists) {
            angular.forEach(cm.assignPermissionTeams, function (item) {
                if ("" !== item.assignedRole && "" !== item.assignedPermission) {
                    if (roleInformation.hasOwnProperty(item.assignedRole.name)) {
                        roleInformation[item.assignedRole.name] = roleInformation[item.assignedRole.name] + ";" + item.assignedUser;
                    } else {
                        roleInformation[item.assignedRole.name] = item.assignedUser;
                    }
                }
            });
            angular.forEach(cm.assignPermissionTeams, function (item) {
                arrUserNames.push(getUserName(item.assignedUser.trim() + ";", true));
                arrUserEmails.push(getUserName(item.assignedUser.trim() + ";", false));
                arrTeamMembers.push(getUserName(item.assignedUser.trim() + ";", true).join(";"));
                var User_Upload_Permissions = "Read";
                angular.forEach(cm.assignPermissionTeams, function (item) {
                    if (item.assignedPermission.name.toLowerCase() === User_Upload_Permissions.toLowerCase()) {
                        arrReadOnlyUsers.push(getUserName(item.assignedRole.name.trim() + ";", false).join(";"), ";");
                    }
                });
            });
            validateTeamAssigmentRole();
            getArrAssignedUserNamesAndEmails();
            var arrRoles = getAssignedUserRoles();
            var arrPermissions = getAssignedUserPermissions();
            angular.forEach(cm.assignPermissionTeams, function (item) {
                if (1 <= cm.assignPermissionTeams.length) {
                    if ("" !== item.assignedRole && "" !== item.assignedPermission) {
                        if (-1 !== cm.oMandatoryRoleNames.indexOf(item.assignedRole.name)) {
                            sResponsibleAttorney.push(getUserName(item.assignedUser + ";", true).join(";"));
                            sResponsibleAttorneyEmail.push(getUserName(item.assignedUser + ";", false).join(";"));
                        }
                    }
                }
            });
            var updatedMatterUsers = {
                Client: {
                    Url: cm.clientUrl,
                    Id: "",
                    Name: ""
                },
                Matter: {
                    Name: cm.matterName,
                    BlockUserNames: arrBlockUserNames,
                    AssignUserNames: arrUserNames,
                    AssignUserEmails: arrUserEmails,
                    Permissions: arrPermissions,
                    Roles: arrRoles,
                    Conflict: {
                        Identified: cm.sConflictScenario
                    },
                    FolderNames: [],
                    DefaultContentType: "",
                    ContentTypes: [],
                    Description: "",
                    Id: "",
                    MatterGuid: cm.matterProperties.matterObject.matterGuid
                },
                MatterDetails: {
                    PracticeGroup: "",
                    AreaOfLaw: "",
                    SubareaOfLaw: "",
                    ResponsibleAttorney: sResponsibleAttorney.join(";").replace(/;;/g, ";"),
                    ResponsibleAttorneyEmail: sResponsibleAttorneyEmail.join(";").replace(/;;/g, ";"),
                    UploadBlockedUsers: arrReadOnlyUsers,
                    TeamMembers: arrTeamMembers.join(";"),
                    RoleInformation: JSON.stringify(roleInformation)
                },
                EditMode: cm.isEdit,
                UserIds: cm.userIDs,
                SerializeMatter: "",
                Status: ""
            };
            updateMatterPermissions(updatedMatterUsers, function (response) {
                console.log(response);
                cm.popupContainerBackground = "hide";
                // cm.errTextMsg = "Error in updating matter: Incorrect inputs.";
                // showErrorNotificationAssignTeams(cm.errTextMsg, "", "btnCreateMatter");
                // cm.errorBorder = "";
                // cm.errorPopUpBlock = true;
                // cm.popupContainerBackground = "hide";
            });
        } else {
            cm.popupContainerBackground = "hide";
        }
    } else {
        cm.popupContainerBackground = "hide";
    }
};

function se
{
    angular.forEach(cm.assignPermissionTeams, function (team) {
        if (team.userConfirmation) {
            angular.element('#txtUser' + team.assigneTeamRowNumber).attr('confirm', "true");
        }
    });
}

// Re-checks the per-row 'confirm' flags set above and reopens the pop-up for the first unconfirmed row.
function validateCheckUserExisits() {
    var validUsers = false;
    var keepGoing = true;
    setTeamConfirmationValues();
    angular.forEach(cm.assignPermissionTeams, function (team) {
        if (keepGoing) {
            var userVal = angular.element('#txtUser' + team.assigneTeamRowNumber).attr('confirm');
            if (userVal == "false") {
                cm.textInputUser = team;
                showNotificatoinMessages(team.assigneTeamRowNumber);
                cm.notificationPopUpBlock = true;
            }
            validUsers = (userVal == "false") ? false : true;
            if (!validUsers) {
                keepGoing = false;
            }
        }
    });
    return validUsers;
}
// #endregion
}]);
})();
tTeamConfirmationValues()
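// -- A minimal sketch (assumptions, not part of the controller above) of the
// string helpers it calls: getUserName parses "Name(email);Name(email);" lists
// into names or emails, cleanArray drops blank entries, and validateEmail is a
// plain format check. The real implementations live outside this excerpt.
function getUserName(value, returnNames) {
    return value.split(';').map(function (entry) {
        var open = entry.indexOf('(');
        if (open === -1) {
            return entry.trim(); // bare email: name and email are the same string
        }
        return returnNames
            ? entry.slice(0, open).trim()
            : entry.slice(open + 1).replace(')', '').trim();
    });
}

function cleanArray(values) {
    return values.filter(function (v) { return v && v.trim() !== ''; });
}

function validateEmail(value) {
    return /^[^@\s]+@[^@\s]+\.[^@\s]+$/.test(value);
}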
iam-update-bounded-context-by-id.handler.ts
import { Injectable } from '@nestjs/common';
import { ICommandBus, IQueryBus, QueryStatement } from 'aurora-ts-core';

// @apps
import { FindBoundedContextByIdQuery } from '@apps/iam/bounded-context/application/find/find-bounded-context-by-id.query';
import { UpdateBoundedContextByIdCommand } from '@apps/iam/bounded-context/application/update/update-bounded-context-by-id.command';
import { IamBoundedContext, IamUpdateBoundedContextByIdInput } from '../../../../graphql';
import { IamBoundedContextDto, IamUpdateBoundedContextByIdDto } from '../dto';

@Injectable()
export class
{
    constructor(
        private readonly commandBus: ICommandBus,
        private readonly queryBus: IQueryBus,
    ) {}

    async main(
        payload: IamUpdateBoundedContextByIdInput | IamUpdateBoundedContextByIdDto,
        constraint?: QueryStatement,
        timezone?: string,
    ): Promise<IamBoundedContext | IamBoundedContextDto>
    {
        await this.commandBus.dispatch(new UpdateBoundedContextByIdCommand(payload, constraint, { timezone }));
        return await this.queryBus.ask(new FindBoundedContextByIdQuery(payload.id, constraint, { timezone }));
    }
}
IamUpdateBoundedContextByIdHandler
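// A hedged usage sketch (not from this repo): a GraphQL resolver delegating to
// the handler above, following common schema-first @nestjs/graphql patterns.
// The resolver class name and mutation key are assumptions; the handler and
// graphql types are the ones imported in the file above.
import { Args, Mutation, Resolver } from '@nestjs/graphql';

@Resolver()
export class IamUpdateBoundedContextByIdResolver
{
    constructor(private readonly handler: IamUpdateBoundedContextByIdHandler) {}

    @Mutation('iamUpdateBoundedContextById')
    async main(@Args('payload') payload: IamUpdateBoundedContextByIdInput): Promise<IamBoundedContext>
    {
        // dispatch the update command, then re-read the aggregate by id
        return await this.handler.main(payload);
    }
}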
user.js
"use strict"; const router = require("express").Router(); const auth = require(process.cwd() + "/config/lib/authorisation.js"); const User = require("mongoose").model("account"); router.delete("/API/user", auth.isAuthorised("ALTER_USERS"), function (req, res, next) { if (req.body.id == req.user._id) { res.sendStatus(400); } else { User.findOneAndRemove({ "_id": req.body.id }, function (err) { if (err) { err.shouldReload = true; err.status = 400; next(err); } else { res.sendStatus(200); } }); } }); router.post("/API/user", auth.isAuthorised("ALTER_USERS"), function (req, res, next) { const newAccount = new User(req.body); newAccount.save(function (err) { if (err) { err.shouldReload = true; err.status = 400; next(err); } else { res.redirect("/options"); }
    if (req.query.id) {
        req.query._id = req.query.id;
        delete req.query.id;
    }
    const count = parseInt(req.query.count, 10);
    delete req.query.count;
    User
        .find(req.query, ["_id", "username", "rank", "school"]) // Do not show hashed password
        .limit(count || 20)
        .exec(function (err, users) {
            if (err) {
                err.shouldReload = true;
                err.status = 400;
                next(err);
            } else {
                res.send(users);
            }
        });
});

module.exports = router;
    });
});

router.get("/API/user", auth.isAuthorised("VIEW_OPTIONS"), function (req, res, next) {
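// A minimal wiring sketch (assumptions: file location and port) showing how the
// router above would be mounted in the Express app; body parsing must be set up
// before the POST/DELETE handlers can read req.body, and the mongoose "account"
// model must be registered before the router file is required.
const express = require("express");
const app = express();
app.use(express.json());
app.use(express.urlencoded({ extended: false }));
app.use(require("./routes/user")); // path to the router file is an assumption
app.listen(3000);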
index.js
import React from "react"
import { Link } from "gatsby"

import Layout from "../components/layout"
import Image from "../components/image"
import SEO from "../components/seo"
import Kelsie from "../images/kelsie-adorable.jpg"
import Navigation from "../components/navigation"

const IndexPage = () => (
  <Layout>
    <Navigation />
    <SEO title="Home" />
    <section className="summary">
      <h2>Professional Summary</h2>
      <img className="profile-img" src={Kelsie} alt="Classic Kelse" />
      {/* Summary */}
      <h3>Summary</h3>
      <p>Energetic, creative, self-motivated individual with a passion for winemaking. Outgoing team player with excellent interpersonal skills, allowing for successful interactions with customers in varying situations. Interacts easily with people of diverse backgrounds, cultures, and professions. Possesses effective speaking and listening skills as well as outstanding verbal and written communication skills. Works well with others and is able to complete tasks with little or no supervision.</p>
    </section>
    <section className="content-wrap skills">
      {/* Job Details */}
      <h2>Skills</h2>
      <ul className="nobullets">
        <li>Pruning; cane pruned vines in the winter</li>
        <li>Harvest experience; harvested red and white varieties in a vineyard</li>
        <li>Marketing; advertised companies and studied marketing in college</li>
        <li>Customer relations; worked and communicated with customers</li>
      </ul>
    </section>
    <div className="banner">
    </div>
    <section>
      {/* Experience */}
      <h2>Experience</h2>
      <div className="row-1">
        <div className="column">
          <h3>Vineyard Worker</h3>
          <p>February 2018-present</p>
          <p className="bold">Wawawai Vineyard and Winery</p>
        </div>
        <div className="column">
          <ul>
            <li>Observed vines to determine those in need of pruning</li>
            <li>Performed cane pruning</li>
            <li>Installed new trellis, adjusted or repaired existing trellis</li>
      </div>
      <hr />
      <div className="row-1">
        <div className="column">
          <h3>Secretary</h3>
          <p>May 2017-present</p>
          <p className="bold">WSU Viticulture and Enology Club</p>
        </div>
        <div className="column">
          <ul>
            <li>Recorded meeting notes</li>
            <li>Assisted other officers</li>
            <li>Coordinated field trips and enology-related activities</li>
            <li>Planned fundraising events</li>
          </ul>
        </div>
      </div>
      <div className="row-1">
        <div className="column">
          <h3>Sales and Marketing Representative</h3>
          <p>June 2015-August 2015</p>
          <p className="bold">Margaritaville Marketing</p>
        </div>
        <div className="column">
          <ul>
            <li>Assisted in launch of new location</li>
            <li>Solicited prospective customers</li>
            <li>Advertised business to locals and tourists</li>
            <li>Implemented a coupon campaign and oversaw distribution</li>
          </ul>
        </div>
      </div>
    </section>
    {/* Education */}
    <section>
      <h2 id="education" className="education">Education</h2>
      {/* School 1 details */}
      <h3>Bachelor of Science, Viticulture and Enology</h3>
      <p>Washington State University, 2019</p>
      <p>14-week full-time program.</p>
    </section>
  </Layout>
)

export default IndexPage
            <li>Assisted with establishing new vines</li>
            <li>Harvested multiple varieties of red and white grapes</li>
          </ul>
        </div>
user.type.js
"use strict"; var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function (k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
exports.UserType = void 0;
const graphql_1 = require("@nestjs/graphql");
const user_role_enum_1 = require("../entities/user-role.enum");
let UserType = class UserType {
};
__decorate([
    graphql_1.Field((type) => graphql_1.Int),
    __metadata("design:type", Number)
], UserType.prototype, "id", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "first_name", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "last_name", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "phone", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "email", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "username", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "password", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", Boolean)
], UserType.prototype, "is_active", void 0);
__decorate([
    graphql_1.Field(),
    __metadata("design:type", String)
], UserType.prototype, "role", void 0);
UserType = __decorate([
    graphql_1.ObjectType('User')
], UserType);
exports.UserType = UserType;
//# sourceMappingURL=user.type.js.map
};
Object.defineProperty(exports, "__esModule", { value: true });
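// A hedged reconstruction (not in the dump) of the user.type.ts source that the
// compiled output above corresponds to; field names and types are read straight
// from the __decorate/__metadata calls, while the role typing is an assumption
// (the user-role.enum require above is present but its usage is not visible).
import { Field, Int, ObjectType } from '@nestjs/graphql';

@ObjectType('User')
export class UserType {
    @Field((type) => Int)
    id: number;

    @Field()
    first_name: string;

    @Field()
    last_name: string;

    @Field()
    phone: string;

    @Field()
    email: string;

    @Field()
    username: string;

    @Field()
    password: string;

    @Field()
    is_active: boolean;

    @Field()
    role: string;
}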
create_test.go
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build unit
// +build unit

package clusters

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/mongodb/mongocli/internal/mocks"
	"github.com/mongodb/mongocli/internal/test/fixture"
	"github.com/spf13/afero"
)

func TestCreate_Run(t *testing.T)
{
	ctrl := gomock.NewController(t)
	mockStore := mocks.NewMockAutomationPatcher(ctrl)
	defer ctrl.Finish()

	expected := fixture.AutomationConfig()

	appFS := afero.NewMemMapFs()
	// create test file
	fileYML := `
---
name: "cluster_2"
version: 4.2.2
featureCompatibilityVersion: 4.2
processes:
  - hostname: host0
    dbPath: /data/cluster_2/rs1
    logPath: /data/cluster_2/rs1/mongodb.log
    priority: 1
    votes: 1
    port: 29010
  - hostname: host1
    dbPath: /data/cluster_2/rs2
    logPath: /data/cluster_2/rs2/mongodb.log
    priority: 1
    votes: 1
    port: 29020
  - hostname: host2
    dbPath: /data/cluster_2/rs3
    logPath: /data/cluster_2/rs3/mongodb.log
    priority: 1
    votes: 1
    port: 29030`
	fileName := "test_om_create.yml"
	_ = afero.WriteFile(appFS, fileName, []byte(fileYML), 0600)

	createOpts := &CreateOpts{
		store:    mockStore,
		fs:       appFS,
		filename: fileName,
	}

	mockStore.
		EXPECT().
		GetAutomationConfig(createOpts.ProjectID).
		Return(expected, nil).
		Times(1)
	mockStore.
		EXPECT().
		UpdateAutomationConfig(createOpts.ProjectID, expected).
		Return(nil).
		Times(1)

	if err := createOpts.Run(); err != nil {
		t.Fatalf("Run() unexpected error: %v", err)
	}
}
utils.ts
import { send } from '@sapphire/plugin-editable-commands';
import { Message, MessageEmbed } from 'discord.js';
import { RandomLoadingMessage } from './constants';

/**
 * Picks a random item from an array
 * @param array The array to pick a random item from
 * @example
 * const randomEntry = pickRandom([1, 2, 3, 4]) // 1
	return array[Math.floor(Math.random() * length)];
}

/**
 * Sends a loading message to the current channel
 * @param message The message data for which to send the loading message
 */
export function sendLoadingMessage(message: Message): Promise<typeof message> {
	return send(message, { embeds: [new MessageEmbed().setDescription(pickRandom(RandomLoadingMessage)).setColor('#FF0000')] });
}
 */
export function pickRandom<T>(array: readonly T[]): T {
	const { length } = array;
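// Assumed usage sketch (not part of utils.ts): posting the random loading embed
// before slow work in a command; with @sapphire/plugin-editable-commands, a
// later send() on the same message edits the reply instead of posting a new one.
import type { Message } from 'discord.js';
import { sendLoadingMessage } from './utils';

export async function withLoading(message: Message, work: () => Promise<unknown>): Promise<void> {
	await sendLoadingMessage(message);
	await work();
}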
mod.rs
use block_powchain::{Block, BlockHeader, BlockInterlink, BlockBody};
use account::{Account, AccountsList, Receipts};
use std::path::Path;
use std::fs::OpenOptions;
use beserial::{Serialize, Deserialize, SerializingError};
use failure::Fail;
use keys::Address;
use std::io::{Error as IoError, Write};
use hash::Blake2bHash;
use hex::FromHexError;

pub struct PowChainGenesis {
    genesis_block: Block,
    genesis_hash: Blake2bHash,
    genesis_accounts: &'static str,
}

#[derive(Debug, Fail)]
pub enum PowChainError {
    #[fail(display = "Serialization failed")]
    SerializingError(#[cause] SerializingError),
    #[fail(display = "I/O error")]
    IoError(#[cause] IoError),
    #[fail(display = "Invalid hex encoding")]
    HexEncodingError(#[cause] FromHexError)
}

impl From<SerializingError> for PowChainError {
    fn from(e: SerializingError) -> Self {
        PowChainError::SerializingError(e)
    }
}

impl From<IoError> for PowChainError {
    fn from(e: IoError) -> Self {
        PowChainError::IoError(e)
    }
}

impl From<FromHexError> for PowChainError {
    fn from(e: FromHexError) -> Self {
        PowChainError::HexEncodingError(e)
    }
}

impl PowChainGenesis {
    pub fn write_to_files<P: AsRef<Path>>(&self, directory: P) -> Result<(), PowChainError> {
        let block_path = directory.as_ref().join("block.dat");
        info!("Writing block to {}", block_path.display());
        let mut file = OpenOptions::new().create(true).write(true).open(&block_path)?;
        self.generate_block()?.serialize(&mut file)?;

        let accounts_path = directory.as_ref().join("accounts.dat");
        info!("Writing accounts to {}", accounts_path.display());
        let mut file = OpenOptions::new().create(true).write(true).open(&accounts_path)?;
        //AccountsList(self.generate_accounts()?).serialize(&mut file)?;
        file.write_all(&hex::decode(self.genesis_accounts)?)?;

        Ok(())
    }

    pub fn generate_block(&self) -> Result<Block, PowChainError> {
        Ok(self.genesis_block.clone())
    }

    pub fn generate_genesis_hash(&self) -> Result<Blake2bHash, PowChainError> {
        Ok(self.genesis_hash.clone())
    }

    pub fn generate_accounts(&self) -> Result<Vec<(Address, Account)>, PowChainError> {
        let accounts: AccountsList = Deserialize::deserialize_from_vec(&hex::decode(self.genesis_accounts)?)?;
        Ok(accounts.0)
    }

    pub fn main() -> Self {
        Self {
            genesis_block: Block {
                header: BlockHeader {
                    version: 1,
                    prev_hash: [0u8; 32].into(),
                    interlink_hash: [0u8; 32].into(),
                    body_hash: "7cda9a7fdf06655905ae5dbd9c535451471b078fa6f3df0e287e5b0fb47a573a".into(),
                    accounts_hash: "1fefd44f1fa97185fda21e957545c97dc7643fa7e4efdd86e0aa4244d1e0bc5c".into(),
                    n_bits: 0x1f01_0000.into(),
                    height: 1,
                    timestamp: 1_523_727_000,
                    nonce: 137_689,
                },
                interlink: BlockInterlink::new(vec![], &[0u8; 32].into()),
                body: Some(BlockBody {
                    miner: [0u8; Address::SIZE].into(),
                    extra_data: b"love ai amor mohabbat hubun cinta lyubov bhalabasa amour kauna pi'ara liebe eshq upendo prema amore katresnan sarang anpu prema yeu".to_vec(),
                    transactions: vec![],
                    receipts: Receipts::default(),
                }),
            },
            genesis_hash: "264aaf8a4f9828a76c550635da078eb466306a189fcc03710bee9f649c869d12".into(),
            genesis_accounts: {"\
05740fe832581bf6a0892412acfb9651b451c509831d0000000005dbf2a54718ce70a65cb6c7e08a\
bd6b24373b33574e16e70d00000000046458a440306108b9072bfcb59984fad0ecba5e6c1de46187\
01000000204faa14e0200020d0290be350f9dd1263c9d915cbe23bfb3b000000010001fa40000000\
1027d50a70000000204faa14e05a864ffbccc17d57674ee0db678cc347ec9281ea00000000002ea0\
3010861bc9dfd24ba6a9c25daa883b6443b04b566c030000000000083d871088b5db434af0385e24\
dcf12676a24191f4f062c000000000011f6a03f8a1c62efca1ccaa68b433be7518a42f5245e1d2db\
0000000004e3b29200a6dd0618146a400ae1b0f96a731dda2c3eb57203000000000319f94b68f8a0\ 14c0411bba7e44707d8ce3ecd2f6c78f26d00000000007d1b50006119474e10409fe87014f0f3732\ 9942f264d41e43000000000005f5e1001330c9b007dbb2eaae43b07882c23478444b0bf100000000\ 003abe203221fe73de48a5728ebca0df7117d65ccebbde5f110000000000753afc8028d78f338d97\ ffb63d471d74403a3bc2651a100a00000000016b7f8cb5293358cc662c49cad9d452b268d415eee8\ f935b50000000001a3f2c77f351a1422e9079b4014366db376967f1d38e0feda0000000014f46b04\ 003a0111c0a3bfcba85abb7fc15c70cc76d9c613f80000000000cd3802f258a510bb93bb432f3f20\ f775ba5d24cd6c2201c300000000012d935e80590ae4d457fd6a2d6ff1a02ada5f626ff5750ae000\ 0000000447d1658f617df9944cd4a58c214305ad1f84fd83288005ae00000000048e9afbe077db9c\ a8efcaef7c4ee73df7434e0c9f55aba4c1000000001c025ebfce8291cb3d6dcd6a57e41c9b5a57d3\ e124f4d3e10c000000000c393e6d00863e303e23ed5c7aa5c2a65ab7e62c057f781f870000000008\ 26299e00b52a4999e713cdc70258851e5276b69c01dd86f40000000003d8d5eb806533bb559e5327\ e2b49356e06b19dfeb7372c45901000000024d8caec0bc40dcd372a89c9283d3afb8afb3403c5c08\ b59e000000010001fa400000000126c65760000000024d8caec0da88f36168bb3c6b0d67642b9f06\ 45d18599e20c0000000002447d58c0db199de0d21cf3f990c1a9b1ceafb315e8c73c3a0000000002\ c41dd8e8e161a406e95ca4c9b2781c56bc8848c3f6a61d9300000000006a64f040ffc1fa35dbe497\ eb815d574df3553c8a0661325b0000000001530e8db00e7c86436e4bdcdc0f539ba6c1bdbc5f21bd\ 8a3f0000000000684ee1801bce364605cb171636b5ae4accda5d058d267a590000000040091a6458\ 203a483d03ed864ec53fc85035229c864f936cdb00000000031c3af6b527d71fcaa22ae63b06f555\ a37240bcf4a96edaa100000000006a0b94a42cb6cc75b288ba98f5c9362a7ab5fa49a92246380000\ 00000055a310b04684d5f2c67e7391f3de1f4d858cd72d1d430e890000000001ff1c1dc046c690af\ 6ffd722e3988109e2b8250c52c53b1da0000000002098a67806bdba8f298f9b37773b8c8a4116c3f\ f33f41f92300000000001a13b86073465fdfbe5852c5c2d1dfad9dc2345fa465a945000000000120\ 8c438081344309386f7408529103239f0c831068cca53d0000000005a4ba811d916244ae40498dcf\ 7d93842865345aa8902e5e7f00000000041314cf0096157bdc37238df523356568db3f21a23e64ff\ 5800000000018c5ef280ad549ea164476e2ae30eb80bf250c9f1fc68fc3e000000000165a0bc00ea\ 98d97a921f95de5713e935dacef968ae52e194000000000ba43b7400f688830512d6827ed36ebeeb\ a186d275dd86c4bc00000000014dc93800122664ac12cc191e67afc9dad13a55a3c3019928000000\ 00008725c14f25a35154812e05ecffcf43137f97be92f134b6bd0000000001d562f6c03c012e4b4a\ fcb49a29e4ae4a6c29135f94b864f500000000013a2d13c04c89eb5fca10b0729151e0f578f51b5c\ b9cc78d20000000045bd673100603f126807abb650b4a061fc39970e599f176e0d0000000003a55d\ bf9c6f1f235ef03f7f1cf32db67a315ffa2347a3910900000000004c5a82401cd00cc47737684602\ ad7d3b653391e7b1581bcc01000001977420dc006fb0f9413145fbadf20146b6194898a62042ae5e\ 000000010003f48000000043e8b024ab000001977420dc0074c9f67ea50cab0dc9cd85590b06b98d\ 5abf5778000000000067a6a7188270736c17075cd5784c8ceaab260876017277c000000000003acc\ 4dbe94e4d8a18f7a8bf2b2a53e841ec1b6624d3fd7f400000000002e618c8296e93db661d6e4d5b3\ 575b61f2be22ad2b53a85d0000000001a13b8600c7ec87bbf1ac6d70fd5699adb77fee4883190cd3\ 00000000010f337d80e91515a9ed2a740d9913af7d13835d29795ec3f7000000000147b05432f2f6\ 4a832d172857493f6d7ef22c0913fa8ff0bd000000000502fd6f40077f86f89d9afe4fa5add3234b\ 3c4e3b58435a930000000002098a67801522c8d201edf3a49107c0a55ba106a59bf839a300000000\ 0001312d001743b24dcedef3590939ae58aef13bba779f756500000000008adf9c4d49c10cf90677\ 48ceb1fa0f8119596e44ad4780e00000000000ffbc853d513b17d490262fb91f78b1f48aa16556b3\ df9cde000000000071d849805d5c11f6e71c2f258036000db4fa4264407e558900000000018a4f23\ 
a263975584ca764c5aaaa407e1349380d0b6509d900000000001a13b860074b88e8d49555c9c6069\ 7c0975349de7bcd82bbb0000000000684ee1807ab7853fa977a4f2c65ed13583f6b16fea4fc86d00\ 00000000009896807dc73b7bb818a9c74183ee12b4d3a0c4a4aebaf5000000000c409cc7598126f5\ 86279aa5bed7af570a99ad6c43b745f19c000000000046f25398856a672bdd1a333811f377f6fc88\ 1d465b7bbce60000000001802cc29f87d808a720b0cfcb1076aa345908fd1ec169b5230000000004\ 1314cf00ddda74a5140a42cb37a1169dd348b4077397eb3d0100000011601c20208dbb4c872de726\ 79639392d5482e26efe6f4485e000000010001fa4000000008b00e101000000011601c2020900aca\ 2847105782d8882fb3f43ea05ade9c64ef000000000015222209a0cf0841bb303ae4754072112a95\ 9d72cd7b400a0000000000ee4fb0c0d1d1b34d5b2f2b336268109643113c65364269290000000000\ 7c7a7480ec15f7168ab643b6736203858bf340cb690a189f00000000012aba8c32eda0d538062601\ adb8f73f9174087132e51f877d0000000001596604ea21e8359101377f4108ae1c82b17aa28f41c0\ 44b80000000000342770c0508e08330ed1e7dfbe753d311195bbc70008e546000000000271a9fba0\ 573f6fbe9936a9917c6fae7265723f8222e27c940000000000315b4334637164ca1a9de33fed5b71\ 4743946add2dd9262d000000000137bb77807efdd0064f4a3bf50df9a67713227e20b38823e30000\ 000005312637b48fd1613d58f353f00f01e07acd13819087f0cc950000000006c366d50992887a64\ 19696791e8749784fe05568c5e40e1cc000000000061f325ba9b2045abcdf7f83abd506d2033841f\ 283cfdd8460000000001a13b8600bd215bf873d59bd34515f13589acd231e88ac19400000000061c\ 9f3680c1a1e88e2cbe3f8555539d26ca28f732d242aefa0000000003fe383b80c74ce8379dee3080\ 8dfec07c8be5d30f388bd97100000000009fdcc4d9c7d610319e587b31d7e8c6580f84162e4ae5dd\ 290000000000c62f7940f704df51a09505aa14ab6840bc56c6f2c011f7b2000000000067a6a718fa\ f22741f8b74ed648058b2e9b309ecb7aaa5e830000000000211d7b910ccaeda01a8e383889bb4161\ 3e5262cb89430cbb0000000000d09dc3001059c5ece6b2e24d34c663fa9ac3c274be5ea6e4000000\ 000f4fd8398011651fb231f2bd049e99fd00c37b4e0efa2e2e6e00000000018ed329ea12e425986d\ 5fca768ee9928782a9abe72c8bb73b0000000006aea73f0016dd7a4b71cf8bf1b25c872d1e827459\ e910adc300000000006949128d17b858528b3b2f620906b989e393bdb5cfc3ed250000000000ef70\ 625027261965b9b740c1d7bade614748e25c30aa88b60000000001a13b8600c81c8fc0c210c6aced\ eab6a6d74d44b218c2b6d101000000131c84218052b787460800d22744dcab004cf6fa3e6ce1eea1\ 000000010001fa40000000098e4210c0000000131c8421805dc7986ef6c46219dd739a6b7af13f5b\ 19e3bee10000000003aac5ed806a1f98f5d61271747e8e7fc849c94f865ef9825e000000000b90bc\ 5a3d82ea349d70b97645a0261da529f27ad1243c072d000000000342770c0090065d6bbc3ce0d0f4\ 510225e2a5d1b0445d4fd3000000000039bafbd6a014c8b966ea695ed1d2f5e47ff50c95ba871953\ 0000000001a0dcb9d4a17c4dd00c092a8a2004c5f7518c9acb4643b14900000000001941432eab01\ 005cf2e76f6ffe8e55b4f70dd426463b8c9d0000000000127a3980d7752a98401c5c189ae0714c9d\ 08fd26ea6be2b300000000003d57dd94d975f4df44d4a856deb0384e39591953030c36e200000000\ 1f55523dc4ed2de0dd99eb33f84b9d1c8d77924d0e1ee07d480000000000c84ba280f6ab6417dd46\ d9470ccaf1a54e811065049dfe8c00000000003fc6e780fc0ab2982448c4d8e322e63d5e3a773d4e\ 110b0d000000000c393e6d00fc3c5025807fb83e82572b31ec219691545c906f000000000092abd5\ ad1fef9247c66bd40c179322d5ad14bee131d2ecdc0000000000f601593620d7f8dc95bd8401d14d\ 59907cc01c1cdeb79ff300000000001494dc202dc0962730686692e0a86f5c339d298307ce908800\ 0000000826299e004197c2f41e49f8c1f14daee9dc2705ea251005f8000000000150e29017634bdd\ de908fe8e811ad363f4f243533f9f46d9400000000025c24d91ca3a57f5e415ee1233f6eece184bf\ 7ae720d7c5e400000000007a484b6aab79ef99f1fc70a36055e2117e2c087d8f49c4f10000000000\ 0bebc200c3635b4e2447e294aa387803ff85bd0a9756da030000000000d30ca447e3eab835cb3b8f\ 
7baf13a7a24c83c639a881c88700000000067f19c8c0e6c088e97dce68d1830259283111e010c243\ f11f000000000bad2c4580fb463f8e84f7abcc66d0e73082beae440f48a736000000000238414d97\ 060c290288450d7d195688cfc1d5b9e01b1052670000000000f0be369914059a855f43070321053f\ ef753df4d6d9e0deca0000000000684ee18025900207bfaa657ec95a88ab7374d1cdbf3a1e5e0000\ 0000462074568b30d148ddd97cee63f0f7b287fba57bada33c481a000000000011e1a30033678b8d\ e1b5899b816d06775798044befeaf089000000000187b42f203b879a4a9566c44a5c450a7b03fcff\ 38f21a2e9200000000059649f4cc49dc2b60c3ab52c2730dc9462ee9a0fd9e446efe00000000030b\ eb14f54ad9a924d25b4f8b872bacc145c88caeb25133e800000000008f0d18004d4d450f5fd6ec65\ 16b5b2297b42dd1a22a9f20b000000000163d6f8806a254fbfb5c5a1263f8f80a68688953ba6f28c\ 630000000000483e5c766a325f9ec8a2d7e506896019a53f918715b15ee8000000000a7a35820075\ cbab0e9e6e4633c7dc3f5da9735de2fb670f310000000001caeb96c7880ea341bd788c56cb7b4158\ 6cd7c64b07e5eb490000000000b57f0340a8819828241b4013e6673080de8d85ed3f426b46000000\ 000136906500abacf682e6047c7d6322a9ec6e14e774e2b4c1900000000000ab10b980ad89fce19b\ 294eb1098beca9fbef757f59c3fd870000000001646f8f00e094f561faf744eff949370ec8469696\ 8dc6a9d6000000000001312d00d0f0e976f65db8c3a929be04975144e071b7c83a0100000a680eff\ 7820ee097e8e28cd348632655204480986ab2db95db2000000010001fa4000000534077fbc100000\ 0a680eff78203317554a8eaa82ea4e1038601c39aa2b2ba24de600000000005e0a29ce3d87785fc4\ 40fd50ea63f5bbf92099f0608f9878000000000200294dfd40ef10fadc1ea3e58095e22908a23236\ 67c6fffe000000000133deb280d8ea82ac1f8c63395069511e4ede1b264868f5e40100000056e089\ 936042eb86b8e60837407119c32d0c3cbff56c31590d000000010001fa400000002b7044c9b00000\ 0056e08993606a3bffe260edfed0e8ce942dd50866a6e0cd0ba3000000000125c90ba06d1a5452ac\ a4820a817f1a491e661c290ceaca4c000000000045de5a4092d619680ba162e224f61a4172f8c15f\ 4a7ad57d00000000061b0eab709ec9f08e2eb1a882e32f5622ae2ad0b4093db23f0000000001babf\ f6b0a0bc9f4fc0650dcca786021b045be2a3661edbe0000000000c1467fa73a6f46adf12649bc9f1\ 9b63d0cd87746428e2750f0000000000023056e8c1d7463a55837d93ce192a835d5eb1db7ab784cc\ 0000000002540be400c2248af122b76deb57c5832344fe4ea79080e1c50000000009acdbe86bd30a\ a388dbba79b79b7f67d0187adb47be653880000000000560457080d65011ca76553ccab08fed3d1d\ 311d69213e7d22000000006beb1dc508ea541645449952338257133c6faf62fdd16b8ca100000000\ 0110b44ab60c4de994d1d5db1fcbcb6cede802023b24c6f25f000000000395e95a002059262cc1a5\ fba3e4f4be85ebf7b8bcaa2d880100000000005f5e100021615c1a1f27bc3d194f35412b961544b7\ 0101c900000000019f24823d6b2445536fb5df48c86016448f7a6998b2cf6e2200000000012e7e5a\ c06f4e4cd6075f0614b38048b423c2b988a0f051de000000000138eca480998dc7e21bbc40d4719d\ 9411d6d9ef4777fcff1300000000027c975f58da5315a50def11b5ae54cef46d9c124c571b3a5500\ 000000003d278480e07bb817040fe712689af2c9f1958df0966462c200000000037e11d600eb0d5a\ e8339739aaf6cf764f0ca1fb9b4473f4110000000000c5c7b724fab858a2e3b568e7b9ceabf8c401\ a501cd2924a000000000000bfe3e1c1f9a762b8717eec2a5a76f6d8764c8b3bb6f0f340000000000\ 0fa56ea0328e0b834be388c7070ec8ea106f844413c4d75d00000000187aa4d766cbf350f2ee1d06\ ed710ab527d870e6081f075a2c010000000409f4b0205d0286d9c3cbc8cecb90f09feba76cd4bb93\ cb8a000000010001fa400000000204fa58100000000409f4b020806f1c465dccc9b672edd4e6d705\ 2d11bfeeae64000000000057bec2e68c7ed38dfbaab8366a092e339bec61977aa0dc350000000000\ 6674350ea767a9c973ab1ba914fac41a2e434f33deab5e1f0000000037f17dba30af96d946f3eb83\ 6866c95f28ea3521e26fd3fb8500000000006ce92f37c3135c86645ac965c93d3d1e1a9f0c1ec586\ a150000000000794219580de71cfc3487a21890857499d6ce66bfac420e645000000000ba7366480\ 
efb2e6dfa864b8f336a3d4609672385535d8c26000000000013e49ef0000878b8ccc0fa3eee7fe2a\ 38f5c073912f432e5200000000028b0b363d483edf51a60f8ece9e358c143150f3cce5e162af0100\ 0000114f91cfc00578229584b791b31bb070f47303a8332e12a584000000010001fa4000000008a7\ c8e7e0000000114f91cfc02e9727f7d4d70fdd45a6a046082f07ec4165c4ac00000000006f319bc8\ 331e79690bf28c46e39eb5a5ba093e9a79ae47de000000000160fdbb034a96572eb401098379e114\ 6c7483e1f8035f58d5000000001080880cdb4c2c75a3afb9674337f0faf5293df6b960822e7d0000\ 0000022300f5804df5287455f5c039a1dc0ed915f6b8a824b1a7160000000000a7f12a004ed51a7b\ 934f15910c2299d4b9aa260b3484710a0000000000d0052c80512bab2b37bbfb95f8b33b98ef47c7\ 8e66064d630000000006d1334a326331111ff15dbf53b7b9f5cd73fd0c8f045ea021000000000000\ 989680731f08d1d137c7f273c0f8889a9f2263e20ade5d000000001c087f4f48a0bc8224acdea4de\ 97a8f172460298606e643ab40000000008bc023680af6760ddac22c7ece780f186d4fca65d6d0e8b\ de000000000017d78400cac546fbaad0bfe34451aefb4eed69c1939c5701000000000137b6946ce5\ 0945886192af8dea8720632798e4e505f9ecb800000000002d346134f730dedea7d927a1e5fbe7eb\ 7e3c56bd3ad3e04100000000013396a454fed34126d7c0ab4970100a5ba12ab777998c39b9000000\ 0001a423d36a0183f0aeaaf1418c59d03f80efee627c1b63fe2400000000003348a9800579d97312\ e23d2c406fdd63b5664645c2be24b10000000000255a69470eb57ffd9c9da44ea41e383a3072d2aa\ 4c6b2f1100000001d33b9ef3801a0278f6a02f67cc34b4d141bd10dcb83e03606600000000006685\ 1e0030a095cf88135f25d8cc94bade280f8b1927bf730000000000684ee18037bc4a50b135b5409e\ e203c53a4993da086543960000000018053f4e503e413480be11647add7fccf86dde36431f8ae00e\ 000000000479ef2a0b40d061215b0c3a409d059dce0248cc265e44983000000000004229b5df6183\ d929c38ff445149af60c2d769dbd75fe3cdf000000001063edb70062f891ec3bd0edb5a3b691b489\ 936681833494ef0000000004a817c80084dfd47308fbcbde436ea9e5ddf0dbc272fff07a00000000\ 005d436d60acfd06e1be0a98cf35df43278c2764d2c8dac06a000000000002faf080d2af75102b57\ 973c3d0487085dcfca0700b443b80000000000d09dc30033dd5720e8d2af3a608d08e53ab7a3bee7\ e7119c0000000000ac79beca35df441284651da796c5c66015020f6b3d991fd400000000005c44e2\ f82a2d8ed8eb934bd974f07823dfb76c040294ab890100000011601c20203b111ac481d99d66c14c\ 82499d41555364a8792b000000010001fa4000000008b00e101000000011601c20206120581e8a53\ dfe4d1547671d21c2ca5e5fdc7370000000007558bdb007267dbb3a22829727f0997f0d664965184\ 7c5377000000002e96c8d3c1835514b8b1b59b96077b3707bb4cc058109179180000000007fa183d\ 81b98b8ceceafa4b6f91093b0d777f2e0015ffff34000000000002e40d20cc595ff867911ef846aa\ 7f02009c1ac4d053c8a90000000000684ee180d3b10838361a89c26d752c5c9b5ebc9a1d5d2fba00\ 00000002098a6780d46daf32595154e05042b82a0df1984b054e3421000000000035327820d9917d\ 0857a3c4780c019ff59e1d43fe94db88f2000000007832e5da800da58d29d47509b40fa5475ec0dd\ b3449019e75300000000006669a05626f284893563564ccdc0de3eaae6b8f111b67c4f0000000000\ ce1ff1c0374041f94052ea0b25e35a68e756558a05293678000000000ba43b740046906208eba3a0\ d49d071ef1ee3b254edea5f367000000000061a121467500f4950f1523f3831f7ea041c3798d454c\ cce4000000001911c44da0b0a17cb0826e5324c150b73318e3a41e6e216f680000000000240a4835\ bb5e954f03189d366462374a9f1b7e0a7991cfe1000000000906880545e25921c92ce6eb5cb72989\ 3eb63b6383bab0959a0000000000938580c0fdcd26b48b560cdcd71c84c2ff636865ccecdeea0000\ 00000241ef913bffa3195e28fa2794ab1aa4fec66607a75ce34d7b00000000000098968015902ea7\ ec41a980b54a156476f27caee9a33d240000000000331c69601edd3d7fe2ddbb393b74df370b01bc\ feb0a527bd0000000000409ac5c8243ead400b67c54a36d61e0b62282098907979e2000000000000\ 9896803cb0e8990f82611929bc474497ca5bacf5c330e800000000052ac86f4c804e289205a99573\ 
af5efdcbfc678cd20f9dad0b00000000000fa56ea097e25f90670f2ca2098b1dcc0b415a0c33c56a\ 68000000000023c34600d86abfe87de20af5abe0959153cf9c4ea3fa3e02000000000067aea9e0d9\ f3715c07675d1b032496c889c55f6dbdf82e5c0000000013d8ccd799dca237a838af7b967f5ba979\ 7639fce518195bd70000000000787a1e83f3af000d6330fd209b407138b04458b97dd717a3000000\ 0001e495f480024fce32306c34bc7f8e1a92d10b9c2a52460b290000000000c8081d5808133670db\ b403f2a07de2c39c63f9539d49f1fd000000000037cecde020000064eb22f7560ce97243d7026e88\ 8eb3b82c00000000012a05f2001f70b02594d0f185a5ed199aba483ecda67057180100000008d419\ d0c09d514b9f50e55ebb4754257a46a2a2e517a24b4f000000010001fa40000000046a0ce8600000\ 0008d419d0c0a5ef4173b7a61c46decf34c2eb29ceb18cbbe883000000000072bd2b40a68a0ad2b1\ 20eead72d2951bc3deed5737d692e00000000000ddbab200bcf8276dd67f1ad3e042d547c36ee6a0\ f4ced0430000000000152caf50d3db24e4edef0292b1a8c83059578bedadcd93f400000000041314\ cf00d704e71d1608594547062006dec5027d57fb270000000000042108997ee471b22c259b35e15a\ 8fc062239bdd9a14452c480000000000ec5b17b4e7f3d74077fcfd9e064414bafdae3a97a3708a91\ 00000000014c4bbfc00206200f31d124c7c13ed193c128c9f75fe920a8000000000138eca4800c1f\ 43096f66524caf3c35dd23d1e6c06e6e106b0000000002e7323f9412440a4c59d0e43571d4093082\ c49f2b2016708d0000000000684ee180198b7f5e9328fbe4faebb1a60e5795f456834e4200000000\ 010a3ed5631c14ba1b70cf161527e70792f55f1f7137b66b1f0000000040c8fe0e80407d2944194f\ 854556b8be8a4c170c31e905248400000000009c7652404fe4b5dcc781dc0dfcbc87d4854ab7bb9d\ ed6aa5000000003b93ad4f8b53085ae0b8a590a2c1c1a2acd9f52c91a1367d2f000000000140f703\ 7964a138cddb694e0c0d9f8a044827df5bb0407e300000000003207d898065e312a47ed681546beb\ 671acbd9b303fd4fac86000000000071cb66ab66dfada22175252ad3676ce639d1de8fa4c998d400\ 00000000684ee1802c8d0cc9b89fac868125b753edd0e844027d582c010000000813eae6e072a632\ 4656c525a30566d538db0d5837a8b9162f000000010001fa400000000409f573700000000813eae6\ e0929dc82035d237ec91b1d4c22bcad2fc4d98d1a900000000023567a776aa507876f9b01dd7c16f\ 7dbd14c7f7a7142878ce0000000000395e95a00afc50e939a490dd52fc82c2d82eec38c50dcce001\ 0000008bb2c97000dec25d024207468384659c8e9353e72cefc2d18a000000010001fa4000000045\ d964b8000000008bb2c97000eeaabab66c4d980a47b08417ce77864af62830a30000000002098a67\ 80f43404ff8e8acee74ae76b2d06f05bd047425c5d00000000005c631f80f5f6729f5526669000e1\ 767bcce1d91f2c215f300000000002c523eac008aa05fb751344b6efe0cf247dbb1f431d3dac1c00\ 000000003e95ba801e2e2f4c41196324df3e3724503288c3d84a2e7c00000000746a5288002ae019\ a9de4a31bb5852707d5571b5e88183c549000000000c3c1046582e9b9277c52f5e646c1965b69714\ ee394386f54400000000009babc17442365c257d876ed295f47613dc9cdb95012c51dd0000000000\ 3b46dda079b461441586fb9812d404ecc15b4e78b79abe1f0000000003d152a24b7f3396d8f26fff\ 6cf911cb8287e3496b778befa3000000000094ca3bbab0e4b70fe24655f96eeb08450a078ad9f1be\ 58ed0000000003b23e02ffb66d7b506de4584c3ae167912aee3d52c5b473fc000000000049f51b00\ c15c07e7f552c7e8e173b26583353f1180f365f6000000000132aa7840d12ac6a1637219cb61caf4\ adbaace5ac09c3a4790000000000684ee180d13d637e44b6a6a168d4c81483decba243ceaf1c0000\ 000004a817c800d19e5f6c829ee914449c716e40c685c9b1861ae800000000000f8abab0e79c5b9f\ 582406f04be282e899e204d44fcce0c60000000000684ee180ff22ee28a051b9d4d82a071a3ab5c1\ f932cd9332000000002795c6cd4105de50edd590386acfd44d5020e7edde99e74d860000000001a5\ 60a3cd06201c908d79fa8362da5d04b7e15f1a5a087294000000000138eca4800adfd27c955d2d90\ 534dff62afbb4350c3f44eda00000000000f49552421f3c2c08d867f8fd33f6b62cd63b6dd9ac5cf\ ee000000000092dda800578da5b3a84b86f7eecd0653e5f63607c019b5fa0000000002550315407f\ 
e80075bc2dfccad7f54ed8370dcf84a1bf1418000000000492d96eb49b55f5b8a72830707a02245b\ 6053da607bd90b8c0000000000009896809e039ad01be1ae635c72622656f7b8bcd82099e5000000\ 0065dd083700a953cc697390c432fce69eaa61e33371b5a959e600000001e978223a49ac6384a7db\ 76ee9c64646f608b79ac6632246d7400000000012e7e5ac0b72933f1219edc756ceaf14214ab53ee\ e9e8437e0000000002c777c57bc2bfb727dbaab1d0fc9cfb2e8746ac75b8407f7c00000000098cfb\ 8700c54aead3b5ac1ee205da256b41804437f780240e00000000104c533c00d4f54b013a99dc08cc\ e6d2ad642d35787a5e42b90000000000d2aa7256f25181e3cf31bd219b876791fc32fd1820e5cad8\ 00000000039c2e9380f4b7ae4c803dc4c07bbc6c7e92d5178bf962c0d800000000001a13b8608fb4\ 0d2c76196f7056d025b5fff22c088684a94a0100000011601c2020f56ef02848b5acf1f9435bc922\ 4e897d1287d49b000000010001fa4000000008b00e101000000011601c20200888205e9b7c7896dc\ debc38bc7e416575f1f2bc00000000009c7652400f80a4c14e53cdaba4cd79c36a09017db7afc8ba\ 0000000000026cf897108318c309eb6dcbf4fa85346773d6434125ada9000000000109210e0415c9\ 3b19eac52bfa04d880d4cb82236669db52ef0000000000a475836ec23e5784493f7e4ad25219e70c\ 6e4487acbd870101000000b67121fa20385db37a526758a272367d61da486e1654b9d96e00000001\ 0001fa400000005b3890fd10000000b67121fa203d4126a68d86ff53e5908d021a22c53f98c21fd5\ 000000000012c684c048ca3981e7c52f2b9353fe1163835cdafe5f2f120000000001a5de84664b70\ ee64b74ce6cf713eaf8e116551670be3e6fa000000000065f2a2007bce146f173dd3a1e84276d625\ b761771776ee5b0000000000303c0a38a04e55a4003799e435d47bfeaa3b3e6a6a57a34a00000000\ 17129c9720a313074bc7a5aa43571b21457bd6d0129d7836f70000000080466eb366a56345b03cb4\ c860c192ba3de1fa8e564ec851e000000000007cc344b1bc06b9b063ab4c4cbd4171892a7669a705\ b2439c000000000000989680bd4f6ff16b646c12fc8416aedafd4b28a50de65800000000006423c2\ 1fd1616892ad69598fccfa137b2e9847bd56339a6e00000000000e5f510d17bf663e7be6b8c2c7d1\ 1f58b7426b191eca51380000000001a13b8600196d0c9509fb3eb33cf53ee586c980dfb93929f500\ 000000041314cf00254a86aeb1ee830e2abe881465e9b44aaa0616f900000000035cbf44ce8cd4c5\ 85181ab611f9438c4add7c1d762bca0cbc0100000022ecb25c00704110f7d20a90dd98956ecda84e\ 4d06c5052f52000000010001fa400000001176592e0000000022ecb25c009bc99e7fb114995d61dd\ 9702d65a0a0604ef4cc0000000000005b8d800a71736c46bdc35b7f34ca110b7a0c5bc7462db5900\ 0000000342770c00a89a1f99ab8632b5f3c4929b4cd6e134e5ed1227000000000489a31acacff0c1\ 57fd95f40e4f55fc3379164ba89fb7035800000000044cbd9643d2313b856afee465ea334d4d93a0\ 17d42bbcdd39000000000300017f40eb57b5ddb4f6b4b26dc904eb4774fb54c103c5040000000000\ 01312d00f6b1e28e72ff3c2a1835de63a3f39d0fd0f0239d00000000001dcd6500ffe7a40e607513\ cf318aeffd3929d3584effb76e0000000012a05f20000d059cbe94d95bba01a240d52d546b343733\ 22750000000000684ee1802380f00a6dea9c4ed3025b2285e5782946da654c000000000039643b58\ 2f0532e3fa811efa3b932a2ddaedbd70e93e09f700000000004d0f8b8135d6db768f973293286e6f\ b5fbe33eb39ad3acee0000000000f88ebdd9438688bfbf72e0536235387049be3d61699963a10000\ 0000060ee3f3e9450caf899165000b760875a732931e87219b213b00000000000a6e49c04e4b7ed9\ 436ab4e87f74bc99c5fd286c093900030000000001dc4ec15c6d8726e3368647bab6c277fcabe54c\ e6372247f9000000000019744d95a2b6b0994a6384acef8c3806679c07e12b37a8ba000000000022\ e2f820b966aeabb1aa99ff56161aa3b2e81c55f08087310000000000d09dc300d3153651f0aa24bf\ 3fc93f366ce0f51aeaa0a4e6000000000138eca480fe491d75413226b18841989bd4b54fed1b9532\ b7000000004c2b7a059d0266e4eefdf592d3c703d6fe62639f783b59cc77000000000038538e407b\ 66d98d269f9a167a40d597b3f60dafaa6211e0000000000000da33607dcb9c7ac3f4f91aac4fca50\ 297cc7bbcf3bb1780000000000684ee18082c11bc006f83e417069a8a82f46b8a5e5c5cd7d000000\ 
004847a1208e890a14203886c9a2017c8c8f5383948c5df1fe8c000000000622690834c867ae9238\ f36c61a8928b28a2d4cf85ea1febdd0000000001a13b8600cde48045e827e444e37c6e94c77ab99a\ 09c79f0700000000018f7504adcffd98c68dbb67ddaf1df3e93cd2348e265925f9000000000068d4\ 6530ece15f774b59db1067a7630d83bd6d2b6609139600000000009c76524002f28d4aa38fb43c48\ 9f6079f9661b2c82814a41000000000826550342fc0a421b7e7461868b22ea3ca97904e3e00d02ac\ 0100002632e314a00007a0a5559ef7c4919641aac3fae26ede383cee84000000010003f480000001\ e8f1c1080000002632e314a000159401f18a8729fb1cb8f9b109d3a1f97bd7a4990000000000350c\ 528054dfad16a2ceb22a494f753633ca3ec9aee683aa00000000001a13b8605db5c1b9eb21f1af8f\ 911bf2fd80965358d41bbd000000000001677f40840db01c3b1561eb83a88cdf544a4fa9e238330e\ 000000000138eca4809a7427f9dd3714af7146795523a522f2f0682b4a000000001b5890c680afcd\ 31375b9c4cff3c6a8aef3172eba8d1ad0a0c0000000001624d0be3b5343ff3950a9b2cdf71ae3209\ 66fe6010d1bde00000000002a0a39ab3c4b9a0eca608ac94e1c681fe33b1063559873e5000000000\ 0092080880c93f54a50dc362e9e1e65735bf200865c4a24bef0000000000764e8bf0fe5daaf033e7\ 439c816abcb09a7ad5387ed36975000000000000989680013f95a9c8de2d980a7c27273ecf83c362\ 8bb2bb0000000002c6c8640a286fb401991353df9184a42d3123cca9e1082b9b0000000000964480\ 147b52da3e6a002af710a52948a2f8090a210e4c1a0000000028bd9ee9008beb924711175ff4e812\ a6fc21e08fec6f2284e100000000000fa56ea0940620e9dac29a26a0b843cbaf2ade43ab06e17500\ 00000001a13b86009d4b212a654d3a7f5291ae8e01e047ede642357b000000002213dcc159bb9445\ bbe97b4c60987aad3bea347bc1cc663d4b0000000000165a0bc0bee8fa6877d6638c691ba16a3832\ 1553264149ae0000000006ba930100c3905a80c859b51efac6439c6048e94f3020513f0000000000\ b2d05e00ce75245780eaec0b3d6ea1c145fbd4fcf708e7db0000000009c66dd6ca7c4c7c0627b571\ 189a6e1ba050458b602b23e8310100000056e0899360d0cc527617d8ff3f08ae73f81156ad2e3d19\ 2b46000000010001fa400000002b7044c9b000000056e0899360d31897834f2ccb5af3bd94e39e52\ bb6523b45cec000000000277502b70da370d9816d9a5d358e6aa77df985cfab7cccd580000000001\ d241dcb8e2af5553dc914bdfef533e7b1890c4187ffbb28800000000a74a5990907d98a26ee76726\ f93445c657d73025024d8cb7b601000000d862a64600e825b2ce65d619840c128ff93977f15fedc3\ acab000000010001fa400000006c31532300000000d862a646002dc3149d643d730b0ecce459eb87\ 8fc82759d0ee00000000016b2464f0713000d910ad605e82d0d8c263b98f75f41073c10000000007\ 68aa408e75196974b63dd274ab64476a85e465edd283a3290000000000441b34cea3c29055e40ae3\ 4cbef5231155438bb7dd2acf460000000003bea12b79d658a417d3a4d5f6114d97232af397806845\ ddd5000000000005f6a93fd991f9cb0839880d8765ae9c41f201563247629a000000000b718c1b70\ de50fce33fd70b6190a9665d55c816b2b8c48ad400000000135aef2280def1156d13ca07d7a425fe\ 689209b01268b6dfa60000000000edc6cadae454e14be0615b9b2ab363ee1dbf4ad900962cc50000\ 0000006632d82f083dd65ca09a6f69a59edc2bc98d39ea6d621b760000000000b0fd72c00faac947\ 7e2212aef6d7aae60acbe86b65bb48c600000000029a61430011500cdd6852a3383fb0f662e4847a\ 451394da3e0000000001c0fa2d2b14ecc0a99bb0aff6f1224a63ad199ccbd5808ea20000000010c9\ d62b0e170bcfb28129070aef990ca310701e5d8342f5db000000000000a6522017ddfe44258811c1\ 127f33fb816c20423805907d000000004823a168871dc58d42c3dd6e5ada8081269313d8d9dbdc6f\ 630000000000684ee180223d77096648495f1a4e67f00ea66b2d2a9ea7f10000000000331c69600e\ e9941e12d11ece73261b697f34a280d2caefd40100000008a7c8e7e031d7bf18aca15fd8e6fa7479\ af53e431dfa61f1e000000010001fa400000000453e473f000000008a7c8e7e0346157f3a222942e\ 7b09ca18b1191e4da94f098d00000000001130edfc34d4443b39a9f00c2ed3bff9aaa0213d82943b\ 7e00000000009502f9005aa18817b4750fa8299f5ebee6b8b6d9e3eb055000000000041314cf006e\ 
c800e88237658d5639404c93b5d3316c26291b0000000002741da23472a92d503f26eca1e240a6fb\ ba223d9f2a2969930000000000684ee1808728658cffe7d4852ea0faad1bfab62bbcfddae0000000\ 0022b02fdd98dcef2bf6efb1086b63f885568739e8defdca2ea401000001977420dc00979cf70f87\ ec2c8070d5cb12e05f2b6a466b713c000000010003f48000000043e8b024ab000001977420dc00a7\ 0ce250ffc665dabb7af23135735daad035963200000000003fc6e780b87a2f59acf79a98fcc343c2\ cef20fa0c65f308900000000000eec0d58de38a3eea80b54d1d5f59b0a46713cb16e4125a6000000\ 000138eca480de88a956b82dbb81e0aa211277da4645c963f6d800000000003ba57860df185a330a\ 7831ccb1d7ffbf7558b5b36f5711b9000000000000989680df431199346e0ac672a2b46e5c3264e4\ 785399410000000003aac5ed803cd841c3e82cb6c9853e2711b9343f8a0d1d77300100000056e089\ 9360f9416d7eff1cb4b3f2e4d85e6c829ff211e65b20000000010001fa400000002b7044c9b00000\ 0056e0899360001f087970f34a44d007e6cceb686117083122a00000000013858255d001622a6339\ b877221e9f2489073ffd0abecd0144000000003a35c1da800e38d2c5857e95d649d5988eb6b4e56a\ 7afcf9ef0000000014f765f48010b6b2fee6983c03e978885c65e85ad7f40d03cb00000000003f2e\ 5100847816309ccc320bcd7eca1ba4905a70d3b7f139010000000386abade01356cd0e7ab342e465\ 5fc2f5c0bae98caa7a5059000000010001fa4000000001c355d6f00000000386abade01e34291d37\ 48a0e7c0e8adf69700ad21b388efa800000000021e66fb002b60ed8e3680a7104de2de65cf51d950\ add0645100000000008067a4b92f3df7edaf6f7f6d460ddcacbabe67b22d50f8dd00000000000399\ a180329b5fd41c2360c5aa946e31672beb41f061e720000000000465e970e738139d55f67e018bbc\ 36d5d165cdb32f444bf75d0000000043f0564aa14018afae05097ea4e5dca59cf7e6c8cb230aa941\ 0000000000684ee180406d8beaa65ab11a6e7e86625af4ffa9aced67ec0000000004392eed1f4ac6\ a3e99cf3dfe1024a84cde15e12b93416d8340000000018feeefb2451bf7b66dd6e1995b7a7b33956\ a8632edc50ea63000000000c43ee53a06746b57c3a8ca12ccdba8e259a55d9a7c24d575200000000\ 00e244490c933d903592fd10ddf6301d8a8a2b14fbfe6aa5b40000000001012f6a42a25e2f49e8e1\ 0f5c88b89211fe0b1782f1639b95000000000074d33a00b17fced74e9601d1a8daa4eedeaf1389bb\ 85a9b2000000000826299e00bd1f4fc66b136d65f173f52e9026ebb2bbebb0700000000000039018\ 18d9a69e1efbc417661615ea974ef0788603eba9880000000010476228d3f1c58f34ef4196cce8b8\ 9d50ce414b4b41264bd90000000006c0c859e612ed2a14135fb661a2f41ca6183cfa9c3124709900\ 000000073bdfe52016491a68e6f551b0880254d97389bfd11bef47820000000000368fe54058ec26\ 2aa46114f568ccd3408b86e6d48c96c74a0100000161402ac4001b46ba2eec6f025328cb873b7bce\ 417be9863d24000000010001fa40000000b0a015620000000161402ac4002c4466840ec7f4d58cf3\ a415f4fed9bbb8a9a106000000000213f8b140924d35e70ea45104a6e1c34652598a6ee6995c7301\ 00000022ecb25c002cfe57a93f73e4e553dc279fd0f27a2a100d8344000000010001fa4000000011\ 76592e0000000022ecb25c002d68d2222674465d878da263697706e3044ad0440000000000d09dc3\ 002dd1668ca884f664a0394ac9eb09df0e523c6f470000000000684ee1806940eec028494f91f866\ 8b88d361d3b9c79f0bf9000000000642b2870070633dbc4307d6bb5eac4387bfb4e4c570a5536b00\ 00000006b4583087727c4b22d6e3fe0b511ca1c3f117eccb7c242ee000000000033d1fdc00753156\ 38cede96851108194ea1882abaf1ca22f90000000016c640884c8705adfa13a9c778e157e7bd05ce\ 5a174c763b080000000006c088e2008748bde53f8055d4945b1cdb64236ce07f8a37710000000000\ 37167b379293e6a7b2bae4ba30fad625d8f898b28fbfd7dd00000000311ffbe780bd0bb06fb07da2\ 348b2cedd1c39371e234f4570100000000002ac42e60c632711656289a284af5eeb933200026f740\ 62ae0000000005a24a8830c7370da707b4e592daacd346a0e8ffe34d5cd9e40000000018ed496dc6\ d18e5c165ecba2015a3c994ad250dd2a7202ffca00000000009c765240d9081efcea6917e0b329d6\ d6a9ce6e9a526195e600000000009fd3ed18e8d6ab384f9961ff491423f1e579df97abdf4bf10000\ 
00000081579280f8dad0c3d8d2b39a988aa1e848a3c3c61c29029f000000000067a6a718108fea23\ 877902557e58fbf9957d76fa503bc5790000000001a13b860017f6bcdf204112e55c5f01da587cd8\ dec1ced8460000000000d7687895259e09a55b559d75640dc0e58ea2ed98564f137e000000000028\ 87fa002bba64ba76522b8c4dd97b679348b66d1c4639e90000000011aa9118b645dd693b111a460b\ ab898656309aaff06b40392a00000000004644f26d7ac24f4081996d1acefb85558a857500662288\ 760000000000009896807e0ee586ac467c4662e50b6c389ab7fae93c20bd0000000003565bbb357f\ 6df1d62d759fc2687a6cd3e03e6d4085ec3c0600000000002a51bd809404aec16c18ba58f9b4f585\ 1f339dee6442311b00000000000d180d1ba713cb338d3880fd4b75dea2e88ea0dae1bbf126000000\ 0001ab63cbb5b777151e4036e7798273e4786e0cc498472e546800000000018c5ef280d94d054752\ 507aacb263379eaf23773d44e1cd050000000000e6ab83800ab8c3a208e26cfb93c3f178c94aa219\ c373a75b0000000001794c22802aa53760874517110328c63fb7487dd22cbe6dc90000000016d8a8\ c8ee3b3cc3b665e3259b4d9bcbbf51a20d9f4d03d45a000000000162a5cb80453d445923b730e44d\ c7228f98dd00af7d7b2f3900000000009c7652406fb1db3b5d0f6fe0ac888659e7d05af26845b3f9\ 00000000010df30e408f3ff4980426984155741cb5c25ac01e893fef620000000000173eed80aa69\ 1fd6c697276e10efb282f4830407e7f190ca0000000000684ee180ab21a8cf03d321a0f4f1bef047\ 0967a9587a973f000000000208715c7306d1ca4a5f77ecbddedbf16f508fa822e2c6760601000000\ d862a64600b5eb86935b0e0cbac951224a75475ad4a11eb037000000010001fa400000006c315323\ 00000000d862a64600bfb5e46a02425f896373ebc922b4ab30d258c4bb000000004a21675500f814\ 3c4e03c1eaaf2fc23041c8f52ba94533342a00000000009bce17d8fba78dd0f8b1333f38d6d65420\ 77eb5073b5682600000000001696e480feb6c82fa567c9d2a0d81ec6391763649ec3da5300000000\ 001ec5599007a29f1d5d97483426c6c400d2a18c2ac8522719000000000417c6198607a445d1ac98\ eac96b773482edff1ad587e95ca10000000001a13b860015f60cf6e4205611409d6e65c0c25bf9bf\ c08a100000000002098a678026cbf0b2f924f7275d0d58d07600fc3da3fcc6100000000006317bca\ 0044a16f540e1f3e101986c98f8431240e01f9afcc0000000005e9810b9c46c29cbd8155b46ca79a\ 869f7d3d739bc90e6fa700000000015aa16e619702351b41c15d4060cceddaf877fe2080efabaa01\ 00000022ecb25c009d40323be257b11eb307c937cbddaaee6ae781d2000000010001fa4000000011\ 76592e0000000022ecb25c009f9ce0c0376dfc35872ca917daad59f11d8dea5b00000000000bb65a\ 20b0a507232c8ed1d438d96d114516203c75a88d1700000000006959e8e0b39103b390218df91266\ 267c1864cba9febdb5a20000000003fda5a8c4bebbcef97959c9c041d4bbab2afc4e56d1161adf00\ 000000041314cf00ca335a31e6099c0d0fc0085c1ea2535f81dc0d120000000003c905c360ce30bf\ 2775c5273aef5eb94fb2ed183c33b332b0000000000ba43b7400accd191828032c229d4af43936a8\ 14a0daa3a7dd01000000114f9af780d47bb196dab26a9c801d0330e6810f1c763e732e0000000100\ 01fa4000000008a7cd7bc0000000114f9af780e02fb22e6a718f88e4de0852a4a6da6ae2d8c14700\ 00000002098a6780e5f34a8e5ac2b1cd80c3556e532426935134460000000000006638d2c0149eac\ 5eac9530a3778f1d334776e4db52b4b22f00000000014ace47801d6749feb50f0123c2a312c4b794\ f0bfea5a6c8a0000000000684ee1801e0fae453c8dc68b5096a2b4e2a71df96e1c481f0000000000\ b12cc02048bb19bc3051c3905b21f786acf1a0a62ad8eca8000000000032002d7058a1c4adb0dafe\ 79b38588678668edd15a631e8800000000030109106b5e4fc0c4568329e2c71b20d1c9f833e094ee\ 10a90000000000d0bdce206a3c0e665af1ea687fd8f6c323f7c022b9c84be4000000000049f1eec0\ 7f0620b229c7b1437d53b62e9e44c77948aec61400000000001e854d5b9b735abf26f4f46a1f3498\ a6c7861507376e9b8f000000000684ee1800a624ce62530a24ed2a0c28e9f782e8bdb83fa46e0000\ 0000f478e08400b8a13edc0aafcbfcc2b0372742a8cff54549d25c00000000005a419b9ac24d0089\ 0e0be391b325b2b0775ded1adbe31355000000000046b4b37ecb8dd59e2d03ddeb9a5730a32c1291\ 
59053970d9010000002b00294b60d9245eb58672b3a46ad0317b8b9957b67aadee60000000010001\ fa40000000158014a5b00000002b00294b60db69885099752cc1ed68f78c852529495b89ac840000\ 0000012a05f200e7b90d1ec0502c56a153bfd1667f95b6c2a5d08600000000001510f10c0becb9a2\ 25ecebbda3c34c176478a26b677d6e0500000000008e23a2605c4084e1d93a31c4a7e5b4879d557c\ b4ec844510000000000158d310da64b3ab966008792b97391fd1b99c3d11bb5adb44000000000147\ d357006fef6c2011b7e1a21f2a483220460dba4166b8d10000000013870e34b77399de251514ae42\ 74c531049b0390d31d33c58700000000000206cc8080fc13fd51b5813ebd2548bd2bc35b6c8b528d\ 4d00000000003fc6e78082f28c251b90f49213f5a5c8326283524d6f2dd100000000001fa4da8e8e\ ad217db3ed9d93502b041358d3e453d6413555000000000138eca4809d35cfd41347bc65e7d34bf2\ 65628eb6ce9570d500000000037646a39aa693c9f948a33357f83fcaaa286e135006ec33bb000000\ 000001312d00af3cc56c1fbbb3e614b30ea95d71d330873bd2a800000000007bfa4800bf744ceb7e\ 7c523faa908d9c3d6dadcf16e390fa000000000000989680c313876c03fa201ac40a2225453e0328\ 9eb28e62000000000026be3680e5dd1e904c3ad7cb7088827ccc09aec8df418f3300000000016f1f\ 75a0f4394ff3dd6865a4336ad0e5c3a43a0f08f1b5b3000000000055885cc058d2267e8e906c08f5\ fcb611416e97e0bad85cb800000000009cc29d806b7075ce85744608f7009c934c18d58f7e54764f\ 000000000241719f6081d604d4486a66610c7a70f1c36e0be671c3e69c0000000002a18868628ac3\ 910eb1a13279b86e3ae51342ae585d7d556e00000000021d399719a421771d0616ae2619ff919fb7\ a73ff0e616afe50000000000d09dc300aabb5b6b2b958202ca8d8e25183358551131b5be00000000\ 001d4f5f7ae86aa1a882d4cd648a6ac5748ff23f11cbb690750000000000db0c0cc0e9d8ec103d68\ 732de38d0d368d4a2606090022c500000000011501c39bf70f7a575b3cc4ea3ef70af8531f4aa525\ fb8519000000000007d2b750fd196f0aecfe3c9ef7582c00f72b1ff0de5d3c63000000000004cace\ 80feb44b39a5f627a1a3d0c1ed1692d6445757338e000000000163a3c874015362ae1c5b0df1b467\ 5f52a0fea3cda3442a9200000000006743da2015bd67cde53a8a6c863b8c303b2af26fb008853800\ 00000001da3d72fd1c0845c0295f0b093ed5ca9e3d342a9d8600fbce00000000007d2b75001c155c\ 9d410fc8d0b159fa5ec647bab0288ba0e300000001be652b3dbb1e45fb5d6ebd6ea6582624fa9a33\ 87440348ed6a000000000217c597c020e7aa1ff5b5d7573c5cf47d44352f75c0e84eea0000000007\ 045559542c9ba31a1fd8d50949f760481b3f97cd58cbb39d000000006f2215e49a2f58f61c195cde\ f2633ff7339678e91f181e606f000000001d210dda293daa006935df179acd7fb3f04e38ff4bfac1\ b65d00000000094c9c09003ee68231326fb98ed5debf153ffffd3e07aa210800000000003e95ba80\ 4c99ee96cddcb956e6063266072e64a3274776c90000000007d99d37945289e83f2726e01a046832\ 9ab08e440734b58177000000000376fb3a515bd2229f716f1c6b094c6391b508ac640f8a8b190000\ 000000166b8b9468bda0f9f72b099c2b9b1fb9eaad84e426d800d900000000032d01e2006a0f76c1\ 32f5158869616b634dca7a30505379dd000000000f377c524d9b68de54ae3df1876b234bb5ba40d6\ a61cf5cd7300000000009c765240a6ac0a1da8c64658eced64b9548a96cd2462d001000000000c39\ 3e6d00c0b349e2164c2627cbe217dafbdd903586a3603500000000000bebc200c3ff439e167f2bba\ 5be803240ac13d04c6fa8dbc00000000006529858fdab92792d5132857ff09b763c6057700be859e\ a70000000000146a22a0dc079b490b93270da8e46f4589130d6f1bea319a0000000001a8629400df\ 20d00785d420d85e568e66560ecae0a7770b20000000000017dd9e80e26f8fb893f14e55d774d088\ f23281b32402e95a00000000062802915bfc463c561dfd3f6925746f49a55b5f1911dc091e000000\ 00005335664c417cee58074efca8cc78f6a4fb1f9ab22f3d6f320000000000342770c04318c9224b\ 8cb2bcf20e6e9109a17a8fc0946ab700000000028c8bde628283872c745aa7331862bd32247c8cb5\ c8a859ef00000000003c7fabc08ccc904a06f37e35404cbf306839b0fd211a233900000000001add\ e902be50e822da68f16a96a11bfd803b4843e758cda3000000000138eca480e72424487c166f5f12\ 
ab999722caecb5b106b15b000000000303e15180eb09c3213d59e90d6241839a957fb76988bc07a2\ 000000001982ac1ca0f2a228457d9800b7055a273d19e8426a1bb1c81c0000000000342770c00a1a\ deadf77d08a370ae54cc8027d7aa7e00da5c0000000000009896802b3419fe24a0cd5d8713ba11be\ b2812fb2477d9400000000000566724039873bb3e1ed6ba0d3ae6030967584491664bc6e00000000\ 002d2e29804599cfbe62fce3a5efc3c78981233493114377ac00000000056ebe8d615ab6457201ff\ 7fcc7a7a439b2dfc57977fb721dd00000000000ff15e608159e684829d7c013a61eb685667ee66ce\ 78c15200000000000a6385cfa185d1afe65fad9ec07bedcca30361bc9bb72ff40000000000a929f8\ 20a421ba4401945c6fdbaf3fc60f02723e5fb6a213000000000540d2e700b71501166f110977f647\ 766fcf18c9cd8470c1620000000001080347c0bd9c1be2ddd9124813d144df1faf4e8e00ad070a00\ 0000000138eca480c899738c4c5117f9d8bb057980635bffb366891d000000000000989680cc25f6\ ea67e274e878c7cc81e623cb42aa453f6e0000000001c9a68220d45690792774ddd2862c42aefae8\ b49535d14049000000000ee4e2a200e15a7c3bdf8d4dcfca96d25a15ffdff971eb62f00000000000\ 95a31acee2c384cee8d8ee0dbc2d2b5d7d4f6d4578fef25c000000000068e77800f61048116772fa\ faaad735c07663ba08b4f0a7ab00000000046033daa40b08f781be42240c4c36a0d6a590e7b1e820\ a834000000000025c705401e605e6acc237684d8879276d556a44374e308000000000000157b4480\ 3740d361fd9887e33528630b9d2b76d3fbd69c040000000000bb5af3a04bcb6b3cef75893a964a83\ 5aa888772da82920540000000000903e450066814f57cc55b631df64c6098fadd5ab78c18cfa0000\ 0000000a6e49c0708ee7f6cb5809213c63dbb7d318e7d031e4641b000000000064dc5cae93dfbd91\ b06e75d080b58ae71d6f4722fb7f770500000000003d3707b0c480344b374f8cae2db4d70304554d\ b7dd7ad2af0000000000d09dc300c9da437b5a8b907eed55de12bae2af2221d0ae50000000000155\ 279bbace8b711b3f2103c476346ae7bc6fa062f9d8733b000000000447688931e321aa13adaddab1\ ac6201fcf0d75ec1d904a8ea000000000ef613e9831e349ecc1ef812bef6fc1fe64c0ad419ba3ddb\ 7a0000000005fac66af71f13ac234a8a6342528316bd05c9b31fa72f2a2b00000000025706d48021\ 6428c0ec1d962a5c87aa5e0fc8b2a5d88b9fe8000000000716f620802c45699520b8e13e186692e3\ c3e2c7110d1e082e0000000000a3d1f1a02e131b02de1376e3e764c5e3cc9e8652b47cef0e000000\ 000520cb099f61c4b145f367f8c21f611840dfade99ef7fd33ee000000000194fd3cac655d569900\ f3050f9d66be4d5755007dc674a2a800000000004190ab009806d20f71d6db86947940028fd6eb2d\ f69031bf0000000001213eac229e86758accd7d83115dda28e21166cdeb34893d000000000006377\ 9d469fe8607d0ca8e96bf1f294c1424c5e083e9ec1d500000000003f2e5100bfb0756435c0e5a939\ 6c4411a54f314453bf43390000000000fa56ea00d4e08315958443a76fe5237a36743ae89fec4ff0\ 000000007a3c70420008783a1a6fe2526b24c09a63a21582b237cefdd5000000000e36447c003794\ 14478091d38da882f07aa11ca8457f9d8466000000000b1584fc5a31e95398b7e11565c61cf609d8\ b940b3c18d7e77010000001176592e004756335ede8094b859defdd937c721eeee7813e900000001\ 0001fa4000000008bb2c97000000001176592e004bf17122ddef3091ed711ff204d4074e9a3e887b\ 000000000258da88436c6a771c60fa8f7f5242a1b83c8f98e266f1bfc900000000041fc2c07586dc\ 278acdd1a2a62fefc98f167cba40f35e09c00000000002034a9346a83f41924c7230902283e032b5\ 04e2da6c14aa6d000000000188683ce0b95797edce7e767f31356bf4808c403337dd17b600000000\ 2095f7d2c0ce1a7ce31f1a9e55ebf865a43e539ed4ab143b3e000000000029b92700e6e89076f0b9\ 2627b0fbab622ce208d255032c9300000000041314cf00c8b99c3cd4a33df368d96e2666d929776b\ cb8aff01000001319718a500fcb25eb488484eeb7fd04db94dffe264bd505430000000010001fa40\ 00000098cb8c5280000001319718a5000a76d33185e98fcacef0fc4600f82a311f5b9f7300000000\ 0007b0009851c17dcc71479fdfc2464ef68816f6a53e16693f00000000009c7652402caa80bf426b\ dc19dc9c00d9a43e2da6350fc73d0100000015b822c68062a153a5eeabe57af690496251d9c23041\ 
68db50000000010001fa400000000adc11634000000015b822c6807cbb31f375be7d029e60c5882e\ 08a0e32dd9c7120000000004612ce8a98b0fa0da24dc92090239099dc51573942a6767b700000000\ 017797bbc09eb4d34727e07713836cd10f8711aa12e64bbbd70000000000684ee180af95e4a115fc\ c2e3a7f3adb05eaef2485dd67dae00000000006743da20d204dffee9638e49e48817fb10a15509cb\ e2494500000000000f273884d309da8a2bf1e1c4efe48b698d3cc7d1eef1a1b4000000000138eca4\ 80dff0924886e52b1f32721ae751f28784902c7f7c00000000002e6f0d33fd911c415dd62a96863e\ b956191ac84f832db5470000000000671db4801ac14d6eeab009a34dcd9bc41eef208d9384873c00\ 000000000a6e49c01ae27d72c54811d772a4665425152c6ad640b6760000000003aac5ed80308d6d\ d2a9230f94ee3f9efb19d32eb40b47ea7f00000000004e9e57c030ae8c4e8f0bfc87a95d56e4181b\ 72c3292c73e70000000013dc7b460a42d0e405151ac8ffae8eeb7ac75f02e8cfc8a5490000000000\ 2c54b9704d3f6a252e0df4422f0fb485780fb1abfbdd8a7f0000000000157b44805c52965dd761cc\ e4191c3f724ca2ea93cdd093de0000000000684ee180616da4072390222ca4f74df29968e20e843a\ 96370000000000342770c06a46940ae1c4d86f350ff016540b5155b081ad96000000001e1d71da2a\ 770eb3b3db4f655af6452d1520464a6490fa95f2000000000043a4577596b127d9052ac7077b415c\ 6a8cc9dce56f76566300000000002cb41780ac27f53e6ed8c9ba980c477baada6eac335dcbfb0000\ 00000000989680af3fd0751a672560e49a1a17848374f31a3035500000000001ee1f01cdc797ab0f\ d79b61042189d947aed5ce7437b975ab0000000005d06148ebd2985450804dbdfdd9cfe5858b158f\ 7a70126b600000000000684ee180d68e7ae79d1f76663e83ef7cebd180481837b272000000000084\ e2365ef9f97cb91a830a2720707301809d468c20c89506000000000011bb7d600fd3a8a421cd8274\ 0d63f220d968e39ef5215c3700000000004ff5aa604ab24e1416cbd6fb836085aa64a95e37caff8a\ 7700000000185c41e02f5da341e30e77620ce22c827ea745e55323f2adc90000000009f24f780070\ 2a534fa0ac4d77362ff4bb7a06d381f04be1c1000000000640dd42f48ff16867659e27933fd4a182\ 90268839d7f7d119000000000068f454f3a841e20f1adb47d40ce7171bd39dcd580583d218000000\ 0001a13b8600a9d75b982ef5f2e9efbb77b7e179f889757e366a00000000027dc50b00cf86a6b263\ 7ac615d35940a6bb139ac4148c745d0000000000d0763dd0f1e052699c2d1bcf8dcb87ff0e96b213\ c988959d000000000035b7fbd0f92277008230b2cb4ce77e332dae9639579f868c000000002dcb9c\ 64972f6bbeffe7306cf1a5eafc1ba5c039e5d5474bbe010000002b70458d0007d7ce4b19b795254f\ 5109c210ff892f95748a37000000010001fa4000000015b822c6800000002b70458d001c9d7ff28d\ a41d42b5ae0f0871131bec78be7fc7000000000006c7aff02694f77ef0b6c830b1518861156ead74\ 42d3e65f00000000037892912a2da3183636aae21c2710b5bd4486903f8541fb8000000000096004\ 4c00371fe03628f6146a45dcae22ce010b4bdc7249c20000000002315cec8356a3d0dd4a7fd68553\ b1d842511316ab19ac9e32000000000024b6324b5fcf23ad86c4e7c2caef971878be3b5eba66d5a6\ 00000000014920f52974695248c5798101527fe8d6018a7c52d834c1af000000000030c655a9850d\ 3b4a43cab895562e509af0fc375c53b934320000000013e23c9600a81796f7a36281cbfed9cadeb4\ 1e13b141f6de26000000000000989680b225327faf32dd0952f6d380b5dc7bc01a153da300000000\ 0ba271b080c640ffe14a4f476294942262722d78a98063d7ae00000000003f66454cc6441ce8e60a\ 6da8fd50a62778e620c340cffe5a0000000002305bf6f8d9a7ff0827bac19ad7da9472dc8e607021\ f56cdf000000000173f162a1dc2c1058682ad9e2b75eb5a6b77eaa6e7fbcf72b00000000008799be\ c0f9cf2e9fffecf760b73fc89d98cb50a4000f7a55000000001870b31680fd4567097059497f7120\ 18155932135cbc2365f7000000000a31058bc0855fd96b13cc98443cbbb577fc14762d4c492e1a01\ 0000000132a8f1a007397174290a1a587fe955e0e4f59af37e5aab87000000010001fa4000000000\ 995478d00000000132a8f1a0171866db6d9d80f61084ae41e382055d1835f2d1000000000019f071\ 0720cc540863e620a639a73c243830a8310ab3dbda0000000018dacbbb80f6afd3ee3e1243b301bb\ 
a454e14ed7f0f869759601000001977420dc004dcf0b0c70486431227b32eeb42a2f6ca30c5b9a00\ 0000010003f48000000043e8b024ab000001977420dc005ee2ff77542f63cf46177f9ce64f1ddde1\ ca79e800000000226c90515f66a8ce4130803e50da5e7f21ee1b8c9f2723588d0000000055079dc4\ 006cf83128db5e097319677253dc523b6ebb28cc3d000000000057516ab071669d69b1a4095161ea\ 04e6e62e147a347a1b4600000000187b6dab807a2571238884819a616fcd5dbf7bca74142e6b2d00\ 0000000bc50b535d828e88e00dbc3a735eaf6804749eb5a7515a69d600000000006d6d8d62855a07\ b67a4c77f6e307aa1a4b8b762b2c0db78900000000005e60c4409ea8e6935a9e37882e783ed5653c\ 40a1a7cd0ddb0000000057e8055f2dc6e97f003afdecf5e0afeefb14ded2c29cf48ccb0000000004\ a9dccf97cc4136afdb85127d02d777728614052006ec91250000000003758c63fdd3c0f08a7d7a48\ 34b1e97313be6e5b360356569200000000004c4b4000dac3b4baae81169d5993c1fa997e690522ec\ 873c0000000001ff1c1dc0e8a5ff5b5264ab0d81904d2d771cadcdd60f299e00000000001f4add40\ f896825d830a104a217a988516b55195a5334ff2000000000072f10c800998726a35e16f2c810229\ 65bdd6395a3aae58fa00000000020a956ee00a5f8d99dc12e94079486cd1147acf259f2d426d0000\ 0000011e1a30001576a2558b85545ecd0d8d0b568d366cd2044e160000000000d09dc300160d867a\ 17d2030f4caa9675fc7066ec88b0f0b30000000000009896802c958dcc325e5e63917d159d9a0ca6\ 786adcab54000000000079bc3670461cee2bc01b07dfa43153be2c514e9a600add9100000000026f\ 76ef005800a28f318ff464e1d3fdcf833afe6efe9662b300000000004e3b292059333e9560a4a009\ 8c47a02a0df41cae36c6b5c7000000000040dfaa80729d41e71fcdd403dc2147a1992f52fac2f2d0\ 0a0000000675f026928077fe71b45cc2ce664922b16381f1e183282affd10000000001b23c68b87b\ 2b8b80d3fd85f5c7b54bbe7c77513989c3990500000000262c1b172385c01b33ef589086b192d06b\ 33a4338eed43ac9200000000000098968096af4324a391339cc130952f769d98b716889a02000000\ 005a5ba24e9f98f5767b7fe9091f7146ef067dbc06860780997c0000000003c5a8a5cdbe3c910763\ ce0cc90e6bbeb486226fb99b1b0d9c000000000001312d00e30c9bf570d152f87ccf23c113eb49ac\ 767420e000000000041314cf00e5b5ea289911c0a7be65b3b1cac39cf91f517fed00000000179b5e\ 5b20923090173eda76f373447bf38fd43bb82543b410010000008bb2c9700008a618d31b7fd6c407\ 9e24db5898071b9670419d000000010001fa4000000045d964b8000000008bb2c970001232e919cf\ 237c81aa13a9a6ef3087c04f786663000000000130dbc4701deb8cfa143ba9975abd912a19754235\ 605331560000000002540be4001f56582c8b4a16f77e27bc7cad7aa60c2491be8d0000000000684e\ e1803304ef4a109afc50a7ad2b2d87d2bbc17cfa8258000000000038ae05473e3f5d74be8344a11f\ 8d9602082a23e2444e618e000000000053724e0044c023c3cce3db087d091c5f608c5d7f919efe90\ 0000000000684ee1804c4055cf472a5424018b2aa1ded37df886965907000000014256290c405613\ 0ff25afc1d6a55fc84251fa5a7cd0e69ff1000000000009af8da005a756d3171624e66c8ce0b68f4\ 5740f95d28bb070000000000659af7b76194fb672bae99e860134cdece86418d5d17fb3000000000\ 009c7652406cf34c7e17fa51c3eb3d69fec18cdb5dda2e5d7d00000000019095be608082b9cbbaa3\ c10d119657f80fe5f42baf877e5200000000008320547b2559a5d59fbf45ec48233218c99eb88ea0\ e642240100000056e0899360a0049600fecbc7b3312ca0050d6605a67fd8d507000000010001fa40\ 0000002b7044c9b000000056e0899360abab9760f958a163eaaffd99e98d966266a31fcd00000000\ 012b27dcc0b0e5dc9710715ac15150f904a50e81dd3622139f000000000005f5e100b2dfd0040a47\ 5c1d558513a37ad15ffca5584ea6000000000052fe73d6bb722b5a4e5f70b30cc8617888ebdb96f5\ f235e00000000006ec7319e20531ce072255bd3de8e252ce25d8312d30216763010000008bb2c970\ 00c55113641286f254766b44e6808b8787ba4a1fce000000010001fa4000000045d964b800000000\ 8bb2c97000c8d592340b64d1a34a2ea168a2d0bf605a6175fc00000000001ec18900c90af9e93a19\ 5ac8078e8c937f750444dbcf4c6100000000067c7b9647cd9c259d40fd7cae4bfe016410f1c81528\ 
e5e9ab0000000004a817c800d9579ac56b1cd6fff6f123d09db964254b6eb1180000000028bed016\ 00ec6894f7a2cde8abbf56524b3d4cbcc58a5a3bfd0000000000730f9100f6a4dbeb7de221ce2da6\ 39d3d782798c36bd0ed00000000002efccaaf02dff455cfc98717cc7bc81b471c0fd8152295f3300\ 000000090e7be410371b8d893b295960ab9d1e05716f944e692fe27900000000073c2ee68f576f09\ bde517dd032f26c2a2cb0fb19f6b86873d0000000000bf3a64385b2db4f81f878f6da0de3ba904c3\ 11e16c811dee0000000000c9369ec08dd79341ac24b55423a017a6c712c828be79f6b10100000074\ 6a5288006099016973f25367585e3f94658bd64f01a7fb0e000000010001fa400000003a35294400\ 000000746a528800b2451f3e949dc0505845c559a12064edb584e61c00000000056a014b94be878f\ 01f45c7137f79af0a951f59905fbc6ee9a000000000008f0d180f0807c70ccd87a2bc826a7cb298b\ 2214e6d4476800000000140ae953fc195274b2a44d415487cc8496be7771696b86a7dc0000000000\ 3e51312223a6e0f3ac4d47291adc8544fa7b0564d0eb2b3600000000080b06f3f5947a887d9206f9\ 81294611953a9d2b9418e5e389010000002b46ee0e0058ef72017a95654210f7cbffe791b3943032\ ce5d000000010001fa4000000015a37707000000002b46ee0e0059e312f3202b26a692e40b0901bd\ 91101a53eab400000000061c9f368065dd0ac968fc36314e82908492091f3956e0568e0000000008\ 05453c2e77bde99d4af9c3671f07eb9532246116f9281ba600000000003356351fc6bb56f6c977b3\ 123eb5a095717b31e48825a24e0000000002098a6780028d8d85887c427b6a8a7cf70638385c3d60\ bf0f00000000047b63b08015f1d7eb32ae58bb9537ea36e745a866d7ea72ea0000000000671920a0\ 2f73719f43aab9989ecd94e61a130e52295d2e70000000000008f0d1803a5c3fd56576b0f9fec1d3\ a0d4f26234e670b06c000000000014dc938069549f3460db21b71645775771787e87d4410edc0000\ 0000000835844892174b36712b9b1dc35c1b5796d7656c72ac13530000000000025b6da194aa3d34\ 8815b1e1a1d8fc110a4706892784ebdd000000001d759781b8a42fa062370c222d07d76d52d31ec5\ d85ca8cddd0000000028bed01600b7f991d7404bfd3ac2744bee76d40ad44cfa61a60000000000ca\ 82e3c8c84a60cab65e958965cb5987d2e9d80baf7927f90000000001fc5e3640d1d1ecf309396c6b\ 4d50dc61938e90e0778dc2e10000000000684ee180dc1c53621b66775a89c6d99e41d68e11388af9\ d70000000000ff1a13a94c757a90ee9b11a2fcae2c693fa5f98ec93dd7bb00000000002e6fb2595d\ 1a3d7073ee30c1ca566085c1e43bd7c54d7e500000000000684ee1807db21674937ea0fd357d962b\ 8915dfdfd030f11f0000000000e612ed00a4262ef1540b0ebefa034ae7dba3a391f12f1f02000000\ 00041314cf00b46ba513f472fa85b60c710ef010e38262e4562a000000000179e754a2c6a7a8fccb\ 21361c3df867b6137c6b187c117ebe000000000118464073f8c92884bb21fec24b45f19d876264ec\ c6380327000000000128c1b2300524afbc1ab0b0a9023b2d3e765511c9329e1a2900000000dbd714\ 77103f870a7e36ac4bb0e538036e6553a36550d167f700000000031c4332c5473a0e84b27c3b6147\ 9f856481842ce1b427fdd4000000000342770c0055d38b0176bb4339ae483d2e48870086a0ef0636\ 000000000128d4c50059cd5d81518acbcb54a93d14af1ec148c8b4c402000000000826299e006036\ 66c39097fd3af7897be9e0ab43914b073a660000000004d9219188a5b7980b9d3598e69618555e56\ 02a560ab5b14ce000000000b85b3e6c0d711633fb4ddbfda46109e2e611cae53e355c48b00000000\ 0b2c385844db5851ad09ebd6cafc37b05acf7e336c9672cdb7000000000474f8d505f2b9eab8266d\ d4f44bf44cbae19f1af19940c85a00000000041314cf00f95f281a92bd8b4ecf6cd4e91e34678acb\ ee228a0000000001a1608d9b37f06a00a49ef5eebbe6165341eaf0381864ddc90000000000d09dc3\ 0057207d845c1fb41b2e2b05462a7f92b8a4a1286000000000002ed557ad6f99c7d05b145a67e3de\ f96fbd3525a7f61fff5a00000000001b1fd09a8c92ea3bbfec5d955b2167cbdbd71d1503368b5800\ 00000019ab697e809f64c0fed218a09ec502a27897adfd7403d9af27000000003b9b629680a4018b\ 2eb1534e275db76266e4b979cad7e37bf000000000006674951aa78acae58b6f85929f8d5f06065f\ f9fced6f3270000000000826299e00aa57ee0f5fefdb205066ad8577b3459394e57c3f0000000007\ 
9ffebc7ac2949b49bb1bb6662526294560581df2c5f2ff080000000074db0204f0da3c97ea1103e8\ b8f6b63313c72d3ddda30394e60000000000861c4acddc319d79be03860fce49781086e4d7ca0244\ 9df00000000000684ee180e6da972de1ba77d07032f2159a27214a6cc7b2f10000000000684ee180\ ef2f0e942f8ddc1585af222a9184a4a1d056a0b9000000000055ebe5c9f655fe048ad1e49915ce93\ 3297f7d33979e5a177000000000029b92700fff92af40c17d8990c5c0b5b20bf73508389033c0000\ 0000007d09dc7f004df512449111ab60b9721f15bb26de484f4312000000001e4778f9720920d09c\ f2705c5ccce40d17efed9bd401bfbd410000000002ff9797460b28da77fac452321aaad99eb94512\ 41fa1204ea00000000019963778015464137648777f03a89799bba3e70f78284c33e00000000053f\ 431f401e4a358b65d421323faa79d0728fd4c4f19a5b480000000002f692dc893f09f88c7f1c185f\ fc7d0f28433d1ab2b2fb735000000000002a3339005d6185072976bd221cc699df25658ef323eb27\ 64000000018bcfe568007c991feced4109e80b6b4e00d4be4f2db27a820c0000000000675e8e108b\ 8bb6c260892bc62fd0cd4ab11d15560a5c0004000000003b5fc7cc80990467354749583950268f1b\ dc932baf2e1f2eb40000000000655a0b8099be78ef96b3f1dcaee88d9c737e966f2f29de71010000\ 01977420dc00b5326ac47f375fb9dcb924228c49b7938bb37770000000010003f48000000043e8b0\ 24ab000001977420dc00c72bbb3ec6bd50f678b45f567c497517737ac0e70000000000684ee18091\ 84a309e12d4e9b88454e6e642bbdd4eefe3f6801000000037e11d600db9c6ae5094593a7f48e6b66\ 9f70603a420db5c8000000010001fa4000000001bf08eb00000000037e11d600f0f24a15c96c3b7a\ c077a0b2e03ec0a588220cff000000000d25f146c9285e0c24c9f50157b2ed7b7a9f1611efc3bfe9\ a20000000000630916a02ff5ba846d62306c8e23b803248a4d53be5b6a440000000000961033e341\ 99a8dff10509386b2aa176e6381977f1663764000000000af3d1f0e4475677807c9ad517f47eb719\ 3937c5464ecbdef00000000000342770c04f2db8eddb9837bd8a356b01de605326945f5d44000000\ 0002a64871204f63b41216bab1331d4e2b86d11814d8d558ca220000000007f8c82dcd57b314ecef\ 60c8b6f2e2c1c7a768b9f7f40657e7000000000029b92700fc3687f91b8bbb1cc24b55dd640e3c9b\ b23261a901000000098e4210c063ddd9ab4e5d3fb7b41ff2a06647beca2fa1f52a000000010001fa\ 4000000004c7210860000000098e4210c069d5d63653970853e2b019febeae72e8ddd600a5000000\ 0000009896806bad6921fb455361f6a972ebb3de0a6d67e4a4f0000000000002faf08082a59473c9\ b1cbc0a3f119dfd9d39445d57b12b2000000000290451f0a89306b4189780061c721a43aa5f1801b\ 72063a9d00000000018261a0ea8ae5758de38258ae9783ed616b9e19590fbe07440000000002da28\ 2a80924eddc10023be35b70baae59de13731dbd88e050000000000e2149640ac7dbdeb3d9ad56fae\ 7f1e36338fa0870de282fe0000000000bb9c17a2ae715f499557f5a66df7d3d395ca0f82b5adf7e0\ 000000000000989680cf483a48691a10512f9e4582f5a8f0007fd0b9a600000000021353e5c0e2dc\ 37feb51b146a86ff453815c63f4ae4c24baf00000000052c59822034991cc7d5f6d35923e101e198\ 8e8a6361d13749010000000ba43b7400ec567774bc40ae5b832b378d8791f4d5660d33a000000001\ 0001fa4000000005d21dba000000000ba43b7400efc5501237a26fc4957229aa0f6a81ed1b296e54\ 0000000000202fbf00f6c7af0f8c6213900235f95c00745cab0c843769000000000615f4914ef7c8\ 034bbad7a6f62f44ed228e8fe120ea3f4b03000000000001312d00038227c6e91cc32144c164a1ba\ 2183f7256e5767000000002618cf5c402813a2ffde3aacb03390aee56acdcfd48d009a1a00000000\ 02098a678030d2b4f935846f2600921c19ee291a9255e0adfe000000013de0c9c4915328f6604fd7\ 2ec38a2ea672bf3790c3b95e633900000000000988a6ef7a5e35367157e434b1f3c03e5632adf41f\ 73b2f600000000cfbafa68d981ccb8bd45c57366fc6f9a1a8af291de3bfda4b80000000002ce0e11\ f0abff0b570a00ee4592467ee170d76aee885d084000000000038dfc0e00ad038da0314cc101853f\ fdd3b17dda792f62edc8000000000204734734ae4ca15fbed7b878c7d727703e78150db50bb7fe00\ 00000000d060fea9cc74f9fc0794fd3bdffc3b504b55893c4685488f0000000000150cc9dfcd82c3\ 
7907f1a05c9cfdb76b5511a40e94dbef3700000000007cc0a540b11d3074bce47572db0e1f06ca84\ 4655b1b6c90c0100000022ecb25c00d1336297b4f76d04c2921ea91dfef23290dea79a0000000100\ 01fa400000001176592e0000000022ecb25c00d1fa92461104000909d78eefcf99fceeded75dda00\ 000000041314cf00d722afe540f32e0152bb65df7b14f8b0d2bc1caf0000000004f4630800887a8a\ 093d0b4bd862b741ec26ba8bf5572618160100000022ecb25c00ddf7adb0d0a547af45fd75f5baaa\ 015d20a1bf59000000010001fa400000001176592e0000000022ecb25c00ec956180dcf3771ca4a1\ 2d1a851432a9604c8804000000000c2d9ea17ffc660ebc35d4de3dae65df744d2feb25d315e7d400\ 00000012a05f20003eab11d3253c2d3ab30b22a5d1a5bf4c637e4ccb0000000000684ee180410893\ 2ca62d1ae1efe1443852d7e928d5c778fc0000000002218808f46741c9aaa1ef26427145a669599f\ d09d4a74719e000000000014dc93806d28b4f435b2fe77a2b9392d908101dc6cc514c40000000000\ 0dbba0006e47bfb7cdf1f2ea32eabc8a26ec97770bad4bba0000000005997de0809523569da9361f\ 27d45d8bb01c46e5a52af355490000000000d09dc30098554f526e1b4c2f771f1d5a503b46ac7ff1\ 9bf40000000000b68a0aa09d6ca05782381c03743752a33bea339423041fcf0000000000935cdf45\ 50caa3f93655b5490ba368e19eaf8cad0a9cd0c80100000011601c2020ae26b0807ec8dd46eecc06\ 575d8ff974f3435405000000010001fa4000000008b00e101000000011601c2020b43d58ef11b5e3\ 099c45ebdc1aed387740e1ddda000000000178c1cf53b5d270b619fb0ef41f34a8e9e142daad4777\ dd6000000000012d49548ad7014cf30595fe93dbff64d75f95be9cef768eda000000000826299e00\ e476a59c63543b674cfc0af6f63f54bfd929b1d1000000000fe3b80f40e70ae916fdaaa831da4889\ 32d5c5a55c87b8959300000000623ad9855cf7d4aae4f87215652a5d5207e0f6aa896cdaee570000\ 0000009c765240f8159ae32f0a9ad692d43ff362dabe6d41c4747f00000000001c29c72019124f35\ 9b365385e513a4bd20ba5c2809b88ba20000000002048666622b723459ad70939a3ce3f35b39a7f4\ 224c31dcac0000000000fd1aab7d58b61ac624552e422cab4bc7d7a7b648f6e364df000000001526\ 13922162fbc9bb7a3d572c684dc1e6b19ae0e1e54e1ed50000000000f492a0a67a16b5d0f196870f\ 710b55ea4eded129f6282821000000000138446a1883a23ddcff584a89152b92fedafd97eb0ccec0\ 490000000000f171b1698e1439e55e0875f829929efa9e9cc88b924fd8080000000001682d0c30ab\ 453bfa23f44ab575ab5f85dd56174f55b2065f0000000001d2445c2eb66094fd82d922141f54f55e\ d9dd32e5dcfc699d00000000003823b6d1d6227fdb956f50146298229b81bda363fd5d0ed5000000\ 0004795c5c535932b8c7192412897befebf40617894be9d84e81010000065dd0837000df44962e6c\ 54d567c6bf3c06bb55271b29ff6733000000010003f4800000010fa2c092ab0000065dd0837000ee\ 69724ed8df0242dff6069c07d0db37399644d000000000146006bc00fd0caeb9b784f6df7d228a57\ 8205d043752aa07b0000000003ffb7d32febcbf0de7dae6a42d1c12967db9b2287bf2f7f0f010000\ 2fbf9bd9c800fd34ab7265a0e48c454ccbf4c9c61dfdf68f9a22000000010003f480000002632e31\ 4a0000002fbf9bd9c80004f198ed235b8a12c5446f18dcadbb707b8ab96c00000000003a82f35905\ 06cc579d24ebd3b7a7f07a2c26613f6410b37e0000000009844ab02706b5e6e9011da3febfc7e950\ e0960a387f48cdc700000000005778fa6e18f90a199048e0d7ad72647b7fc6ec7d57729199000000\ 00006103370d5700fdafb9442b7c6e0146d38335ffb79d67244e0000000004d69a86cb5f5235a35a\ f51c53ba988528501a0bf41162ee970000000005330a16771b9d98f10c9bedfca0b2275e187fffcd\ 2ef03b020100000005d21dba006a90aad1aec0259401e0ef24ef08d9890f4c74cf000000010001fa\ 4000000002e90edd0000000005d21dba00b9b7a609042dafb8e71f0aa97856441601f0b716010000\ 00183bbf2e008f37993974111be44c00ae51bcf51679673f4819000000010001fa400000000c1ddf\ 9700000000183bbf2e00913f9d43156ac22223d8417a94177aea6a016c5d000000000008f0d180c6\ 8f4b5bc1e147f39d0f0d0b0784ca4cae2109ed00000000054c017380c983a8a20b69c3e7d31a0f90\ 0f32b33ce9f5b9790000000001cd6faf38fbe040f47f707ab55a7c988746550c6a9cbf4187000000\ 
0004eb215760fe0212a71ad52673f83435d5d7a52d6a878f5a7e00000000005b89ed2a02adf7a2c8\ 4ca2b82290a546e2b77d624e67535b0000000000c5c5714703b1a0346440823ef12090687c9c2f20\ 6a7eca3d0000000001c3be5cc08d692a48c3b21b5b2846166aee6e0cffe4d5bb8f00000000002376\ fac0968fe2cfac463f69e5731bbc2c283fe1cb8393ad00000000002cb59d86ae0bdc606e6c1e143b\ c9d00fbff6019e44c1b02100000000046be46780b5a2d1bfd78b8f7a51e708f41e1d4162aee54664\ 00000000001a7b0112c5bb024473a999c7f87d2604aa2fc8b3ea8ab99100000000035ef76b18c5ee\ 82deec18981e96c782e7a9a27894486c4c6d0000000004b4de5711db0e1fce4eb40ea0a4ee18b35d\ 4ef11c78b991760000000002098a6780e3f2f0ddb7856c405a70c470afe498ac73e3296f00000000\ 04127c3880ef30b02b4ff0829e7ef493a11d6167535eb0c802000000000274188aa0135434e1e92c\ 732274d09d28da61337c1751e83d0000000000341fcfa016bc48d87aa4f68119f5c1fff175ed5d47\ 733c810000000011133d2fce3a3531ae7dace65853cc20d4a31fc2e6f31d18800000000000b71c1c\ 136047af576890cec8380216c29ad2a6dfcd79129700000000008384be4678e5c138fe4c56e4247d\ c666402f5aca41c038a60000000002676aff408a7162d347f72bebc35b1fae91f7779318ea787c00\ 00000000fd1cac798a91aa161037a0b2f5302d4a40a398a25f6a8243000000000342770c009871b5\ 607c073ebe9c0bc8fdceb7e2591497ff71000000000005f5e100b2e37361cdd6d437c45d732030b9\ 5de59b0ef97500000000000d0a9fecbf0ff8f44d261ad7355efb7c4210ac9003d580410000000005\ c2485d60c26159a74cc68cfc7ec2e8fd43bfb001d975fcf300000000006743da20cef3bd7632ae0e\ 9a806845395a24785d8057e6dc0000000004633ac4bae3ca2f53b38a3075f2ec7489a7516795e997\ dc03000000000602a44829f825839e6dbbc87f847a37bf42a487d4db20710d000000000000989680\ fd6cf7b9a81138f8c33cfe1f7301a6857569756700000000012de32faf0f85d7680d3bf451fdad32\ 56031e3c8ef02023470000000003b7b9153e101921f951ced6c788ff74f1f20405141d70739a0000\ 0000002df4f510f5f84e21e5953d45f6a20b5fd7b65d50e23fca0001000001319718a50011339ad3\ d1fbb5f1b7637fb1a8a725845e7fa7bd000000010001fa4000000098cb8c5280000001319718a500\ 243cd73467386ef32b31f8cde0b10b3cc3f4887400000000012a8004002c02007a026949d5a40eed\ 4a7874da22508d22c800000000029050689832b717e44762782f93d687ad7b07962bdaa67c110000\ 0000001067380048c57c7271cb35853727c8bc4d760f2992f1667e00000000001dcd65004ee6def9\ 919aa3dcae4bc6b557a42cf5c8ac3d5e00000000003f346b80612430db2d8b1ba50a740e96752b45\ b3ae51e19d00000000043bd901e783e190d47c8ebc963b056b9f5b1a9426e7cac03e00000000009c\ 765240968d7ba6af740e27d3685854dc89985220dcd4c4000000000029b92700b98e4a742d09c063\ df85932dc56fa2112f92b42400000000001dcd65007c7450a286b85edebad3e890acca0d453de101\ 7a01000000517da02c00fda768991255ad66cfe4e5d0d40a81405e1c38b5000000010001fa400000\ 0028bed01600000000517da02c000029bf7f69711e85f15f1f6b003b648ab4b0a987000000000252\ 42208024c1d5d624adc13b41ff91e6e72bb78a9493d39600000000005b21a51029c3404d167962b2\ ba4ef630872fe3333708c960000000000051131b6f4f9b9d632147d07fa24c6b23d9b519d640a668\ 4a0000000002098a678062c08fbccd26bd23748b7de1d1e71859a6718a390000000000579e6b8069\ 479c2c05c9c961c16c0e3dbc13168744f9c89300000000015faf679a6d0e67cf08a1879673f9412b\ f124d5668b42742d00000000000904229b7e20eea835339666dc4011822f0245f411f73fd7000000\ 00011cf22ac08e8dcd3612adc818de33c3100a7c506f9ecc6c4a0000000004f35d711bdc24ac5538\ b857bab8f2b30635b4b36d799e89d70100000056e08993609b3e6fd99faa212eb6f4aae756d1d4ce\ 1997bab7000000010001fa400000002b7044c9b000000056e0899360a0208f5181418f8f558d0097\ 19410cdda0e1864f0000000000127a3980a259073902e42cb437795ad32fbb9edd6a7f6dc6000000\ 000132caf774a80d8f8e47c1e70774561924e5783439f728ad43000000000789b34bc0b125b63bfa\ cd180ef19fecfdd69c1fbab1036fca0000000000684ee180e27e14eab6c0b2ade569d1828b59b799\ 
fab923140000000002098a67800486c2fc0580ce5a3f81a4b2498a1521517a2d8800000000a3482a\ 2fc80ab2f12da31cc1f08b741c98df78ee4f3e3c5a1a0000000003b5744fdb1892477500eafb8192\ dbdeaf60cf5d44ce0395cf000000000553871c4033315765403d336b73c663fc20c0836593b44450\ 00000000002625a000358e06cb7091d21d9cb90f3053746f3be510dd3800000000001dcd65083991\ d4c0be3bf7c99809185ae0476e5c3fffc2f40000000003b4e7ec003b756984782ef0602798591d58\ 38ec22f76bc73e000000000bd199e56f4367e8565ec3133d783ea68fbd89e36426323b1300000000\ 02098a678043e7b92cc29a56b67bb494ede3f270493f98b2c5000000000217f4f4f75644a3f0e1e6\ be6aa2b592f59ac5e8255f56f2390000000001de042af3579dfd0a2d9e5ca57322b6c7827b2c9d85\ 21c3450000000005d21dba0064feb3e250db1d929eaf63c6a4f7a7ca7dc9659a0000000000535241\ 3d6dfb7a9e904bfa5726f918b7d559fdf3005a753700000000009c7652407c880f8fa504ba6f3c7c\ e2b37acf71e6eb264e4000000000003209a49d8536b84fab2786edd9b5689d32eb8bc0dc164aa400\ 0000000030fae880a7301b3353e7ca96d6f5134245e27006bcecaf2d0000000001dd7fd387b6d905\ d6b5b685d54aabc31e8ed81ab02551fb06000000000224723b92bb5c650329bb9d94e3bd458834b4\ 03406c0ad9b100000000041314cf00b9f566a93b98f8e201d58c95437268c918f843940100000019\ f75c3e40caf2525d62116d1ffd9f26166fb273893b11d61f000000010001fa400000000cfbae1f20\ 00000019f75c3e40d4257575632f5bee84f1da96423ec525a2e87c2c000000000af09c9bd1d93bdc\ eb281855203f154d559863bb6fa3fa5f52000000000014f1af0bf0495fc75825da6d19538cdca879\ c7529edf05f10000000000d09dc300008b1c09dc3b5a9f3522e3c9ff38be5c23b189540000000000\ 144cb3660d96906ca547926eec1378c72173d73afff6f67e00000000061c9f36800f2e7d90ba023c\ c4bbc466042b5448b8ae877f3300000000006a1fd4f115f7c56604c5962b299059426fb4ab9b6602\ 60c60000000001822e00d3207a1538f1a080b6a004eb3376cb9f52a3eac6730000000000be420e00\ 3704f6e8bf1336880f46880bb96e6244f9f3782600000000000dc755cf4a88aaad038f9b8248865c\ 4b9249efc554960e16000000264eb62cb3ef64af75c3aac5b3b428d2ba1f04e194d35660a5820000\ 00000826299e00657d865cce807ac823da15a4fb1df30aef83eb1d000000000188cac1696e291104\ e658f58073cb8c92137bc38d5938bdf9000000000026c962098d167435b1de223f4933532fceafa0\ f24601a67c00000000029b4a8fba8f49e79e8286ff58cbc114416c0df407880da26c000000000389\ b2fa70a95e35672d791c54e4fcfdae52888bd2e3eace57000000000005f14d20dca926ea86efa699\ 1fa7eac8466b848b4f5182a5010000000813eae6e0afcc373acd784edf78aa520663f7113d40e8d7\ e6000000010001fa400000000409f573700000000813eae6e0b45f87391d7dbefc68d48e2526c87a\ 84f2648a9a0000000000908a9040d53c0ccd94640028082d3a650beb44361b3a9f9e000000000543\ 4dab00d6ac3b190b94d229022273b423caebc076aa332f000000000000989680deb98e363faa545b\ 777ca5b387af45ea0762a98900000000003b9aca00ee39e4430373bc575126885b5cf7185b5ee059\ 60000000001da8b11152f37c6e6435515bce43a0792822c6275a4af769f7000000000001312d0012\ eb384849f3b717a0cc346faca0e27bee3b753800000000041314cf0019cbb770041f4eb52bf64997\ e331485401a65cc8000000000066851e002870c479941364fd7ef5eb19f05a9ba6c8770f69000000\ 00006ba2524039d49a14c355d3286570192ee088460bfdd8ca950000000000664b83e853c229989d\ ba30dc01a479ec5aef141130a5659200000000121ced096e5c33ec026903fff2c427d5e35a664e04\ 8325084100000000072536d16297fe0b030b0e3bb9e286c5f947cfbb4d28973da500000000007247\ ad20ac794620c1696807b7b34f1702041d75326b36520000000009fe6d6b74e2d80b16017a422208\ ec2de85bff22b77185c3330000000001941e9700effb3bd733f048a7055d53a3f9529cbe4f60b4cb\ 00000000003709f740faa1a1699c69136cc98fd31062430783df24a28f000000000005f5e1002203\ 8946c93a95cb5367a0d4b17d6f156617466c0000000000009896802c89c00d0b2470ef6d4ecf9185\ 6b2bb77b1b969a000000000138eca48036a9db1273b4e43126a1ee65cd4249fdf3780fe300000000\ 
013e49ef00566bc15dc9de58fb751fe155d6622dcf8c7283e000000000003c4d6d10704bf66340d3\ d884ec6af406e354692c49bcee7800000000087f7b10b1746411006cad075ce6f8c66ac42048ef81\ c6612b0000000000cff588988589ac765cb763bb8c5369a6e72f146d80d441220000000000009896\ 809a1e7ecdc3167886e1380373fccac35e435373b40000000000684ee180a33859e3aa51f2535921\ e8c0ff553e6b3dd11bfc000000000079db2479aa543f0b248a0d70606dc2083bd16ee4478fafe500\ 000000006771a0e0c98f13531773daae71eae49f19f9b504eb3d06ff0000000005283978f3e488ee\ 9e2ff633565227e064fb2b8c55080b776c00000000015ca95c98f5e984c0e174bd86322b1c85d7ae\ 9218f9b7f5bc00000000003fc6e780f763a36871e53e4a853aa7a86bed241df1d8bb4c0000000169\ 09f21ec6f8b0e4e8512c87e882a1d00a5979022789d77f3c00000000cbba106e000cc92086cb2205\ a83a50cab9853abc367ef1ff3800000000002aebdaa014816f205bfa0aef472578eac86d280f91d4\ 070c00000000027bfb4780038bb07accf595e7f4616ab62405eeba90a8abef0100000011601c2020\ 26149b477ffc32f5f17f15985b9df73976fc6145000000010001fa4000000008b00e101000000011\ 601c20202c83bd300ecdc03c9dc853233736d6cb80f56eee00000000083aa6bbb0378aca95f338af\ 41314449be8e8f4ea0a8e0a52d00000000041314cf004094dc0c8376d6514f9710894d637228355b\ ee510000000000d09dc300463dd84365d39aebb8eb9fae59182a564458bbfb00000000002cb41780\ 4fa92faa9b2bc6640d30d44d479e75bdab31e82d0000000038d880faa07286cd2bfaad56a96b0899\ 2fa301b3d6244a160b00000000061ad57300734e550f02f1434c8acde94fee5156c18e73e2f10000\ 0000035e360360da79d2a81404e24f56c58cad2d5e10ff7e403d3d01000001319718a500dd468814\ ba5e31e191ca6c4621053c75f6ce7a7d000000010001fa4000000098cb8c5280000001319718a500\ ddad4970dcb89dbb92f2832603da0299ceba334d0000000003e56727c315cb035ec98c9308d61b81\ f083b0dd2658ab20e2000000000a2fb40580208a18a517690fdaecb6a15986840d60534420680000\ 000002716e52302edf89f377b2be233ee52d99616b2a2a72b8e9fc000000000485d1fa4035d6168b\ a468d2647dc12708cc05ede8d6ca9f8600000000054319335a3f5c042618dc7f909304c569252d32\ 4f4eda6f5200000000174876e80043c27686a45242b4676a921cd7c035aa135cc7aa00000000032b\ 0c296853a87f9221c494b5a51afc9e2183016b0753deab00000000041314cf005bdf1aeae384a73f\ 5df517811f9bddf4978188d00000000001a13b8600651dbffe7f1a91cba9b543a4c03e04a9a55d74\ 680000000002da282a80745e3112286c20b1b2e77dab44f12f2ca39df3b40000000002540be400bb\ 5049c1620eec25285e91ad1273713eb892def90100000000f49f98a08a417d7cceb41b0084356247\ d2bc0cc3f7fb3464000000010001fa40000000007a4fcc5000000000f49f98a0922c87ce587cee6e\ 2fe267d96a2694087c9531f0000000000169f2ff2099d4f59149e927309ea7b34511732021d581e0\ 080000000000d09dc300b3e86242d194ff7e8377a7d9c7e0b1eb326e35e80000000004cda8e9bbba\ dbb3d93160e90612089080676cbf3357094ee50000000004f689cb0fc764239f77e8079604bc29ca\ bf24e0a703fc35ed000000000008647000f496da3417067b60f57f2785e263034f11a73931000000\ 0045d964b80014c804c4d80bb639b32f01d4d2db5a4fb29cad6e00000000054c01738018aac26dbe\ 5c54d6fe502ffdc0b2e4fd19df9dde000000003887b48c684ef39a468c406939e5dfdd3336c10f2d\ 69b36232000000000006307900535edc85d7b5d0eb0c2d433bd7f42b8ccbd0e04700000000006553\ f1005ed2d51b9cb3bed909766eccf13c1528245ca49a0000000000684ee1806c25505e92c2a7bc30\ a6186e7d34bca8df424de50000000000009896807ba8904bf4fd49bcbcccca76820cefcb9f37d31c\ 0000000004a817c8008ba80d6bbf3f7da5fa25c5a3b7733b27209883a40000000002da282a809f39\ d2e6d3d378cce91cbceefa26dc1322fc99fc000000000097595b85d3c6cf68bb8d7b86dcaf807efa\ c3e58baeebdc960000000000d09dc300e3f391e961efe25b7299383f01a0f07d8be983c700000000\ 0f177bd1a0eb4426b6fb46957b3a2f2151e6ae0777cd23925f0000000033e92750c0f14220176d02\ 916c3a3770d85f3ead3f2469330d00000000105558c706fbb60f73c5e1e0e7d1e6413f8eea28cb62\ 
a449690000000000b784693203274715eeb0dafcc92d49b3d5a242b95d2ef7b00000000001583781\ c0097ee4758bc78000f27fdfbf91d1e70b6223d493000000000549b036fa1b3cabcf7700756534fa\ 7ada8cb92a589aae1a010000000000684ee180187f6a5baa47ef0989c761ea95819aed876a01cc01\ 00000022ecb25c002957847390b1f556f3763c15416e8c6eccd7f806000000010001fa4000000011\ 76592e0000000022ecb25c0054c67d869528f5316f8fe131cde4760353abe0860000000007aafbb6\ 53639b82c4233b77286a1f56bfc2366bafbc81fbde0000000002d3011c8082754f448f81029a7c31\ f7927974416b6058eeaa00000000000e4e1c008722ce50ae2b4c8263ca8644a0905633fe7d290100\ 000000138eca480089445dda013c7e61d08f51525cc7c10a0d84822100000000070cc9f1358b9aa1\ b77a7f43ac2f4d944f74f94cb216c2bd8400000000003cf85a5e36e7aa58567373e516e4909a013c\ f35adf17df8301000000037e11d6009103403910061e89416a2b09967b8085513d6a990000000100\ 01fa4000000001bf08eb00000000037e11d60097917c6c5fee055df152ce3da0f80ff12c5c5d6300\ 00000001487b2fc09c973d17f24b1fc1d66a8a8ab4a81d7956548c1d0000000002b71d9e6cdcd492\ 9eded73064f668383c066639404bb37ee100000000002cd11560dd22889ef7e7e8109b7d02c1547a\ 2c175781ffbc000000000e054c0318e03c8ed2e789f9edae8fcf3590633bb2b0a565f70000000009\ 2928ded0fcb1ec7f993f561a04efba683b1651528e3b8cac0000000004d0917146fe16b1685673c1\ 6e23cd7d3cc37dc57748ce025a0000000012eae09c800dea1e2c69ac5406e3bd96a7b9494b27467e\ e3dc0000000000c7dab8402e37d28d68c051ed05fbc77f90119eccc1de30a400000000003d268159\ 5195f7a76a62aef990a6c5f57471461ebb507afd00000000104c533c00711877e8f5698feb433e08\ fe1308dcc30214ae6100000000008f0d18008620edb7e37adf4d2d009b1d899c8e2c3166f6210000\ 000003810cc680942fbc821fc838d8b4c709f96f2cbad0778dcfb70000000000677bed98bab41249\ fd4f5d93c6adc4b9c90fb999642f50460000000000cddf79d8e90d2910d5470795406e208a0e2382\ e00727e57500000000072cebdc900063d97bb3effdfede3531aba9fd08810364658a000000000091\ b097a21270243c0180f90be55d108d1931f51671dd57e900000000005de598f529ddc433e00a7ff1\ b5f3c7293131a2e037cbd957000000000023c346002dcd2c604fcfe9693163210c06cfce950ca286\ 67000000000002d21d3c3405b896e66938e31ea7b4f7f2401a9ec10a78750000000001931228b649\ aa45599fb5491b9fbd7b76f21214291efa00ba0000000001df2cdcc77067e83def38f58d770010a4\ 2ae1a5af56b16c2f0000000064eac7e3a577f8d8f58e84b2c67da226280951fd003f05b7eb000000\ 0000b2d05e007cee418b519c1ad7082a8fe3e11a7e13ad93c3be000000000010a5266e92070970cd\ 4e5853132baa7d8a0ce4868824a28f0000000000684ee1809d241b99e72e5e2d79e78203189bf40b\ 3b7952b9000000001256a11ab804b9962dee342eb7c04b34627ab2a0e42df8dea70000000000c708\ c7970c440093a95657a1681212fa5472c7fc2c55a1d600000000004cb4965318470511b9d7202748\ fdca49dde600534b78cb42000000000027a318401f421926804e064ea68891eccb439d31688864d5\ 00000000008b874ca0222ce938995fee24865d53a28bbc0debc2ab70ef0000000001ac9fe80d2d2d\ 48f1972d3353c2a4469d6bf0b96c3f4d848d0000000000d09dc300442b49c889d9ca76100ce0156f\ 2a69a8c4b1ffaf0000000001dcd65000459a88c660f2a04b819a1a5e318c8abb41c955f200000000\ 0059f5933a481c665ac14b52443ce2c419283c78c7b6b6444000000000059e4294804999a6eb6a2e\ 6cfa88045ecb6ed4d5f6e32ec8a1000000000271d949004c796111f3cff4330d4a6c52964dac7482\ 2e2a2c00000000015e2b21e35cefeff1347f24fa7c87d8e898cdbfa12937971f000000000066be56\ 70647499b150702e872d1990fa5d28ef64efe5b97a0000000000009896805ac5d2d01f8ab891ae82\ 81262b406863c21bd3de010000000cec0ecb00b80a400d569c1e9a164af334e53ddee55f78717000\ 0000010001fa4000000006760765800000000cec0ecb00bbb805b0bd9762607a15f231bd7291f6ea\ e806930000000000382c1ceabd1cb7963577c51072b1a31693d674e282a3d4330100000022ecb25c\ 00d66843fb6ca5d7f9e469046f2bf90d691a1a7ad4000000010001fa400000001176592e00000000\ 
22ecb25c00df19f1de808813f2a45558fd48e35a78cad1179b0000000000bec6ce6026d1ba1af297\ 15657f459f4fab25084c868010b000000000023026d56e3eab0d0cc412705a42409ebf5b7feb6589\ a2fa720000000003941fa1905d6efa1ba31887de42c2fce28035c300f3a5bffe00000000000a6e49\ c05fa4c476311b91b28f3d21452384d9248bd415450000000002d934daee6384cd77f1ab6e23dedc\ e9b45cfaf9f8a1c6ded600000000002b6c53536a664f46a6a491a7e4876c1a9f8937c3d1c9108a00\ 000000196bdbb2067e9f9bb12e0080536813062d030d8db6882c67c200000000001efe92008addc9\ 28d4f72f012ce59a333515e6027972ebe00000000005e40933dda3a3747d33c9f273ff3ef5eafd21\ 92b8348259d70000000000e67289c6a86d9afb40bf333c2ccd0aed804f396ab23dbeb50000000000\ d09dc300bcab74d57bd7b5c33fa1f6cd18d72235b8b7edd80000000000d09dc300d8cf783a965d0f\ 8d6b36dd50be72748e65f4d91100000000000d57080cf7fdf7b8743b3149aac39e7b315c1458c6d9\ 610100000000192739796fff36f6c690d24b8abaa57484ef3255d2c3582e63000000007ab13de879\ ff6af7a2e5dc70105f70f408a8eb59a30beba88800000000158a8a095a06baf3a6174dbf36fb017b\ b69fa0faf3fe020f59000000000067c95dd019a980683646391210514144141308d73e9763250000\ 00000067292630351880c1e5cc48d65bf84e65f92688197b5f51110000000000d09dc30041f55d4b\ 8f2386da3fd9ca5d4c2546443675e62f000000000000a7d8c0424194f927f7e09701b4decaf86e21\ 67b4447a2b0000000000a1ad772056f65d98c9c570d01903f38052d90e037149438c00000000006d\ 86066064f4e90f615af5ad44756a18026c2d5123f646aa0000000005b114f72f9379fe8ad815b1e7\ d2d2656d4b2d70c37298e99d0000000000c861d62c9f442dd2d5984a7b4a3b9e26a08edea5214eeb\ ae0000000006bfd4dc10abe298ae9d97ffce3dea2292269bb73bd23467890000000000c069209cae\ 805a939846f35a3a7819313b99fc08bb612f4b0000000002a600b9c0b5635d09cb4aa4e53e13c8a8\ 3a3cbe78770b2ef0000000000a2fb40580dd01a614aa5a13128850d70f9e3011897fab6ee8000000\ 0000c62f7940e69f607c7476b2ad550afbd073461445f8c54274000000000022368b80157c491e02\ d205c7f3424d8a9b26c2a1c0ac863400000000025be8fc6031fdc0eb3610c71ddccc7c3e70dcc07d\ fe5e730c000000000c8c1824803bf0136b71def36633c16282d353b21a23cb04e8000000000c0c59\ 1c2e449b7c6c3f42f66b9590f3ec3ed7d589a193633100000000041314cf0057df59ca6552f02e71\ 6fc268b60cf96bb645a3b9000000000066bef0f85a42a905eb4b92852a08bf0a9a0d909f9b3e497e\ 0000000000b2d05e005d28045a2ed817dea68a2bfe9fcaef0bf85d382f000000000251afa4807d5b\ 7f0bffa087122bf45bdd721c4234bde79deb0000000001caaae4a1619feebb031c22928943cfef49\ a2d43d122c6f2c01000001977420dc007dc5ae35322d2020e9ef8967781b967a99a95bb800000001\ 0003f48000000043e8b024ab000001977420dc00ab409b4c4f940f1ddee823d14aae442738b20c1d\ 000000001d837fccbaae73f605dd7829ec34da93000b294aff06cb7b1000000000012bdb2730b67e\ 29d613bbf23c22aec19083a1f7d97e4ac0e7000000000f7b90da00c08186503c5968e4f96c3177c3\ 73084fd1a079ff000000000082bb171bc952e94a189f0d8b9752e44c1916cdade1a3e59700000000\ 02098a6780d6ede141274feaaf7b59fbde74ecb953c71fd688000000001c0ddebd2eea62f93ddc2d\ 5ab754fee4cdcc8a638336ecd1e90000000001097fc119f29bce7a79c869d1261ed2614b8c2f8827\ c340d70000000006f7ab4340f30cfe8c502769c165440db0cc7c5f7bc4b13cae000000000044babc\ 2a1c7b254afde504141efc1278dd7e1bbae63177ce000000000035e05a7a25085ad333036c88fef7\ 66b0bf845b8237c09d720000000000d145fd682b391bb997557ee5bcdd806f1498b4cba88defb800\ 00000001348935de323f39b08a69e07dca65108bc46dedc041965e8d0000000001c8d55875362ea3\ 1baf01030f683309ae91a72f544192436b0000000000d4c7da25600fe8de60c12045941a8f8e29df\ 809d1a5945fc00000000019268a9a070f2d8f498f29022b45cd98db8cde1a331bec2140000000000\ 08fb877f8f51909285939306d53f5dd7a010cfc8714f13f0000000000171d5135aa9989821685466\ 262e6c1fbc09bb1c1c2d5cb94200000000846c244780c3d85e9db6b2ea524d741f768f1f9d387d62\ 
23660000000000486c2d39da169db24ebe463b2f3f399d2506a281e26490410000000000d963aaf6\ e931e73605b8e015aafa46c059b4c938a5565e9000000000003296e5b0ec35402e0ca22b1ea85298\ eee177bc49e90ad1060000000002bc75d8e0f0ff58e6ad7336ba2769a0e3d66114f4b5363c0f0000\ 0000005089ad00093e43a8f5ac3c5b1afa63890227456a6472c6c40000000000af3cd700384f5f4b\ dda58b7ee33e141e556545b5e8f384ec00000000002602e1c348b018f3532d7e99b468824458741e\ ea99e14574000000000bfd60e96d5cfaa0f9b07c01539f475623e41ff970c6d688210000000002ee\ 6c278077d22c2b303d45abd277640f47439434481df51a000000000015e79ae095616f96f7fbaa08\ 5a2b9b389051efcfbdbaedd900000000023db1d840a2aafeecf08839f7ba15affe137b141ecd76ef\ c0000000000254a47a80af10f4e2f543e73871702f4814649e5f82ea4062000000000074d33a00b0\ 448459e5bbb18941381013cf4a15dda8d0d6070000000005d9992f74bc3d5f818bb289e2d0243fa9\ 57bc1c1926f5f623000000003279b0e180cd624cb6661f64b7bb204a927a9d1072bb9c24f9000000\ 0000826299e0049d58ed26a452e6e5dd6d62e40451544bb504900000000000684ee180459407b276\ 3cbf4b77b303588e19a7f0f4f3601c000000000014dc9380567ffe755542bb1ecb47be61d03ec844\ 1e889c6b0000000005602e5024582ae312177dca3adae38ce582a4988bed089219000000000067a6\ a7188d09f5b96d280c1dab30ef6176838072f5e835b70000000000d09dc300a3c6b754ba760c2ea0\ 5f5787e8241f3ed760a36b00000000000e9a6740a754cd3e92f61d24f487a63d4915aecd09596d4d\ 000000000342770c00aaf9d78c7d2ac10e958ce543b9591dae5752d2e200000000043ccdf600ab6d\ fee4bbe68b983ab47c4b4588d8e360692eeb000000000059c8cc07ca3afb38685f68757670c1b29b\ 84c95e071d531f0000000000217ab022db1be5f6bbaeb6ec78736a750b3d68367ff5f82b00000000\ 0000989680de0086b188032758faf4dddec921e3f16bcd798a00000000020ff40a4af1948fe53bb6\ adcf133b60f1eda8691b1e5c832d01000001977420dc00e336165bc6a094ba3a02d9f330f3a1815a\ 499a14000000010003f48000000043e8b024ab000001977420dc006152c081a3c42784b9f8ec51f3\ 120a9512ca12e20100000015b822c680f0d90cfb9ac370b445cc4b3b10632d6e2cbd89d900000001\ 0001fa400000000adc11634000000015b822c680fd65e5744fe4f66dc4d504e0fb988d3dd3afc2b0\ 000000000012c684c0041e573a68aaf6f63d62277a93470a2db546c42c000000000ac4b6fe8010aa\ 81a18582cfea054e997b5f65cf4e1a77983f00000000012a12c3401a255a972b52f4e990bf60ef8b\ d45edfb2c5d88300000000025b278050226d357f4a31310827c8160c6dc8bdc6a1515bce00000000\ 00684ee1802fe1c2789a0c8c9c9618a05246d6f59a3c9919530000000007c632f7803531e14764ae\ 2efe80c5c31556174538f4a5d4920000000022f89e1e00460cb613327ecfed969f25b9424e3864eb\ 9f4e64000000000068029640473d8a57c741396902b3c7454b27bdd0d6a134ce0000000000209b2b\ 0063a34a9fbcef1e62eda4711a62f2c839e89d4b87000000000000989680645ae642aafd3e62787e\ 75b8bff980ba9513ae4a00000000019c2518c56a30ee0050b9c3f7e29acbe615bcea9498c9f52000\ 00000000359efc6870b58b43e2a44d00826dbad5d543c1d0f41e0c81000000000267d5ef10787612\ 82c37682b801473ebfc649a84053493fa00000000003810cc68079d202f62c8d05c89f13a461ffde\ 25f0c705ff590000000008d0c2357c8b0648f256a086dddccbf5312aceda8deb811d2b0000000001\ 500d0d80b7816b642fcfc7fa3bd0403c661eca855710faca00000000041314cf00c7f9cd94923b41\ b3ad4f2741004deebce12a05ea00000000003e95ba80d7d340f7eb7b65caee23abb154f203f3e924\ 6e43000000000191836069e4b1114a86ab74616ea9b7b825ce15931bf1e33b000000000039d10680\ f4fab2abe4aa1e8c7da7f82104f8514d48a95f440000000000684ee1800fd4b3eab9ba7252380db3\ 57951659712fff5f0500000000001296bf42259178933ad25f845a032f10204cfc1d55a9b88f0000\ 000000579e6b80299078dee7c2dc448f1c6e36b5041a0b9ef674c70000000000cbb6b9f03fdd114f\ ddb31693c1c81e839f3fd7299a63f0160000000000b6bc45226183604cacd1834606a771ab3700ee\ 9804fa335a000000000009e8c610680777a5cde5cfe0130dca0c73c8f0634a52f289000000000068\ 
4ee1806f494e6b2a717fc4ce90faca520e75abaefaa49500000000000bebc20079de4495619e83ed\ 63607d71dd65bda41e88508500000000007a3fc6c09a77f0787f78fcb35b557320cb8e67fa8021b7\ 2300000000000ce90dc0a5507bd6c7990a7cbd7d62b34baab39ed0af7537000000000002faf080aa\ 3b23342a0cbde043ac0c4f3729984881ba6a7d00000000006bf57b50d5c19ad331a048f00305ca80\ 4cd2dd2e4226294400000000012bdb2730eb2426afa3ed909105bd5a29050d99e646ac42e9000000\ 000513493000ab3dc5715bfb300493cac91e99f9f9b239c6809e0100000011601c2020ec74064fd5\ 2b987aa391c8f112d5b5fbfd13e5a3000000010001fa4000000008b00e101000000011601c2020fb\ 6653c85ca496cd0ead67549ba43b01fa192b0000000000000cd47450956c10a45f93a6f1551d2d43\ 36ca7b7aef1713db01000000517da02c00466e5f891b5693eee6b7514cd0d4b41b1c9c8161000000\ 010001fa4000000028bed01600000000517da02c0056468b3666a3794f544f8b480d73065c8514d5\ 790000000001794c22805d45e7c7f599f2fbfae955754b420c9aecdd89e400000000001f4add4068\ 812af6b461b8fe24b10458125c5f6a64eeb2d30000000004b2d1cdcc6aad52f4fd213b6b56724329\ efd2fca167becbe500000000005d81fd0077931b44c96c6abe8a5bb92be70368d91b7c9abf000000\ 0015436e5f2085ecf7b47e236ce13977858dd7d20ef26808cfc70000000000037502809d1d847d53\ b5391b92fc814860870b50146e6abc00000000042b091bbe9e2defc01f109f39258ec99ec7cf1f40\ d69fc897000000000ba43b7400ebcd43eee1bc3e65256db7693bf8545b6460a6f900000000016c09\ 0de0ec06643251ebc320a967999091f5fd6d921d957c000000000052a0867af4da56a25f236be6a2\ 72e02b864b9f59e04a1f2c000000000d722b1180fbc1f648a3a3397fe9e7ac927cbb75a0a5bf7ccf\ 00000000006959e8e0fd3937027ff09c5c295d3ad9f1f9cf84a6322640000000000000a7d8c002e8\ 105822f3b7039d3dae5afc34aedeb2b0186d0000000000131d7e6003e7bfd63b373b4ebfd6b2e078\ 287cb55d1dd8d2000000000e66690220179f9df528170f8c191a9e1a8a09487f80fc972a01000000\ 15a37707000e4efeceb2111428012ba546ee801c88cd874628000000010001fa400000000ad1bb83\ 8000000015a377070012a84058cba3e5a057f20ee0ea9571a69b954cc800000000019781eb421523\ dce52fec5daf966e2a601437e889b5f7113f000000001732f1c59418475a184a480ee8a66e2612ce\ 1290cef16c569300000000000098968019a96168b1ee3f698868bcc79ebaf81bb9bec7b400000000\ 00a43fc76b289ce62b7c304d03373a119fad3170b6f12304d1000000000001bcb7282944d33c9584\ 55cb5341a730564b2c7a493f55010000000006521a42452d63176728c9941684367c8df22269d436\ bd530c00000000010c388d0032eb8d34303feaa007ddab51d4eb34171f09b1950000000000be2198\ 3c5fe88b77e889616aa0db5f83cb36bf0194ab85b4000000000342770c006228cdb46cc1e222543c\ 1dc4eb2d1745b5819ea90000000000684ee1806357a04c1c2cc20c58148d52dfd177a160618a2a00\ 00000000611f24cc7f53858eb74cd1bf7a2b4d874c51b20bceaf5fb5000000000bd4946f708807f2\ 4e7c64452646bd628571395ff82bf1990800000000000a6e49c08ba9427c28734553483aa2ebe8d3\ 8527f15d78b1000000007a3c704200912a686122e90db31d11d8b717f41585c3c810fc0000000002\ 60432d03ac4e3afe4c75cb13e14f3d2c456c0c6678f89f4f0000000000d09dc300b23f2135e4efac\ ed6b4ed0ba673d6206c61ec7190000000017be2d0647d8a3647b318d415c7bf0f909c5dd31149584\ 7c7b0000000001e069d700fa0fa85982c8fb0d5f16d8fbe9e343c7dddf5a4f0000000003d8a07c27\ d12e0f24ce0ba902afd72dd02d984798da887a14010000000013ddc120268ee9cb7a2a1b4fb10a39\ 3163ebf1bb253dca94000000010001fa400000000009eee0900000000013ddc1202d9ed186b202ee\ 438176b3709944b357bf92df2f000000000271d9490032af6e62c122cb08884575a65ff43ef239b2\ 5e8a000000000061b1367c39bcb6acbc6eee0f0635e3dc5d5c82989fd611d700000000039abb75a2\ 59c1ccc185ceb45ea42b0616f3e4341142c1ebd30000000000bc46f97f5bc0995dab943f099b3c7d\ 3ffaaf326da41d6f4000000000016048e3b063c222d9777383d84a3e05eb26d1e5b102aa33d10100\ 00000135f1b40070bb852483af3e2fc565b57a212e9578ec994a0a000000010001fa40000000009a\ 
f8da000000000135f1b400779d078c97e399ff3e52e066aaa7a4305faf11740000000000d09dc300\ 8710d869ee52f5c896c6af48db885fbb3686e6ac0000000005600995df68093f5725070c6af0e581\ 7c61c900fec0b7d702010000008bb2c97000a68fc5172d94b2bc17b7346699c526b8f7898d7a0000\ 00010001fa4000000045d964b8000000008bb2c97000ad0b04757f1b11b2bcf0a3bd9d05855250ab\ e7600000000002098a6780d52204dfb45af47d9aee040d4dfd4ee659f59da100000000027863eff0\ e4cef8a2a16fac1ce26f58251cf526c2d1603da90000000000342770c0ffe48b4932ebf98ffed079\ 2093c63ac9e392d78c00000000009b55c1c10a5803bee8ad8dea076e2b820b4050cc07c11b810000\ 000000b2d069972b45c6ebe69bc210918305ca332663c8d5b601570000000000fe24ef9e2cda63f1\ dcd0c29ce7858c8b0b5b5992be728a930000000045d964b8005411e70d7794cf3858ab0b2ade8f56\ 7601ec5dae000000000148d407f655cf7cad4d0381de9a4837330efcc2031fa23d3600000000061c\ 9f36808ff0922102bdc7cc16c19d1bd8aeeb46c6d6d93c00000000018046075a95bbf0b6d3421ee5\ 5e92a0f98d1eb50968db4a1b00000000005c221bd7b3163812dafb225481d1e10c1adefd3ececfee\ 890000000005d21dba00e794b16f6c357c709eb1f7a6d761e69b34066e6300000000000c09eba9ee\ ea944ec3f39c781ae5cd867828f6b9451036bd000000000104c533c0162d306b7e2467925d01e567\ 47ab35ca1f35ce2f00000000023bab1d9318c52ef7b5bdbb20c6f82d02e7e037e132d043b8000000\ 0000810914511ae15fc742ccf87ca80ab63ac2e2192fd46b4e09000000001aa8eb73ec323a47584a\ 5297d2bb2240499a6065c8a3e4f0e80000000002da282a8037b993a060b384f80ff7942fc3e0dd3d\ 66f508de00000000012ea32a214fabbfdbec3e15ead1fe20abc21d649ad0788da300000000016685\ 9dc05520534190a86d58bfe8bf23c248558c4ba2385e000000000950372c235eb163db0265319f11\ 593250db03129b2f70129a0000000001e34b62bb6415bdbcdaaede689fcd67ac780e49361aed3553\ 000000000192dfedb6683bbd5baeeddf706096174a8a76df6b40fbf33b000000000ba271b0809331\ 39a1a575f7ef5266b94eb00be18b23c25c64000000000017b56aeec024711c863ddf881329360119\ c5863621f115940000000000ef409bc0c48cb67268a855b7a9246b2f20958339e1a0143a00000000\ 0103d4e050fd22702dff32cdbb481176cc36e1b9ecacf6786e01000000adc114ad600fe2000f5edb\ bd459c78604ec9b923b56fc7993b000000010001fa4000000056e08a56b0000000adc114ad601f4b\ f8be2336827c8a874954d55d3638885d417d00000000000a5765fc25a11a4ff308fdf2b3b382462a\ 5348bc928c9677000000000be0945de032396c0057675a55bbfb4f0a8d6b93f60365001d00000000\ 001f26b9402a10b1a406eba1fe97b70e22be68e54bceb3ef8f0100000006e76bb78036a76acb4168\ 7aa1ba4736368b755f7e997cf64d000000010001fa400000000373b5dbc000000006e76bb780473a\ 25a3282f348c5f42876b073222123fbff7ec00000000017f0fa8e07449f076f287d58f07cf8e1933\ f51661c60e13b50000000005715c2b388294f45b6cba5f3ef5c844e4fcb3974530e9145100000000\ 01a13b8600aaefb5e58b5010a19d1a365a8809e19aac82de7400000000012ae79b9cb26ca3b059df\ a1d451092034cee07c52c552230c000000000236e34a80d27b0ea419ff6f6862bc92188120a1e456\ d31f21000000001e1704cc33ddc43f752f8f596d2b3a5c6a15d8433f55fcaef9000000000d0a1be7\ 7651e767ee66507e237e184cbee1a162e7d1884548010000002b46ee0e00f962d255fb6c8ba06b11\ 92f7f73109f5df843ded000000010001fa4000000015a37707000000002b46ee0e0003d2fef7e8a6\ 83125c75d355a6d42317dba073f100000000000ee2e814f095219727aed45415e36a2b5c19fb5029\ e89e7701000001977420dc001144fb0f59a1ef7da03ad76408fc7d57eab0e7e0000000010003f480\ 00000043e8b024ab000001977420dc00279546cd3a5327d154ff8860b3e74c748e63efac00000000\ 007444c5a4282787e57f03ad0993d88ef704d6f8b63d4a1dd30000000001248a23003bc7216922db\ 157c5dd2194e797e665f68d71a9b0000000000d09dc3004076f4bdca02961be2fdd607780d941c04\ 509b8e0000000000bd5cc160ed324bce36b78c66bd9aea0e001dbe4cf1eb8d050100000009590c8b\ 6047488d3ec1c521b448495d27117cb60bffd0fdd2000000010001fa4000000004ac8645b0000000\ 
09590c8b604c6c13937e9941020c8404cd9bc299290792bc40000000000ae2846380609393725d1a\ 32c6ef9e808b53762f1ef10805a0000000000138eca48062243ce8e78e7fc31e5e2d15e343c514d3\ ea36e20000000000684ee180644237da413f40f5416b301dbd21a31c91bcdc8b00000000061c9f36\ 806ee6119e0b6585292ad34a71299316a482f49087000000000931c5df1e6fb3b4cd6052da5627ef\ 8ef0090ec9790a432b1f00000000016d75832d3bca276c437c1c17a40bec0d2c51bdba4f5038ac01\ 0000003c967fddc072a2a2f97cd7864f6cf471dace59981747cb13b1000000010001fa400000001e\ 4b3feee00000003c967fddc07ffb764c47c1155d2b87be7afcd57da26d673cf00000000000fa0b59\ fb8412614ad8113a5d54c5ef33b07a6739d99b3d1300000000003d4bb822a8aa654e9506e47d291a\ 219ba6327c6af6a18c4f00000000002de6cb20b311b33c9dfc5df0e8178487919402cd4523a28a00\ 0000000188be036ec0119fe21e835399cd053571175bd995aa8ae8cb0000000009e79b6e9ac75491\ 0da42fee00eb84210e68989ee3c05a2b8500000000019f71c2803fde33e53abe41b5c3c63c6cb641\ de639ee3ac48010000001a10296ce0080eaa9aa1fa175cee84a62b135b81c52113f2f70000000100\ 01fa400000000d0814b6700000001a10296ce01017c3c6defe88a022660daf01f4aa124506bbc700\ 000000054c716581212e0e716c52c0f54fdb7dd8bb0b73d5905a7c7d000000000051a88a80480def\ 76f9209e5860ffbacf8826508140aa993100000000001f4add40593063f464fbd6744d78f620155f\ 6185fee22363000000001921b9b1006d1ba9eacde5cd41e41dbaf395bc09caa27fd1140000000002\ 087f60207b0cf6313e1a18ea8c80b7f2246d9da11115a39b00000000030836ab4489dcf37c39a593\ fbe93b7dad9be5bcee95f244b70000000000535c28fb8e8f7edadd6c8162372bdf69f8c2af59d774\ 9fe7000000000b4725510fcd5fb59687fa21f09f82d2c392dd64965e4547a80000000001959bb2a0\ e5a394c4fbb99739e6125e5bc9c3708aea10880100000000010d0f8bed0a859fb3dce8a824a25151\ f389d0a72a53627f95000000000001fb5ad0149ef8aaf6d2755f6fed9a10ca123ec999952af70000\ 00000171c5b670215e273f29f6f0ea0f048d60ec602614ad34d6120100000022ecb25c00285fa395\ b7bd1b1303f3e5e75d0142b3ca5f3935000000010001fa400000001176592e0000000022ecb25c00\ 2d0f2ca4ffb222e0b45bf7cac566fdab8100e8a70000000000342770c040bd4f62f7cec9c2588ce7\ 80909f9b22661ffd2b0000000000684ee180435b54f40121c8ffb4500fec9b0b1c72f361a6490000\ 0000041314cf004937079ff49769a3a4a640609ead7c192f3d3d8200000004e5af8e5d484e976856\ c089b5e855d456bef4bb5562c5a2b366000000000b08a024f35b792852421a2c3cfd1bc2cb769bde\ df4c535c10000000000053efad21855a4b220698d4f6ab096b01f37ab4305307cc39000000000a58\ d1d416867b2ff89a0effcd9b327368ae80cd995be16aa40000000008bf7adbbf86dcfa56464d00d5\ c2d5c1c25359741a856f15f5000000000080befc008e0a8e2f68e8084d4bddd3ba8421428670f1af\ 180000000000009896808e7013b7f44732cdc8ab2e5cb31e2737e02278b10000000001fc72d5cc91\ 62443606feeb4b57705d90ef324e01139dbdcb000000000034dfcda9bd759e714618b5ad4251ca07\ 3e35e134abf629bc0000000000d09dc30016f9abb72ba6141b3b15d5633114fa493382987d010000\ 01319718a500c7f50beb51066e5def28e1f49d98feb72ae0a631000000010001fa4000000098cb8c\ 5280000001319718a500c91adcbe5430bf9d9f5cd89311fd65151b6f076e00000000001ad2fba5d0\ 6418511da16f8cb443f4b9999a36aec1d29aeb0000000000d9a4284fc09391125573874e379f1f0a\ 9297ca74ae7fb5e2010000002b70458d000958e3590a269916569e7de9c631fcad6c6bd73b000000\ 010001fa4000000015b822c6800000002b70458d0012a967c749eea600a432988e41948980ec03ec\ 9e0000000002d21dddd615c10814895abbe27dae398311ac7a795e6fc9b5000000000171fb1e501a\ 356b628c57af3bed89702d904864d8a0d4dc980000000001938a946033c0b260ecc2a8715cf7b707\ 8d60585ac707a34f0000000000b1fc6f003e09b20202bca4a7e45f4768808c6d2c927dffc8000000\ 0004a817c800476f02fca635de0dcbacac55502ea75e1dc8366200000000007b347a654809e9f0e8\ 9772c6bd996794e0c31cc85e2854880000000001b618198068ee1c9a14a85ec2d960c5b50af97e47\ 
bc89d3d300000000001b7125806a3a1a17b47f869f1250a2c626508ddec86e84f200000000042422\
a740725631934f7c59bdb872d63f4ceb34672c84e36b000000000015c9c34774fc44d06c5360721c\
f1570e03c791780ef3159f0000000001aa5aa9a18800973446cabe423c6b327742f51da55b1f803e\
0000000000d09dc300e446d4a5ad6f7299138b6b2a3fd56f18ea23149c00000000007b8fb86ae909\
bab205ce9f86c26f3626f1fc91642bbecca1000000000001312d0004d2e3a7a383c25ea344bd112e\
4d48cc284b833d000000000084914e0b121a9af5c41e1921cab2319ee796a869571671d200000000\
1ba66d9c8b7412c4ce16bc1fcbf96320f04f9355a6a04e4b7f00000000009c765240e842a94923e0\
f2f20adb087f524f81aeb105579b01000001977420dc0097489276ba44fea671be53943e6d0712fc\
a587de000000010003f48000000043e8b024ab000001977420dc00a01fbb7a4ad8b3153e0d7eba04\
5d3f7e0c09475400000000007f8dcf00bd0c0a74a17a1382e6960e27f0d95ce545ca07e700000000\
040d1eee00c89656f5ed3e4ac13f1013c6da5fc36472b38c3c000000000295b24077cc606a4bb273\
e49a64603d037f083d97c22fdd130000000007d6f5e69cfa5b19d0a7b149aad96315bb432e5e157b\
2bb85b00000000005f10bf510039751a833f4641f4cfa4c42d1bd54adafedbe70000000000684ee1\
8034d6aa3f06452d1e221e0d25e34473ad411170f9000000000005f5e1003df3ee2c1b7f33478563\
900fe880379b46743e8d0000000024322636af4abc5029a3f4697c1641e6c7772b4023a423215b00\
000000003398ab2e5b760eb50f1d8740508f4e2fef0488fbb1e80bd600000000012ffbd3007e1d6b\
23752bbfd423b426f57d216917f8ae006500000000058ecc0c738ccff95c847f268899fff6abdf78\
ada7f1524f57000000000050a153b0964316d13b7365589448efa2dd44d4c5db9eac240000000001\
759f07e4b0b275dcca9a959a7e9c4c3ccc0e61344eff9ed300000000041314cf00d16c0ee40d4efe\
fc349182d5a57796f5ec31a4d6000000000095957500d3ba323877c19e76fe5ef6098b1e5bd9e913\
493f0000000001fc5e364058ec9228c514efaca08543ba0969b145f1ab039601000000114f91cfc0\
ebd4ec914becf85c41585d50a81dcc0f4bae4d3d000000010001fa4000000008a7c8e7e000000011\
4f91cfc0ef60558d333e77839026fa1b0574e1b0f1e4d0790000000029ea5e4814f3a531509d46df\
87e058c2762672a51366c3ce3b000000000014345918"},
        }
    }

    /// Genesis configuration for the test network: a height-1 block with
    /// all-zero `prev_hash`/`interlink_hash` (it has no predecessor), a
    /// "TestNet" extra-data marker, and the matching genesis account set.
    pub fn test() -> Self {
        Self {
            genesis_block: Block {
                header: BlockHeader {
                    version: 1,
                    prev_hash: [0u8; 32].into(),
                    interlink_hash: [0u8; 32].into(),
                    body_hash: "f6ba2bbf7e1478a209057000471d73fbdc28df0b717747d929cfde829c4120f6".into(),
                    accounts_hash: "2e02da3d162e20fa982029dbde9cc20f6b431ab05df1764f34af4c62a4f2b33f".into(),
                    n_bits: 0x1f01_0000.into(),
                    height: 1,
                    timestamp: 1_522_735_199,
                    nonce: 79_001
                },
                interlink: BlockInterlink::new(vec![], &[0u8; 32].into()),
                body: Some(BlockBody {
                    miner: [0u8; Address::SIZE].into(),
                    extra_data: b"TestNet".to_vec(),
                    transactions: vec![],
                    receipts: Receipts::default()
                })
            },
            genesis_hash: "1fc28119e35b1418713218192012c7eda9e1d6d142ce8138a313366bd6068300".into(),
            // Hex-encoded genesis account data; each trailing `\` escapes the
            // newline so the string literal continues on the next source line.
            genesis_accounts: {"\
00646da588646cae6840ee6c5344b4f92311ced1c2490000000236610228efaf96fd0771d7c9aa4e\
e9e7b205c938a08e0f7b4b00000001df68823f7294ac3ba70a75d2993ef78f63165a89c40993d0ed\
0100000363f08a2af7b5d050775729289baf93eb7519fa5e5174ca95080000000100001680000003\
63f08a2af700000363f08a2af75ee07ff5a5ac9eb2d44f08436a243023fa9e986301000003e649e2\
1e68d9fe7a95fece5d2557ffea4f3cc87513f57b17730000000100001680000003e649e21e680000\
03e649e21e68f59610d3b74cb438d96630d25e71aca6cc92450c000000020bccdd8f69a3be3ca0d9\
40a4ca56736b22657f8ed22ad49567010000025d8fceea3c589a896e740c1d336b0888a542d7c7fb\
4fe5f11e0000000100000b4000000064ed4d270a0000025d8fceea3c5050a51230a7ccbe61933f09\
83d6c0127138dc7901000000f514d44f1c9abec8825ff904e19608ffb95db30e4f11d3f7a3000000\
0100001680000000f514d44f1c000000f514d44f1c926764d423cb22232441295da9b5af5596e531\
03010000015bd30a896921556425e540e081228f2a42508a4b7896d60ec30000000100000b400000\ 0039f881c1920000015bd30a8969ad56cda50eec6e0d0a2f2830a142c2adb75ec01b010000023113\ a123d09f358887df0c1c32985a110490ed7e8692eb55ef00000001000016800000023113a123d000\ 00023113a123d052737972f6b29aac44432ed80bd29d7e67dd679b0000000159a733d02b9232771c\ 1d9d7b9be8e607a75913815037f5e901010000024fbd4d3fbd28c4049b6244aaeaaa1ecf6cc6f10f\ 0923e53b5400000001000016800000024fbd4d3fbd0000024fbd4d3fbdebe8386b5ce72f48c56530\ 78f1c491a263fc91b701000003c00095dcc4e03d50ee5db2e8bc548eb5cb3a204e848a89fc5a0000\ 000100001680000003c00095dcc4000003c00095dcc4587f2b2db2db2a8466df6d6fd1515ae62aad\ 5fcc010000016b9f8fcfb4d59c8cee3ca46e0d39413acc59a3bcb7dcaf828f0000000100000b4000\ 00003c9a97f7f40000016b9f8fcfb4388b6920ce807c245e0c6d04a6828abd970d72a30100000275\ 171c9aca1b7fedd3579b9595805a4880cecfd541403887d10000000100000b4000000068d92f6f22\ 00000275171c9acac59e3db32da67862f2dd7271b42427442f529f3b00000000ff126f9df34e599f\ c4c9ae57a9e256a00a2fa570b41159afb101000001e55bf8fa42669f5db01fc4d4ec10984e061477\ 5b02144bee9f0000000100000b4000000050e4a97f0b000001e55bf8fa4205e992c028334e6ef195\ c76d876bd40e15ec2b7200000000832822885501ce7bb152c16a2550abc34fd14df01bc2849ce601\ 0000008728f756ee01c3a64cf4841d1d112d7f77f2798818ddd0cc10000000010000168000000087\ 28f756ee0000008728f756ee76c0b6de1cdb5fc9623c335ae651fb2a66f33c9000000000b2714427\ d445b35e54967cbe63d4d2ab1f5070c8cb67074f99010000034e8fb3325a9e82fb79b9020704f086\ 31c1ea31093e756c64660000000100000b400000008d17f3330f0000034e8fb3325a093ca2da434d\ cb4e1932c22e6e01b9292a67a8ac00000003edb4d4b738a2ba464f6c81e0822c57c8905327958af1\ 887c4601000000a6b598e6150c4b9473e7917ffd31b3f3baf6ab5e163adf409e0000000100001680\ 000000a6b598e615000000a6b598e615b219d9cf4a171e56521662bd633489eb166498d800000000\ bd19c42f97e143f6b967561f9d78fc9d4ff1dc0fb25932981700000000b6c100ef75c88ea43f52d0\ 9207e8169861a2eeb0dbc2ab4ce700000003feb30f43df78337153476acce55ce2bd7112e894902a\ f59a9e01000001eb4264d4600545b7130501f09598e23f08f5b0d6e17af627b60000000100000b40\ 00000051e0662366000001eb4264d4608781d7228e4f98d897359156877c67debbff733401000000\ bd3c42e86c45fac3473a9978f44b5d4e439ced7ee20eb350f30000000100000b400000001f8a0b26\ bd000000bd3c42e86cd02833731b5a6008b7e8d9dc89089079abdc6ad201000000b3bea5172fc046\ 2446736925009739616eb41899f6571ad9570000000100000b400000001df51b83de000000b3bea5\ 172fd1bd4514de81a530a4e54e28f50744a25ae4243e010000017b6eb8526891ba3432de13ffaccf\ b2b96463998af34e9ed39e00000001000016800000017b6eb852680000017b6eb852689cb5c0c23e\ d6a161360c26b55b81b145260e258200000003276a31363c6fde1108c4632d5de11c049c45cfa9fb\ bfe41c9401000001b56be11ca93294e45746bf250971286a33693c063464f5203200000001000016\ 80000001b56be11ca9000001b56be11ca942c810596d31684b09b983582a5170f0da463b36010000\ 00038b686e83339d50b087fd68473ff75a7028665258b901b7670000000100001680000000038b68\ 6e83000000038b686e8356bffb78b46f7e092c5f8fb38e9a55427926b4df010000015bb97f697f53\ 4dd811cc4fadc948d76d950ef1e3b46c87791b0000000100000b4000000039f43fe6eb0000015bb9\ 7f697f01a98ff1d06170b12a77ea1031bb9b4526e3c8c80100000095f29e9f1a73d588484a6f4703\ 0e19b15cd17ea37ead2868e40000000100000b4000000018fdc51a8500000095f29e9f1a13c3c7e6\ 45e0f49f30ba6fd40bb794cad33fe33801000002d6ec3cee8a0907dc5f74db022d3c25bb0e769c34\ 12c1ab42000000000100001680000002d6ec3cee8a000002d6ec3cee8a3d1adeac0f8e249781d0ad\ 6ac3804820e1145acd010000014290c188e7e3f26a269a13a9c5689f6bb53c5b1fe9bafcc45a0000\ 0001000016800000014290c188e70000014290c188e73aee2669670f9c17986ac8aa023c8dd222a7\ 
333f00000002c012557024e9ec5dd3df5373963e089205258599ab70392a740100000135ae5b83f4\ 9d1d89d0d5a0b042c8629ac41cbc1903bcc4d6760000000100000b40000000339d0f40a900000135\ ae5b83f4153609b995f4c85c279a01c1c5965b2c81c606a601000003cfd23119733667d7971e116f\ bacf82f6e51743c952099ae6ea0000000100000b40000000a2a3082ee9000003cfd231197390873d\ 59b2c34cc40f176555a86b97f95e256bcf000000032ce64059314eaa7e2f4fea4e80ea65c5ae8834\ 3c2fb5135007000000032c5700f59eaf1cfec9a68ea72f57d72af2e9ffce14ce5f00d401000000e6\ 164ffbbd98d156bf52ef5d5351ea71466015bf19fd2b5e370000000100000b4000000026590d54a0\ 000000e6164ffbbdec4e5a3bc655edf1ba0fa0f8e69676242baba636010000030ed1b624c79d3cf9\ 14bfd480fdd56bf7e94084d260c0f60dd900000001000016800000030ed1b624c70000030ed1b624\ c7cd3d7bdf5e3cfcd4222f8388a2b40085ac65abcd010000004515d7fec87c5981ba83e3987c32ec\ 36fec157eef0af9f5e7900000001000016800000004515d7fec80000004515d7fec8ddcbee00c6bd\ ecf5c14a48c3a5e968a924d38ee500000001ebb10633051a8506278a5f1ec59ac404c9d1fd55ccf4\ 78b6f101000003769fefc017412447441a9aba967eadb963c3f25591c0a6b5310000000100000b40\ 00000093c552a004000003769fefc017f5761d2fd1fc369cf7d0688e48de018c319c5b1201000002\ 63f1341eae547f5b8de16c5446537ed64642aac22417242a68000000010000168000000263f1341e\ ae00000263f1341eae0b46fe0ce7d107117df715e0f1b351db952e78c200000001ebb922803ebea2\ 57be4ad62fcb111fcae4589bdb7285af60660000000109119a403e0c64ae25a4543deaa4dff18873\ 37ba416209670f01000003fa30031212d430319fec839164377ed1ea7d26889ec563e92000000001\ 00000b40000000a9b2ab2dae000003fa300312126ff072fb8a80fbd1f77502ea083bb8b4460f40a7\ 01000001b7c4e149ff4ed87748fe96b49f5ee1c1ff3f3a5db34a2da69d0000000100000b40000000\ 494b7ae1ab000001b7c4e149ff716de993484e749315ec6e10c896b4d33f0407d20100000249a849\ ac74739a86524b04334367254efd03e4e59bc07bf0720000000100000b40000000619c0c47690000\ 0249a849ac742f3e9f4d98d06349c9e17b123cd2581dcc3e5a4600000003f33939525015f9481cc7\ 6e4fabdaa6ff10db6ed30079075b42010000012d15c481712c41d119e465797e50260add8169f005\ 6f64f81400000001000016800000012d15c481710000012d15c481718141c7e952a5ed1e97c62fae\ 502f7bc415f86a7501000003b8656816253af84cd39abdfca33d73dc8ba5c96a3fc15469a8000000\ 0100000b400000009ebb915907000003b8656816256fadcb68b108843c0147e0136127815d61d3f9\ 8e00000003fd9381f31e07d888e60462ea516deb180a7efa26a3d7b9e9620100000048fb07e25de0\ e2afdfa1c8aae2b79f9b5508f7680bf678645c000000010000168000000048fb07e25d00000048fb\ 07e25d5c3782eab866522d42f06d77dd58c4ff38eb7d94010000008a694f1e16ca2cbdf95ba266d9\ a4d7bc6ad26723ee8ce4fb3400000001000016800000008a694f1e160000008a694f1e16a8bff3cc\ bf106cbc95c9da953d0f6d5c4120346e000000039a53c554f226a8f07bcce9e5b6f081c6e7582249\ 5e4d3c7958000000040f94a6e8631bbc88b2e99633686a480e9fba3d3108209e37af000000003267\ 7064b0500eeae9df51b42e8d4ecdc23f057a152ab448ad010000038baa61f6269a3bf3e4291514b2\ 36c7368d1a79bd9637c4a59600000001000016800000038baa61f6260000038baa61f626ea781fca\ a7b671a01d6b1bf8a30b79d38bb4b43d01000002d31b26f0f4b8526257821c8ade3146cee18ef845\ d4ec211a5e0000000100000b400000007884867d7e000002d31b26f0f43199bc99dd8b9e501d0a19\ a8910398b1f75ae7e2000000020ead169440a95932f95d9457897088076f5ec4c9ffa0d640420100\ 0004006c248950255a8f5a69e301c976913b5e87b2ec3ed35477700000000100000b40000000aabc\ b0c18e000004006c24895005768b1d0b046a26b87d35b09b225fb21ecacec80000000020a26e2d4d\ 428b686cf8d90fe3157b9dc59bc203f6310b227d0100000093ee2d1ac4c72aabfed0364fcf42d49d\ 4433a05773234d8bb8000000010000168000000093ee2d1ac400000093ee2d1ac43014f90eead8a6\ c86e93595e6dcb30c36ef9e90301000003d8ccc940266d6f639854c2cec3bd997e7ba93a786840a2\ 
b4a30000000100001680000003d8ccc94026000003d8ccc940264327188e001602f152342a7777a1\ c4cc1fc7798600000000e07c2b35178da3279c540b3ffec7e856d2c8594fe7dcd680ed01000000d8\ 3917700c2cd90b492eb21b623963d31c076ea4811c5fe24e0000000100000b40000000240983e802\ 000000d83917700cef5b8323326480818f1d2c9290ffc046003cd65500000002dca4f9cf32ec111d\ d1117b02004879eeb65ac72cb8cb735c6f010000018393d9196c25ab61f886646b2b55d02277e658\ d546c21a4a890000000100000b400000004098a42ee80000018393d9196cb12336c2fe6cbe5d1e86\ 26efcaaed644ef765862010000025233143c21c8b507458a67a1dcbd953aacaca74dec5d74e92200\ 000001000016800000025233143c210000025233143c218bd91563395e02fccea5270bc583fa2373\ d98299010000029390a90ae137be58533b587e812e29f0d952bddac13e7fe2000000000100001680\ 0000029390a90ae10000029390a90ae1e2ec6010363ebbcd5e95022e7021fcd7a80d4c1b01000002\ 78cefc48ee8f901b5a3b82f23083de193249d0f7bc2b2a048f0000000100000b400000006977d4b6\ d300000278cefc48eec88357e32c64f9161c58f0013d71941f269bcd3e00000001c14273fc1c2ac1\ 6c1aa8608bbf0b82adcd506bfdcb735a59b60000000027265edf4a4479165f396a309a13c4c244df\ 8394a056dabb82010000003535ed5c7e300ad53bcb1a2b1cac54fd0c9c92a101ff43528800000001\ 000016800000003535ed5c7e0000003535ed5c7ee1e35f08e866f35cec5cb91409c089fcb2267f13\ 010000007c50e88f85e885535e8a6e3b3f5007ebd976abda32e11000600000000100000b40000000\ 14b826c2970000007c50e88f8593e76e5fdcf626f37fe3e774c7b9e1201d9fdf6901000001a9f5db\ b34034ccf4577cebaa6981f1ae2cc18f7ec4205395e80000000100000b4000000046fe4f488b0000\ 01a9f5dbb3400b902b6d72a00bef2fab1f102294457229674978000000028e5112a5e40332b1ae1d\ e84ca56800b26eeab6181a78e6d51400000001934d67c97a7f9a7e91aab143a13571613c127724f8\ 12a44afa01000001837f7eec9ae5073cdd38a619176ea82417fd8acbb92cd06e9300000001000016\ 80000001837f7eec9a000001837f7eec9a12efffef3fbd8da27f487a4d46e5a35abbaa1e1a000000\ 029aa439d8cd4432aee90df9073d5b0cfdbf88212f2365ef5bbc0100000272cd595fd7cb03ec8bc0\ 46646f822b2d67c36fb139b349343a0000000100000b4000000068778ee54f00000272cd595fd709\ fdc307d7396db5516288669dc25d01c7fab79b0100000123c39f1706f3c3bfcb4cbe9e378dca8032\ 58bf13c8fa8d6a07000000010000168000000123c39f170600000123c39f170662bd6dc779b9eab9\ db0909dbf89f1ffc73288289010000014cb830065f04192e7d4d74be26a76c361a9c2f0065ee9ce6\ 2d00000001000016800000014cb830065f0000014cb830065ff1103c8a6473f379e40bf849b761de\ 652c013865010000033c6d5f7c0bef64d8521ec56103122131c9047452688d467ac8000000010000\ 0b400000008a123a94ad0000033c6d5f7c0b6e8e810d3e1658b08510818154ff71c5ced44be40000\ 0002c0033d1714573a0c2cef34e97bb59d8f650a7103c6e4e9fb1001000000e26ec95facf9664914\ cb9e447586e527fb1294b08e87a8a5090000000100000b4000000025bd218ff2000000e26ec95fac\ b49b5c6d86843df744bd64f489889658a67650dc01000002b1088dd46cfb7946a4718f6c2a27e2b1\ 0bf6701e38195145660000000100001680000002b1088dd46c000002b1088dd46cec2aba38362cf5\ 617686597c5be1966fca18a14901000002ca27eef5b3a483648b0b9b8bebb9b52667150e9dcb566e\ 6cc60000000100000b400000007706a7d39e000002ca27eef5b3c639aaf193ef947f5afef900d40f\ f47f12afa71b010000005949a1b514a122721dc8fbd234752e83fece129198e02d42ab0000000100\ 0016800000005949a1b5140000005949a1b5145dbfd10c85bb283fd69efaa7e793d285b39b9c1500\ 00000176031236752bac8c9603e9982868fdeb38b7ad49a70ada4fbe01000003ee9ebb987ab3ce50\ fbf10c8b16ab0d68edec81b2bf6c96c4e50000000100000b40000000a7c51f4415000003ee9ebb98\ 7a0cb4c3580042ef446d18298c1a56deb53f1d4ec501000003539603d76c0aeab801dc726b37dcf4\ c84a06a106cf558629310000000100001680000003539603d76c000003539603d76ca2a79d29572e\ 49a84da03fa3cfd9c12a7426ec8d00000002bcbe476a90bdb14e77cd000150f247165f08da4d77de\ 
512af2010000019d422c96dde9567a308ae0d0c4cb7a517b4d4454102e7653020000000100001680\ 0000019d422c96dd0000019d422c96dd5b291431937662d3c513abb5829af6d1f169723d01000001\ 0f7c77899b8b4ec5a3b2d0fd583024a08c963da6d8a0725b7b00000001000016800000010f7c7789\ 9b0000010f7c77899b1702dc1b2cb05e372a24f84ece01c4835c6f1c800100001e9dbaf7b6df9ac9\ 4a1a46736363a20b8e52074ca76981cc43390000000100000b400000051a49d3f3d000001e9dbaf7\ b6df"} } } pub fn dev() -> Self
{ Self { genesis_block: Block { header: BlockHeader { version: 1, prev_hash: [0u8; 32].into(), interlink_hash: [0u8; 32].into(), body_hash: "26f32bf5cf65da6f0758d7450064c46acb4a1fe68366f967f44a295de9615488".into(), accounts_hash: "d6dfd99bdd6d374a75efcf9e3dcc724796cfc6f0ba8c52ec92a8a274514edf06".into(), n_bits: 0x1f01_0000.into(), height: 1, timestamp: 1_522_338_300, nonce: 12_432 }, interlink: BlockInterlink::new(vec![], &[0u8; 32].into()), body: Some(BlockBody { miner: [0u8; Address::SIZE].into(), extra_data: b"DevNet".to_vec(), transactions: vec![], receipts: Receipts::default() }) }, genesis_hash: "5fbc78d778f12485b121cb43c4c0e50d51a06a3def993e19d5862e8fdd4874c4".into(), genesis_accounts: {"\ 0064c7c9a18a298b4c5bdce6f35370d94602216af66d000000038ef61816ca1ff9133bfed4b770f4\ e89ddb3c1a63e2e345702301000001da717a91a84d54f2db90e3f563995e0d52032a02b429849206\ 0000000100001680000001da717a91a8000001da717a91a8311405eb9d66b00a26d798d22872910d\ 6d8d8ce1000000008f811df66dd685b26e86e418381fde940c5b493057257c0f2b01000003e67c61\ 3727ede27bc617c157d93bf5ad9b0cba5bd1258481220000000100000b40000000a66a1033dc0000\ 03e67c613727e210dc0adcd350181e9fc4a4bada4be8029a1ee2010000028b6dfe7d70cf0fe34257\ c882b34a4afbe3d98eaa9e67d7faee0000000100000b400000006c925514e80000028b6dfe7d7066\ cf4df5e59b35e5122c3cb9a9b85ee825bb43de010000032ec4882f458b700e8db79262f8fbb68f6a\ 78c0980df25a485700000001000016800000032ec4882f450000032ec4882f4571a8b114560648e2\ 23258e140ce9dd66581a142f00000000df81c8ee8b29c9512800fc8ae7b200f198fd99d321b33909\ b501000000317114559fa1f4a6f9047fa5cd04ca1dd54c8857e646c241580000000100000b400000\ 00083d83639b000000317114559fe841e3be88d00eaa4038c24b3cefba6c2c4077fd010000034dc1\ e8b3babc73dfa59b5b9c35ad3e537b5a53b497e6b0d4a20000000100000b400000008cf5a6c89f00\ 00034dc1e8b3ba9242b8e2ddc411bdd85913225a6bd385fe95131b01000001096b0c0ed381939a7c\ 8f4f685c9dea4a4d6c2a51edeb0655700000000100000b400000002c3c820279000001096b0c0ed3\ 29de96bfe9c3cfbbd300c0c2bb7f6c00b6ca89ac01000001240afa685b6451ffeb72287d69454756\ 2818fbc508cb6ffe410000000100000b4000000030ac7f1165000001240afa685b8a1a218851bd5d\ c150ba3d6b3cd863229e782c6b010000034ddc0e72520e35223db8671c863fedaa28316fd8d04a5e\ 4f9f00000001000016800000034ddc0e72520000034ddc0e72528517b1e4be3447b748399f03f33f\ 9f57f5481ef7010000018612097b920d9a4818e7e00e4e779fb42e1e6c0699ba75aafb0000000100\ 0016800000018612097b920000018612097b9293cec8c254ba7b8613fa4a0263afe52f6f03dec601\ 0000028c8118a86c85b861c8a5b4037226dbb8cf249221e0ba110a0300000001000016800000028c\ 8118a86c0000028c8118a86c2521f974a8d2995be8dad5a025c72f0b7b5f33590000000357cbbf0e\ bb89c09b401057a2094fb4f31191e967d52b6889260000000215cd81f72c2b6f77afc979119f6948\ afdf7e714830022a5dc301000003f10ccac5b2628112ed38060e8b12a1efac98f67b02a7677aa600\ 00000100001680000003f10ccac5b2000003f10ccac5b2737886f6a643ccbfee08b2447619b029a0\ 078083010000023d430d9bf52ed6f4a88b5801b1e363229d66a75e784b0efd500000000100001680\ 0000023d430d9bf50000023d430d9bf5870273cf08cabc743546cc64e2b7ac975cfe082601000002\ 7b29b4e65986b8496bee59f3dacafb09318f352d8401d57c3a00000001000016800000027b29b4e6\ 590000027b29b4e65921a29bc6a19800b09dfc5de4fe288e95159e87550100000299351d1c67737f\ 57f565009d60d6f06489d3c62417c805065c0000000100000b400000006ede2f84bc00000299351d\ 1c67c16184aa2eebc2be9b67d75ff1486be5c1b79c0500000001f9eb6de71251c87626bbcd1a2218\ 2cb0229e0fd29828d948fa01000001dc99a01924a00cc43fb9bb53bebb900b9fae554f81c60c8c70\ 0000000100000b400000004f6ef00431000001dc99a019244cecb28a63f34fd260847047552051a8\ 
74d8eaa30100000132232fdefdc5ab2d11abfc8e047bb74653f2fe5d76d50aca0300000001000016\ 8000000132232fdefd00000132232fdefd3c9381f9b501395fbf5d6faa067698680ba79237000000\ 041ebe8af706a8f99c5d53d1ea317300d43fa0771f6006d62c2000000002114cd8db8450b70edfb2\ 43d89c0b8e0ab558bd85a3cbf4aad901000000357a322ef13d74dce02343a74e3bb475a9d97dbe88\ 894f23330000000100000b4000000008e9b307d3000000357a322ef1305efc2cef80990b78ed0d85\ f2524eb4598ffbcb0100000322b37796f4cb4cf5b5d0b7a750b4aa20d07c76144c0a3668aa000000\ 0100000b4000000085c893ee7e00000322b37796f4f7e9588782900aa29dfb5361ffa3c93fe20311\ 36000000002b5742bac6d443720ba305d086498718f0d5ab73d71a8784fc01000000be2ddcd78ad5\ 564ca5be7cf62d70d29c0774029f68e84707930000000100000b400000001fb24f7942000000be2d\ dcd78adcaf7e4b017cb95636edc8cdd11738028a8ee2ea000000016286c9a8260c0dad3e5821ff86\ e70801c4513cba5f8dd7971600000001a59029b9a2e832adcb3f6ce3575356bc8cb267d59785b8ca\ ae00000003929f0df51a959d2c21ee8aeaaa46253e73affb42934dbb8d7901000003f3cee37babf6\ 912ca990b1f9cb50d2e2d93e23dc42d67802da0000000100000b40000000a8a27b3f48000003f3ce\ e37bab72ee4caa7c3958a5ec5aeb33bad7e38f73b3de9a010000015584dd234f374854cf57cdaf60\ 10724453f3535cb0ded625c50000000100000b4000000038eb7a308e0000015584dd234ff3de4939\ bff064ae22785fd458566fc46887e723000000015ad32dca157588505d6b51990b43a4392c2df400\ 0351391abc010000018b4d6a241ac8be4065c6e55722261d172a1818d857f0de7161000000010000\ 0b4000000041e23c5b5a0000018b4d6a241ac728f85243d15e6741e115093a6634ffe0c68b6b0000\ 00019ada6d334d0ddcf5bc4e26520c6abbe70eb910d02a22f575ee01000003d56ae224af479e44d5\ bcb3dbaf2c89b104e37c39973e9eb24b0000000100000b40000000a391d05b73000003d56ae224af\ 2cc4ea169b089f257b2d75f4ec05652c1e0bc0af0000000254e250745f789568540f4a4eb703ec4a\ 939c39f54ce1cde560010000025985094f103e6f77bd02f5525c47d134ffa915463343ddc8690000\ 000100000b400000006440d6e2830000025985094f10e7e6d3e64f7633e11dc94f3b7e354220e1e0\ 26f4010000012c0878e391b81d8fe0081a7c5b0f3a0e3673aaedbae7a4b55b000000010000168000\ 00012c0878e3910000012c0878e391f1aeff1d4600bc9f96ee6ce87fd18c0d2e17614901000001f5\ 10d72212c6a2edf51fad7a447880f388ea08e6f060827b090000000100001680000001f510d72212\ 000001f510d72212e6a0f24f98539f0a5599b51b73f12207655f9162010000019be8d670ea293d2e\ 15af6240ac8e8ef9dc2e319a5e3286e2e400000001000016800000019be8d670ea0000019be8d670\ ead2559f49aaec5de4211a293e5b2d3484540f60d201000002e3095f8359ccc7a9dc7547885d0a7f\ 307c841f4435199a0d880000000100000b400000007b2c3a95e5000002e3095f83591273bdc322e9\ f43e44c807e29ded871a19913dca010000029968c627da407984a24a5680f654cb62642f3d1f22c8\ f1233e0000000100000b400000006ee6cbb14f0000029968c627da2330fa3b0bdcbc21bfe77173e1\ c140bad1b879bf000000008d9d0bf6bf88a70699eedb155922777b54094c5ca29d620ba401000002\ 904de82c8bb7e4dd233d3d7b9f03c542855b83caebe95c92670000000100000b400000006d62515c\ c2000002904de82c8b3f5b6bc238dfdcb0667e06b5d7cce7cf7f7fa7c2000000005d704173b5e65a\ 6929bcef0c916704b6aef79aae12c96f4c1201000003997706f7d63195ecce4fac3863b36021fa49\ cb28d39df400210000000100001680000003997706f7d6000003997706f7d627d7e092c559b46fd5\ 4499d5d23d4f4c2d9e9eb9010000009cec264f05959a6d69dcddd46f0502183320e02c2cb1d80b4c\ 0000000100000b400000001a275bb7d70000009cec264f0532cbfe44dac401e321931410eecc2542\ 6fcc59cb0100000416acfd6737f1042f190ff8795db3f9fd35b5e7c1323458ed0f0000000100000b\ 40000000ae722a3bdf00000416acfd6737d596e3801bc1c1f7ac2f41d0b0d91061ffe8b196000000\ 034fe64ef372abe7b833137b508e8b58c0eb1e5e1b22355b46a9010000000f0444e0e7c66e46f533\ 82ef253b2e8391aec2fe148a9520950000000100000b400000000280b6257c0000000f0444e0e715\ 
e7e17c668d12fc1fbab2acc2ad3ce43af1df6001000003e9292636513bc76ce6be59d796ec680bd4\ cf51e2fa28329bec0000000100000b40000000a6dc31090e000003e9292636516d3519243b37b79b\ 1a9b1069fc2e6ba00d81bd00010000021e2a0a3fe90dd52bd161e3ee66bed53faa708629c2f97997\ ba0000000100000b400000005a5c570aa70000021e2a0a3fe95643abf365dea1b4d1612a8cc018ad\ f832b6e8bd00000003131744d4a1b07931b030db4e52bd2691332e2593dd531fe51601000002189e\ 64433b952cecd64b6cf29b7d2a85cfd41ccf5db1a7b1080000000100000b40000000596fbb608a00\ 0002189e64433b40905ba6c5b94f2494dc039a46ad30e92b6a0db901000000418b54b58fa7793906\ a74157df2056948f557d72fdad8d93d00000000100001680000000418b54b58f000000418b54b58f\ a09f8f1ca9a40493ff98fec8db27a59e266fe2cc01000003077b6b831e65a6ef9826aea8953173f7\ 6726df5075bbb40beb0000000100000b40000000813f3c95db000003077b6b831e97c024450804f2\ e8f4f9c7397fb53ca73eafa95701000003f7455236db770cb1a7b457b38f26482cbaa55b1e20ee49\ fc290000000100001680000003f7455236db000003f7455236dba9c88d6766de88c9fe915729ae65\ af0a8d2a34d600000000a7fc7c022b8fd203929865dcf8fc96fa66b7978874ed8b0aa401000002a1\ b8c51294489922d06978b7e3ea1205bc147ab74ff56728760000000100001680000002a1b8c51294\ 000002a1b8c5129431b474978d1d9715c9893105b870f6f739910c1f01000002d2a0ce5189ee8c93\ f44116bb750d8027487570813422411e860000000100001680000002d2a0ce5189000002d2a0ce51\ 89ea40e6986090c610a3dbe086f10cb3a5b611789701000002c29fc93497e9900e53ea2b5cdc4ecf\ 4c1d06e02a5476ce895d0000000100001680000002c29fc93497000002c29fc93497015686798086\ ab1a9be61b91b86b53975b7f36d501000003444e6bcb58170109ec8201de992da57c1a743c0dae78\ 800cab0000000100001680000003444e6bcb58000003444e6bcb58f89ad04e7f64d3a3c1ec87fd37\ 409f61c864fc2d01000002ae1495b92d9ece4dd266b3932e523a756b864c2efa9b06a07c00000001\ 00000b400000007258c39edd000002ae1495b92d895002586240fb1f9169c77dea6245bca0695730\ 01000001b87f19bf0d26a138e42cbb97e1735fa7a4805dd1ff932dafee0000000100001680000001\ b87f19bf0d000001b87f19bf0d0fd3de061ebffa02c575030c45edceb542ad91480000000250c2ab\ 1f7f6a4bc3c30b1d2763fd3608fefb9af52d6a165a32010000019332d60bd1d29d52ff7a1100e67c\ 9c36a72efef733716fd79d00000001000016800000019332d60bd10000019332d60bd1780e525ef3\ 63db2fbf64238fbe1d6e86961afc38010000001c2a4d75f9214dc48e11f52fde6d01d03b65798c7a\ 42312d610000000100000b4000000004b1b793aa0000001c2a4d75f9b12bc132e3685136d2d7d526\ 7ad89a0e9f2dfefb0100000191a2a748bd8cea1597c7cfc3524554b46c405775ddf60b5c7c000000\ 010000168000000191a2a748bd00000191a2a748bd38bd2bc58bdb83bdd302b52c8bb0bcdbe4451f\ 62010000010e4c0e42f0efb7b83c7c526a950b5771076825a7455d042df400000001000016800000\ 010e4c0e42f00000010e4c0e42f078e606620b0e734ad04c5de4a17a933a88b96d9200000001df97\ 42cdf2eef00b587cba1c91218e6cd48d6064f7c5d6a9fe00000003cc42293ba98a000c50422dbcd0\ 755df262e5ddb44c0e288a390100000185e0c0c9afd5afbffe69e5d38e3acb20063ff06f7ea6cee3\ 0c0000000100000b4000000040facacc4800000185e0c0c9af248681123bef27f42acaeced4e5853\ 5bf8de516b000000024ae66363ab17bef1b1c61da10a8afcaafc49ce6747fc3dd60a01000003a089\ 4fc74dc2d2033bc0c82fcda520967b38049cefabc7d6510000000100001680000003a0894fc74d00\ 0003a0894fc74d1d39f5024d27a7a0c929e3439b2c53f83df3b7f001000000e4a3c204b8b7177ee3\ 1b12708399da327c826e8f1466f820f70000000100000b40000000261b4b00ca000000e4a3c204b8\ 041223d0e4bf3424abda424654a954d0545a1c450100000020ad98afa0e2a23891703a9f7876ca76\ 75c7a2467251a760430000000100000b400000000572441d4600000020ad98afa00a8baebeab844f\ bf4642926f9438f1a1d6665a520100000375f966215b4ec70ce1532f433f653fd5d838e37ce25bee\ 387c0000000100000b4000000093a991059000000375f966215bb5bbf2212a4563c39bd2f4fa7575\ 
b20c6f0bb6800000000278d3e466f0ca55900b62b0b71131c3b0f5d32e3dbca728a09d000000037f\ a546d504525baa04aac578d1b2b76e49aace85dda2dea2e20100000132eabdb6a70584c67f79b23a\ 3ac6809d2347bf798283fc67ed000000010000168000000132eabdb6a700000132eabdb6a70f922f\ fb694ca3cc06bb7f1c2d09e14161a9644a01000001dd69383e76ff91e4837cc6e22a60465c407f47\ be529bcd72650000000100001680000001dd69383e76000001dd69383e7650f7346fe7417f58b9ea\ 2e1b4ec6d067af271d9c0100000120d0db966aa074e66ae2de0b5bb16db25cf69b982bb89b506400\ 0000010000168000000120d0db966a00000120d0db966ac3c5efd1f4009386a5ebcbac41776f20c3\ 39816c000000003726dc05cbb80e69dda79c0b9fe1cf5ad890ebf21eb96c1ab101000002921f80d8\ 98681f6769ddba0c3d3ddf18983f1710ab67cb501d0000000100000b400000006dafeacec4000002\ 921f80d8984fd30a55008428eda698189cf5abe050fc9bda01010000010b685d0aef529f91ea913c\ ced3f108a93c9f05557caf7b961400000001000016800000010b685d0aef0000010b685d0aef27f8\ e3e7a194b1a1f137d9833173f946c5f4f25f0000000313540cb6554d2ff4ebde57d37d9199e1245d\ ecaa3511bb2578010000028dde5d1bc2dddb4222987fe417b74a3c623442e03799c0381100000001\ 00000b400000006cfa64d9f60000028dde5d1bc2549107674ab2f85d69bd356e960d0fd4274c093a\ 000000027c62faa3470d6d13bf44be42fed9633356f85fd1a26e02898601000003c43ed4d8152ef4\ 84847dbc61b906cb3628ad1fc8bfd0ed97f00000000100001680000003c43ed4d815000003c43ed4\ d8158f13316cdc1b4aa9b11670c4800d89d88a3a435e00000001d0597c3a0015da61187e36f78d1b\ 9c3d8b5183db3334e6392c01000000e38fd7c11032487632b1997d1ed5a88f1256b9aed7acf342e1\ 0000000100000b4000000025ed4ea02e000000e38fd7c11006462e53882f9074eb569815af29aa96\ 6788577701000000d8d8c2deff75d1ce3ce07f1c8999abf65608a3ae9209e172430000000100000b\ 400000002424207a80000000d8d8c2deff53c5ebb2b741cb9027e9b83fbb4e888ad599d161010000\ 02e9021c325466b9c798ba1550b47aafc1c305aca0347fe0344f0000000100001680000002e9021c\ 3254000002e9021c3254b87623ef788011f1c8aa7fa59f2d50afc464353100000002654076df8b19\ a8182f2101e7cc0e39db9454bdf3dccb0b184a01000003ee75f7f02156239d8970c001db5ce7fb9e\ 4ccd2cca89fc558a0000000100001680000003ee75f7f021000003ee75f7f021f4725ca23a28c4fe\ 357eb0ec26e990798b91af6101000001b2fc634c4d0cb36a1d3be937aa313fa3c8c2611a07ac7bcb\ db0000000100000b40000000487f65e20d000001b2fc634c4daff24dcccbd3b3381072a31c0c1133\ b5f28e35f800000011a48952856d"} } }
}

emails.py
import os

from flask_mail import Message
from flask import render_template

from . import mail


def mail_message(subject, template, to, **kwargs):
    sender_email = os.environ.get("MAIL_USERNAME")
    email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + ".txt", **kwargs)
    email.html = render_template(template + ".html", **kwargs)
    mail.send(email)
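
A minimal usage sketch for mail_message above. Everything here is illustrative rather than taken from this module: the Flask app, its mail settings, the "emails/welcome" template pair, and the recipient are assumed, and the shared "mail" instance imported by emails.py must already have been initialized against the app.

from flask import Flask

from emails import mail_message  # assumed import path for this module

app = Flask(__name__)
app.config["MAIL_SERVER"] = "localhost"  # assumed Flask-Mail settings

with app.app_context():
    # Renders templates/emails/welcome.txt and templates/emails/welcome.html,
    # then sends the message through the shared Flask-Mail instance.
    mail_message(
        "Welcome!",            # subject
        "emails/welcome",      # template base name, without extension
        "user@example.com",    # recipient
        username="alice",      # extra kwargs are forwarded to render_template()
    )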
api.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package storagegateway import ( "fmt" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" ) const opActivateGateway = "ActivateGateway" // ActivateGatewayRequest generates a "aws/request.Request" representing the // client's request for the ActivateGateway operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ActivateGateway for more information on using the ActivateGateway // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ActivateGatewayRequest method. // req, resp := client.ActivateGatewayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ActivateGateway func (c *StorageGateway) ActivateGatewayRequest(input *ActivateGatewayInput) (req *request.Request, output *ActivateGatewayOutput) { op := &request.Operation{ Name: opActivateGateway, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ActivateGatewayInput{} } output = &ActivateGatewayOutput{} req = c.newRequest(op, input, output) return } // ActivateGateway API operation for AWS Storage Gateway. // // Activates the gateway you previously deployed on your host. In the activation // process, you specify information such as the AWS Region that you want to // use for storing snapshots or tapes, the time zone for scheduled snapshots // the gateway snapshot schedule window, an activation key, and a name for your // gateway. The activation process also associates your gateway with your account. // For more information, see UpdateGatewayInformation. // // You must turn on the gateway VM before you can activate your gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ActivateGateway for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ActivateGateway func (c *StorageGateway) ActivateGateway(input *ActivateGatewayInput) (*ActivateGatewayOutput, error) { req, out := c.ActivateGatewayRequest(input) return out, req.Send() } // ActivateGatewayWithContext is the same as ActivateGateway with the addition of // the ability to pass a context and additional request options. // // See ActivateGateway for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ActivateGatewayWithContext(ctx aws.Context, input *ActivateGatewayInput, opts ...request.Option) (*ActivateGatewayOutput, error) { req, out := c.ActivateGatewayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opAddCache = "AddCache" // AddCacheRequest generates a "aws/request.Request" representing the // client's request for the AddCache operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AddCache for more information on using the AddCache // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AddCacheRequest method. // req, resp := client.AddCacheRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddCache func (c *StorageGateway) AddCacheRequest(input *AddCacheInput) (req *request.Request, output *AddCacheOutput) { op := &request.Operation{ Name: opAddCache, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AddCacheInput{} } output = &AddCacheOutput{} req = c.newRequest(op, input, output) return } // AddCache API operation for AWS Storage Gateway. // // Configures one or more gateway local disks as cache for a gateway. This operation // is only supported in the cached volume, tape, and file gateway type (see // How AWS Storage Gateway works (architecture) (https://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html). // // In the request, you specify the gateway Amazon Resource Name (ARN) to which // you want to add cache, and one or more disk IDs that you want to configure // as cache. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation AddCache for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddCache func (c *StorageGateway) AddCache(input *AddCacheInput) (*AddCacheOutput, error) { req, out := c.AddCacheRequest(input) return out, req.Send() } // AddCacheWithContext is the same as AddCache with the addition of // the ability to pass a context and additional request options. // // See AddCache for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) AddCacheWithContext(ctx aws.Context, input *AddCacheInput, opts ...request.Option) (*AddCacheOutput, error) { req, out := c.AddCacheRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opAddTagsToResource = "AddTagsToResource" // AddTagsToResourceRequest generates a "aws/request.Request" representing the // client's request for the AddTagsToResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AddTagsToResource for more information on using the AddTagsToResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AddTagsToResourceRequest method. // req, resp := client.AddTagsToResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddTagsToResource func (c *StorageGateway) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { op := &request.Operation{ Name: opAddTagsToResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AddTagsToResourceInput{} } output = &AddTagsToResourceOutput{} req = c.newRequest(op, input, output) return } // AddTagsToResource API operation for AWS Storage Gateway. // // Adds one or more tags to the specified resource. You use tags to add metadata // to resources, which you can use to categorize these resources. For example, // you can categorize resources by purpose, owner, environment, or team. Each // tag consists of a key and a value, which you define. You can add tags to // the following AWS Storage Gateway resources: // // * Storage gateways of all types // // * Storage volumes // // * Virtual tapes // // * NFS and SMB file shares // // You can create a maximum of 50 tags for each resource. Virtual tapes and // storage volumes that are recovered to a new gateway maintain their tags. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation AddTagsToResource for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddTagsToResource func (c *StorageGateway) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { req, out := c.AddTagsToResourceRequest(input) return out, req.Send() } // AddTagsToResourceWithContext is the same as AddTagsToResource with the addition of // the ability to pass a context and additional request options. // // See AddTagsToResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) AddTagsToResourceWithContext(ctx aws.Context, input *AddTagsToResourceInput, opts ...request.Option) (*AddTagsToResourceOutput, error) { req, out := c.AddTagsToResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opAddUploadBuffer = "AddUploadBuffer" // AddUploadBufferRequest generates a "aws/request.Request" representing the // client's request for the AddUploadBuffer operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AddUploadBuffer for more information on using the AddUploadBuffer // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AddUploadBufferRequest method. // req, resp := client.AddUploadBufferRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddUploadBuffer func (c *StorageGateway) AddUploadBufferRequest(input *AddUploadBufferInput) (req *request.Request, output *AddUploadBufferOutput) { op := &request.Operation{ Name: opAddUploadBuffer, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AddUploadBufferInput{} } output = &AddUploadBufferOutput{} req = c.newRequest(op, input, output) return } // AddUploadBuffer API operation for AWS Storage Gateway. // // Configures one or more gateway local disks as upload buffer for a specified // gateway. This operation is supported for the stored volume, cached volume // and tape gateway types. // // In the request, you specify the gateway Amazon Resource Name (ARN) to which // you want to add upload buffer, and one or more disk IDs that you want to // configure as upload buffer. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation AddUploadBuffer for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. 
For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddUploadBuffer func (c *StorageGateway) AddUploadBuffer(input *AddUploadBufferInput) (*AddUploadBufferOutput, error) { req, out := c.AddUploadBufferRequest(input) return out, req.Send() } // AddUploadBufferWithContext is the same as AddUploadBuffer with the addition of // the ability to pass a context and additional request options. // // See AddUploadBuffer for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) AddUploadBufferWithContext(ctx aws.Context, input *AddUploadBufferInput, opts ...request.Option) (*AddUploadBufferOutput, error) { req, out := c.AddUploadBufferRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opAddWorkingStorage = "AddWorkingStorage" // AddWorkingStorageRequest generates a "aws/request.Request" representing the // client's request for the AddWorkingStorage operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AddWorkingStorage for more information on using the AddWorkingStorage // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AddWorkingStorageRequest method. // req, resp := client.AddWorkingStorageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddWorkingStorage func (c *StorageGateway) AddWorkingStorageRequest(input *AddWorkingStorageInput) (req *request.Request, output *AddWorkingStorageOutput) { op := &request.Operation{ Name: opAddWorkingStorage, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AddWorkingStorageInput{} } output = &AddWorkingStorageOutput{} req = c.newRequest(op, input, output) return } // AddWorkingStorage API operation for AWS Storage Gateway. // // Configures one or more gateway local disks as working storage for a gateway. // This operation is only supported in the stored volume gateway type. This // operation is deprecated in cached volume API version 20120630. Use AddUploadBuffer // instead. // // Working storage is also referred to as upload buffer. You can also use the // AddUploadBuffer operation to add upload buffer to a stored volume gateway. // // In the request, you specify the gateway Amazon Resource Name (ARN) to which // you want to add working storage, and one or more disk IDs that you want to // configure as working storage. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation AddWorkingStorage for usage and error information. 
// // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AddWorkingStorage func (c *StorageGateway) AddWorkingStorage(input *AddWorkingStorageInput) (*AddWorkingStorageOutput, error) { req, out := c.AddWorkingStorageRequest(input) return out, req.Send() } // AddWorkingStorageWithContext is the same as AddWorkingStorage with the addition of // the ability to pass a context and additional request options. // // See AddWorkingStorage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) AddWorkingStorageWithContext(ctx aws.Context, input *AddWorkingStorageInput, opts ...request.Option) (*AddWorkingStorageOutput, error) { req, out := c.AddWorkingStorageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opAssignTapePool = "AssignTapePool" // AssignTapePoolRequest generates a "aws/request.Request" representing the // client's request for the AssignTapePool operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AssignTapePool for more information on using the AssignTapePool // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AssignTapePoolRequest method. // req, resp := client.AssignTapePoolRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AssignTapePool func (c *StorageGateway) AssignTapePoolRequest(input *AssignTapePoolInput) (req *request.Request, output *AssignTapePoolOutput) { op := &request.Operation{ Name: opAssignTapePool, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AssignTapePoolInput{} } output = &AssignTapePoolOutput{} req = c.newRequest(op, input, output) return } // AssignTapePool API operation for AWS Storage Gateway. // // Assigns a tape to a tape pool for archiving. The tape assigned to a pool // is archived in the S3 storage class that is associated with the pool. When // you use your backup application to eject the tape, the tape is archived directly // into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds // to the pool. // // Valid Values: GLACIER | DEEP_ARCHIVE // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Storage Gateway's // API operation AssignTapePool for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AssignTapePool func (c *StorageGateway) AssignTapePool(input *AssignTapePoolInput) (*AssignTapePoolOutput, error) { req, out := c.AssignTapePoolRequest(input) return out, req.Send() } // AssignTapePoolWithContext is the same as AssignTapePool with the addition of // the ability to pass a context and additional request options. // // See AssignTapePool for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) AssignTapePoolWithContext(ctx aws.Context, input *AssignTapePoolInput, opts ...request.Option) (*AssignTapePoolOutput, error) { req, out := c.AssignTapePoolRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opAttachVolume = "AttachVolume" // AttachVolumeRequest generates a "aws/request.Request" representing the // client's request for the AttachVolume operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See AttachVolume for more information on using the AttachVolume // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the AttachVolumeRequest method. // req, resp := client.AttachVolumeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AttachVolume func (c *StorageGateway) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *AttachVolumeOutput) { op := &request.Operation{ Name: opAttachVolume, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &AttachVolumeInput{} } output = &AttachVolumeOutput{} req = c.newRequest(op, input, output) return } // AttachVolume API operation for AWS Storage Gateway. // // Connects a volume to an iSCSI connection and then attaches the volume to // the specified gateway. Detaching and attaching a volume enables you to recover // your data from one gateway to a different gateway without creating a snapshot. // It also makes it easier to move your volumes from an on-premises gateway // to a gateway hosted on an Amazon EC2 instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Storage Gateway's // API operation AttachVolume for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/AttachVolume func (c *StorageGateway) AttachVolume(input *AttachVolumeInput) (*AttachVolumeOutput, error) { req, out := c.AttachVolumeRequest(input) return out, req.Send() } // AttachVolumeWithContext is the same as AttachVolume with the addition of // the ability to pass a context and additional request options. // // See AttachVolume for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) AttachVolumeWithContext(ctx aws.Context, input *AttachVolumeInput, opts ...request.Option) (*AttachVolumeOutput, error) { req, out := c.AttachVolumeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCancelArchival = "CancelArchival" // CancelArchivalRequest generates a "aws/request.Request" representing the // client's request for the CancelArchival operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CancelArchival for more information on using the CancelArchival // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CancelArchivalRequest method. // req, resp := client.CancelArchivalRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CancelArchival func (c *StorageGateway) CancelArchivalRequest(input *CancelArchivalInput) (req *request.Request, output *CancelArchivalOutput) { op := &request.Operation{ Name: opCancelArchival, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CancelArchivalInput{} } output = &CancelArchivalOutput{} req = c.newRequest(op, input, output) return } // CancelArchival API operation for AWS Storage Gateway. // // Cancels archiving of a virtual tape to the virtual tape shelf (VTS) after // the archiving process is initiated. This operation is only supported in the // tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation CancelArchival for usage and error information. 
// // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CancelArchival func (c *StorageGateway) CancelArchival(input *CancelArchivalInput) (*CancelArchivalOutput, error) { req, out := c.CancelArchivalRequest(input) return out, req.Send() } // CancelArchivalWithContext is the same as CancelArchival with the addition of // the ability to pass a context and additional request options. // // See CancelArchival for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CancelArchivalWithContext(ctx aws.Context, input *CancelArchivalInput, opts ...request.Option) (*CancelArchivalOutput, error) { req, out := c.CancelArchivalRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCancelRetrieval = "CancelRetrieval" // CancelRetrievalRequest generates a "aws/request.Request" representing the // client's request for the CancelRetrieval operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CancelRetrieval for more information on using the CancelRetrieval // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CancelRetrievalRequest method. // req, resp := client.CancelRetrievalRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CancelRetrieval func (c *StorageGateway) CancelRetrievalRequest(input *CancelRetrievalInput) (req *request.Request, output *CancelRetrievalOutput) { op := &request.Operation{ Name: opCancelRetrieval, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CancelRetrievalInput{} } output = &CancelRetrievalOutput{} req = c.newRequest(op, input, output) return } // CancelRetrieval API operation for AWS Storage Gateway. // // Cancels retrieval of a virtual tape from the virtual tape shelf (VTS) to // a gateway after the retrieval process is initiated. The virtual tape is returned // to the VTS. This operation is only supported in the tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation CancelRetrieval for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. 
For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CancelRetrieval func (c *StorageGateway) CancelRetrieval(input *CancelRetrievalInput) (*CancelRetrievalOutput, error) { req, out := c.CancelRetrievalRequest(input) return out, req.Send() } // CancelRetrievalWithContext is the same as CancelRetrieval with the addition of // the ability to pass a context and additional request options. // // See CancelRetrieval for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CancelRetrievalWithContext(ctx aws.Context, input *CancelRetrievalInput, opts ...request.Option) (*CancelRetrievalOutput, error) { req, out := c.CancelRetrievalRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateCachediSCSIVolume = "CreateCachediSCSIVolume" // CreateCachediSCSIVolumeRequest generates a "aws/request.Request" representing the // client's request for the CreateCachediSCSIVolume operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreateCachediSCSIVolume for more information on using the CreateCachediSCSIVolume // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateCachediSCSIVolumeRequest method. // req, resp := client.CreateCachediSCSIVolumeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateCachediSCSIVolume func (c *StorageGateway) CreateCachediSCSIVolumeRequest(input *CreateCachediSCSIVolumeInput) (req *request.Request, output *CreateCachediSCSIVolumeOutput) { op := &request.Operation{ Name: opCreateCachediSCSIVolume, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateCachediSCSIVolumeInput{} } output = &CreateCachediSCSIVolumeOutput{} req = c.newRequest(op, input, output) return } // CreateCachediSCSIVolume API operation for AWS Storage Gateway. // // Creates a cached volume on a specified cached volume gateway. This operation // is only supported in the cached volume gateway type. // // Cache storage must be allocated to the gateway before you can create a cached // volume. Use the AddCache operation to add cache storage to a gateway. // // In the request, you must specify the gateway, size of the volume in bytes, // the iSCSI target name, an IP address on which to expose the target, and a // unique client token. In response, the gateway creates the volume and returns // information about it. This information includes the volume Amazon Resource // Name (ARN), its size, and the iSCSI target ARN that initiators can use to // connect to the volume target. 
// // Optionally, you can provide the ARN for an existing volume as the SourceVolumeARN // for this cached volume, which creates an exact copy of the existing volume’s // latest recovery point. The VolumeSizeInBytes value must be equal to or larger // than the size of the copied volume, in bytes. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation CreateCachediSCSIVolume for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateCachediSCSIVolume func (c *StorageGateway) CreateCachediSCSIVolume(input *CreateCachediSCSIVolumeInput) (*CreateCachediSCSIVolumeOutput, error) { req, out := c.CreateCachediSCSIVolumeRequest(input) return out, req.Send() } // CreateCachediSCSIVolumeWithContext is the same as CreateCachediSCSIVolume with the addition of // the ability to pass a context and additional request options. // // See CreateCachediSCSIVolume for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CreateCachediSCSIVolumeWithContext(ctx aws.Context, input *CreateCachediSCSIVolumeInput, opts ...request.Option) (*CreateCachediSCSIVolumeOutput, error) { req, out := c.CreateCachediSCSIVolumeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateNFSFileShare = "CreateNFSFileShare" // CreateNFSFileShareRequest generates a "aws/request.Request" representing the // client's request for the CreateNFSFileShare operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreateNFSFileShare for more information on using the CreateNFSFileShare // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateNFSFileShareRequest method. 
// req, resp := client.CreateNFSFileShareRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateNFSFileShare func (c *StorageGateway) CreateNFSFileShareRequest(input *CreateNFSFileShareInput) (req *request.Request, output *CreateNFSFileShareOutput) { op := &request.Operation{ Name: opCreateNFSFileShare, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateNFSFileShareInput{} } output = &CreateNFSFileShareOutput{} req = c.newRequest(op, input, output) return } // CreateNFSFileShare API operation for AWS Storage Gateway. // // Creates a Network File System (NFS) file share on an existing file gateway. // In Storage Gateway, a file share is a file system mount point backed by Amazon // S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. // This operation is only supported for file gateways. // // File gateway requires AWS Security Token Service (AWS STS) to be activated // to enable you to create a file share. Make sure AWS STS is activated in the // AWS Region you are creating your file gateway in. If AWS STS is not activated // in the AWS Region, activate it. For information about how to activate AWS // STS, see Activating and deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the AWS Identity and Access Management User Guide. // // File gateway does not support creating hard or symbolic links on a file share. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation CreateNFSFileShare for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateNFSFileShare func (c *StorageGateway) CreateNFSFileShare(input *CreateNFSFileShareInput) (*CreateNFSFileShareOutput, error) { req, out := c.CreateNFSFileShareRequest(input) return out, req.Send() } // CreateNFSFileShareWithContext is the same as CreateNFSFileShare with the addition of // the ability to pass a context and additional request options. // // See CreateNFSFileShare for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CreateNFSFileShareWithContext(ctx aws.Context, input *CreateNFSFileShareInput, opts ...request.Option) (*CreateNFSFileShareOutput, error) { req, out := c.CreateNFSFileShareRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateSMBFileShare = "CreateSMBFileShare" // CreateSMBFileShareRequest generates a "aws/request.Request" representing the // client's request for the CreateSMBFileShare operation. 
// The "output" return value will be populated with the request's response
// once the request completes successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateSMBFileShare for more information on using the CreateSMBFileShare
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateSMBFileShareRequest method.
//    req, resp := client.CreateSMBFileShareRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSMBFileShare
func (c *StorageGateway) CreateSMBFileShareRequest(input *CreateSMBFileShareInput) (req *request.Request, output *CreateSMBFileShareOutput) {
	op := &request.Operation{
		Name:       opCreateSMBFileShare,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateSMBFileShareInput{}
	}

	output = &CreateSMBFileShareOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateSMBFileShare API operation for AWS Storage Gateway.
//
// Creates a Server Message Block (SMB) file share on an existing file gateway.
// In Storage Gateway, a file share is a file system mount point backed by Amazon
// S3 cloud storage. Storage Gateway exposes file shares using an SMB interface.
// This operation is only supported for file gateways.
//
// File gateways require AWS Security Token Service (AWS STS) to be activated
// to enable you to create a file share. Make sure that AWS STS is activated
// in the AWS Region you are creating your file gateway in. If AWS STS is not
// activated in this AWS Region, activate it. For information about how to activate
// AWS STS, see Activating and deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the AWS Identity and Access Management User Guide.
//
// File gateways don't support creating hard or symbolic links on a file share.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Storage Gateway's
// API operation CreateSMBFileShare for usage and error information.
//
// Returned Error Types:
//   * InvalidGatewayRequestException
//   An exception occurred because an invalid gateway request was issued to the
//   service. For more information, see the error and message fields.
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSMBFileShare
func (c *StorageGateway) CreateSMBFileShare(input *CreateSMBFileShareInput) (*CreateSMBFileShareOutput, error) {
	req, out := c.CreateSMBFileShareRequest(input)
	return out, req.Send()
}

// CreateSMBFileShareWithContext is the same as CreateSMBFileShare with the addition of
// the ability to pass a context and additional request options.
//
// See CreateSMBFileShare for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur.
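//
// For example, a minimal sketch of supplying a timeout context (svc and the
// field values below are assumptions for illustration, not part of this file):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    _, err := svc.CreateSMBFileShareWithContext(ctx, &storagegateway.CreateSMBFileShareInput{
//        ClientToken: aws.String("smb-share-token-1"),
//        GatewayARN:  aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
//        LocationARN: aws.String("arn:aws:s3:::my-bucket"),
//        Role:        aws.String("arn:aws:iam::111122223333:role/my-storage-gateway-role"),
//    })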
// In the future the SDK may create sub-contexts for http.Requests. See
// https://golang.org/pkg/context/ for more information on using Contexts.
func (c *StorageGateway) CreateSMBFileShareWithContext(ctx aws.Context, input *CreateSMBFileShareInput, opts ...request.Option) (*CreateSMBFileShareOutput, error) {
	req, out := c.CreateSMBFileShareRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateSnapshot = "CreateSnapshot"

// CreateSnapshotRequest generates a "aws/request.Request" representing the
// client's request for the CreateSnapshot operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateSnapshot for more information on using the CreateSnapshot
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateSnapshotRequest method.
//    req, resp := client.CreateSnapshotRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSnapshot
func (c *StorageGateway) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) {
	op := &request.Operation{
		Name:       opCreateSnapshot,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateSnapshotInput{}
	}

	output = &CreateSnapshotOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateSnapshot API operation for AWS Storage Gateway.
//
// Initiates a snapshot of a volume.
//
// AWS Storage Gateway provides the ability to back up point-in-time snapshots
// of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site
// recovery, as well as import the data to an Amazon Elastic Block Store (EBS)
// volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your
// gateway volume on a scheduled or ad hoc basis. This API enables you to take
// an ad hoc snapshot. For more information, see Editing a snapshot schedule (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot).
//
// In the CreateSnapshot request, you identify the volume by providing its Amazon
// Resource Name (ARN). You must also provide a description for the snapshot.
// When AWS Storage Gateway takes the snapshot of the specified volume, the
// snapshot and its description appear in the AWS Storage Gateway console. In
// response, AWS Storage Gateway returns you a snapshot ID. You can use this
// snapshot ID to check the snapshot progress or later use it when you want
// to create a volume from a snapshot. This operation is only supported in
// stored and cached volume gateway types.
//
// To list or delete a snapshot, you must use the Amazon EC2 API. For more information,
// see DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html)
// or DeleteSnapshot (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteSnapshot.html)
// in the Amazon Elastic Compute Cloud API Reference.
//
// Volume and snapshot IDs are changing to a longer length ID format.
For more // information, see the important note on the Welcome (https://docs.aws.amazon.com/storagegateway/latest/APIReference/Welcome.html) // page. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation CreateSnapshot for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // * ServiceUnavailableError // An internal server error has occurred because the service is unavailable. // For more information, see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSnapshot func (c *StorageGateway) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) { req, out := c.CreateSnapshotRequest(input) return out, req.Send() } // CreateSnapshotWithContext is the same as CreateSnapshot with the addition of // the ability to pass a context and additional request options. // // See CreateSnapshot for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CreateSnapshotWithContext(ctx aws.Context, input *CreateSnapshotInput, opts ...request.Option) (*CreateSnapshotOutput, error) { req, out := c.CreateSnapshotRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateSnapshotFromVolumeRecoveryPoint = "CreateSnapshotFromVolumeRecoveryPoint" // CreateSnapshotFromVolumeRecoveryPointRequest generates a "aws/request.Request" representing the // client's request for the CreateSnapshotFromVolumeRecoveryPoint operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreateSnapshotFromVolumeRecoveryPoint for more information on using the CreateSnapshotFromVolumeRecoveryPoint // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateSnapshotFromVolumeRecoveryPointRequest method. 
//    req, resp := client.CreateSnapshotFromVolumeRecoveryPointRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSnapshotFromVolumeRecoveryPoint
func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPointRequest(input *CreateSnapshotFromVolumeRecoveryPointInput) (req *request.Request, output *CreateSnapshotFromVolumeRecoveryPointOutput) {
	op := &request.Operation{
		Name:       opCreateSnapshotFromVolumeRecoveryPoint,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateSnapshotFromVolumeRecoveryPointInput{}
	}

	output = &CreateSnapshotFromVolumeRecoveryPointOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateSnapshotFromVolumeRecoveryPoint API operation for AWS Storage Gateway.
//
// Initiates a snapshot of a gateway from a volume recovery point. This operation
// is only supported in the cached volume gateway type.
//
// A volume recovery point is a point in time at which all data of the volume
// is consistent and from which you can create a snapshot. To get a list of
// volume recovery points for a cached volume gateway, use ListVolumeRecoveryPoints.
//
// In the CreateSnapshotFromVolumeRecoveryPoint request, you identify the volume
// by providing its Amazon Resource Name (ARN). You must also provide a description
// for the snapshot. When the gateway takes a snapshot of the specified volume,
// the snapshot and its description appear in the AWS Storage Gateway console.
// In response, the gateway returns you a snapshot ID. You can use this snapshot
// ID to check the snapshot progress or later use it when you want to create
// a volume from a snapshot.
//
// To list or delete a snapshot, you must use the Amazon EC2 API. For more information,
// see DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html)
// or DeleteSnapshot (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteSnapshot.html)
// in the Amazon Elastic Compute Cloud API Reference.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Storage Gateway's
// API operation CreateSnapshotFromVolumeRecoveryPoint for usage and error information.
//
// Returned Error Types:
//   * InvalidGatewayRequestException
//   An exception occurred because an invalid gateway request was issued to the
//   service. For more information, see the error and message fields.
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
//   * ServiceUnavailableError
//   An internal server error has occurred because the service is unavailable.
//   For more information, see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateSnapshotFromVolumeRecoveryPoint
func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPoint(input *CreateSnapshotFromVolumeRecoveryPointInput) (*CreateSnapshotFromVolumeRecoveryPointOutput, error) {
	req, out := c.CreateSnapshotFromVolumeRecoveryPointRequest(input)
	return out, req.Send()
}

// CreateSnapshotFromVolumeRecoveryPointWithContext is the same as CreateSnapshotFromVolumeRecoveryPoint with the addition of
// the ability to pass a context and additional request options.
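//
// A hypothetical sketch of the recovery-point workflow with a context (the
// gateway ARN and ctx are assumptions for illustration, not part of this file):
//
//    pts, err := svc.ListVolumeRecoveryPointsWithContext(ctx, &storagegateway.ListVolumeRecoveryPointsInput{
//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
//    })
//    if err == nil && len(pts.VolumeRecoveryPointInfos) > 0 {
//        _, err = svc.CreateSnapshotFromVolumeRecoveryPointWithContext(ctx, &storagegateway.CreateSnapshotFromVolumeRecoveryPointInput{
//            VolumeARN:           pts.VolumeRecoveryPointInfos[0].VolumeARN,
//            SnapshotDescription: aws.String("snapshot from latest recovery point"),
//        })
//    }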
//
// See CreateSnapshotFromVolumeRecoveryPoint for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPointWithContext(ctx aws.Context, input *CreateSnapshotFromVolumeRecoveryPointInput, opts ...request.Option) (*CreateSnapshotFromVolumeRecoveryPointOutput, error) {
	req, out := c.CreateSnapshotFromVolumeRecoveryPointRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateStorediSCSIVolume = "CreateStorediSCSIVolume"

// CreateStorediSCSIVolumeRequest generates a "aws/request.Request" representing the
// client's request for the CreateStorediSCSIVolume operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateStorediSCSIVolume for more information on using the CreateStorediSCSIVolume
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateStorediSCSIVolumeRequest method.
//    req, resp := client.CreateStorediSCSIVolumeRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateStorediSCSIVolume
func (c *StorageGateway) CreateStorediSCSIVolumeRequest(input *CreateStorediSCSIVolumeInput) (req *request.Request, output *CreateStorediSCSIVolumeOutput) {
	op := &request.Operation{
		Name:       opCreateStorediSCSIVolume,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateStorediSCSIVolumeInput{}
	}

	output = &CreateStorediSCSIVolumeOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateStorediSCSIVolume API operation for AWS Storage Gateway.
//
// Creates a volume on a specified gateway. This operation is only supported
// in the stored volume gateway type.
//
// The size of the volume to create is inferred from the disk size. You can
// choose to preserve existing data on the disk, create a volume from an existing
// snapshot, or create an empty volume. If you choose to create an empty gateway
// volume, then any existing data on the disk is erased.
//
// In the request, you must specify the gateway and the disk information on which
// you are creating the volume. In response, the gateway creates the volume
// and returns volume information such as the volume Amazon Resource Name (ARN),
// its size, and the iSCSI target ARN that initiators can use to connect to
// the volume target.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Storage Gateway's
// API operation CreateStorediSCSIVolume for usage and error information.
//
// Returned Error Types:
//   * InvalidGatewayRequestException
//   An exception occurred because an invalid gateway request was issued to the
//   service.
//   For more information, see the error and message fields.
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateStorediSCSIVolume
func (c *StorageGateway) CreateStorediSCSIVolume(input *CreateStorediSCSIVolumeInput) (*CreateStorediSCSIVolumeOutput, error) {
	req, out := c.CreateStorediSCSIVolumeRequest(input)
	return out, req.Send()
}

// CreateStorediSCSIVolumeWithContext is the same as CreateStorediSCSIVolume with the addition of
// the ability to pass a context and additional request options.
//
// See CreateStorediSCSIVolume for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *StorageGateway) CreateStorediSCSIVolumeWithContext(ctx aws.Context, input *CreateStorediSCSIVolumeInput, opts ...request.Option) (*CreateStorediSCSIVolumeOutput, error) {
	req, out := c.CreateStorediSCSIVolumeRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opCreateTapeWithBarcode = "CreateTapeWithBarcode"

// CreateTapeWithBarcodeRequest generates a "aws/request.Request" representing the
// client's request for the CreateTapeWithBarcode operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See CreateTapeWithBarcode for more information on using the CreateTapeWithBarcode
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the CreateTapeWithBarcodeRequest method.
//    req, resp := client.CreateTapeWithBarcodeRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapeWithBarcode
func (c *StorageGateway) CreateTapeWithBarcodeRequest(input *CreateTapeWithBarcodeInput) (req *request.Request, output *CreateTapeWithBarcodeOutput) {
	op := &request.Operation{
		Name:       opCreateTapeWithBarcode,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateTapeWithBarcodeInput{}
	}

	output = &CreateTapeWithBarcodeOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateTapeWithBarcode API operation for AWS Storage Gateway.
//
// Creates a virtual tape by using your own barcode. You write data to the virtual
// tape and then archive the tape. A barcode is unique and cannot be reused
// if it has already been used on a tape. This also applies to barcodes used
// on deleted tapes. This operation is only supported in the tape gateway type.
//
// Cache storage must be allocated to the gateway before you can create a virtual
// tape. Use the AddCache operation to add cache storage to a gateway.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
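//
// For example, a minimal sketch of such an assertion (input is a placeholder
// assumed for illustration):
//
//    _, err := svc.CreateTapeWithBarcode(input)
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case storagegateway.ErrCodeInvalidGatewayRequestException:
//            fmt.Println("invalid gateway request:", aerr.Message())
//        case storagegateway.ErrCodeInternalServerError:
//            fmt.Println("internal server error:", aerr.Message())
//        }
//    }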
// // See the AWS API reference guide for AWS Storage Gateway's // API operation CreateTapeWithBarcode for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapeWithBarcode func (c *StorageGateway) CreateTapeWithBarcode(input *CreateTapeWithBarcodeInput) (*CreateTapeWithBarcodeOutput, error) { req, out := c.CreateTapeWithBarcodeRequest(input) return out, req.Send() } // CreateTapeWithBarcodeWithContext is the same as CreateTapeWithBarcode with the addition of // the ability to pass a context and additional request options. // // See CreateTapeWithBarcode for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CreateTapeWithBarcodeWithContext(ctx aws.Context, input *CreateTapeWithBarcodeInput, opts ...request.Option) (*CreateTapeWithBarcodeOutput, error) { req, out := c.CreateTapeWithBarcodeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateTapes = "CreateTapes" // CreateTapesRequest generates a "aws/request.Request" representing the // client's request for the CreateTapes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See CreateTapes for more information on using the CreateTapes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the CreateTapesRequest method. // req, resp := client.CreateTapesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapes func (c *StorageGateway) CreateTapesRequest(input *CreateTapesInput) (req *request.Request, output *CreateTapesOutput) { op := &request.Operation{ Name: opCreateTapes, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateTapesInput{} } output = &CreateTapesOutput{} req = c.newRequest(op, input, output) return } // CreateTapes API operation for AWS Storage Gateway. // // Creates one or more virtual tapes. You write data to the virtual tapes and // then archive the tapes. This operation is only supported in the tape gateway // type. // // Cache storage must be allocated to the gateway before you can create virtual // tapes. Use the AddCache operation to add cache storage to a gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Storage Gateway's // API operation CreateTapes for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapes func (c *StorageGateway) CreateTapes(input *CreateTapesInput) (*CreateTapesOutput, error) { req, out := c.CreateTapesRequest(input) return out, req.Send() } // CreateTapesWithContext is the same as CreateTapes with the addition of // the ability to pass a context and additional request options. // // See CreateTapes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) CreateTapesWithContext(ctx aws.Context, input *CreateTapesInput, opts ...request.Option) (*CreateTapesOutput, error) { req, out := c.CreateTapesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteAutomaticTapeCreationPolicy = "DeleteAutomaticTapeCreationPolicy" // DeleteAutomaticTapeCreationPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteAutomaticTapeCreationPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteAutomaticTapeCreationPolicy for more information on using the DeleteAutomaticTapeCreationPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteAutomaticTapeCreationPolicyRequest method. // req, resp := client.DeleteAutomaticTapeCreationPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteAutomaticTapeCreationPolicy func (c *StorageGateway) DeleteAutomaticTapeCreationPolicyRequest(input *DeleteAutomaticTapeCreationPolicyInput) (req *request.Request, output *DeleteAutomaticTapeCreationPolicyOutput) { op := &request.Operation{ Name: opDeleteAutomaticTapeCreationPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteAutomaticTapeCreationPolicyInput{} } output = &DeleteAutomaticTapeCreationPolicyOutput{} req = c.newRequest(op, input, output) return } // DeleteAutomaticTapeCreationPolicy API operation for AWS Storage Gateway. // // Deletes the automatic tape creation policy of a gateway. If you delete this // policy, new virtual tapes must be created manually. Use the Amazon Resource // Name (ARN) of the gateway in your request to remove the policy. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteAutomaticTapeCreationPolicy for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteAutomaticTapeCreationPolicy func (c *StorageGateway) DeleteAutomaticTapeCreationPolicy(input *DeleteAutomaticTapeCreationPolicyInput) (*DeleteAutomaticTapeCreationPolicyOutput, error) { req, out := c.DeleteAutomaticTapeCreationPolicyRequest(input) return out, req.Send() } // DeleteAutomaticTapeCreationPolicyWithContext is the same as DeleteAutomaticTapeCreationPolicy with the addition of // the ability to pass a context and additional request options. // // See DeleteAutomaticTapeCreationPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DeleteAutomaticTapeCreationPolicyWithContext(ctx aws.Context, input *DeleteAutomaticTapeCreationPolicyInput, opts ...request.Option) (*DeleteAutomaticTapeCreationPolicyOutput, error) { req, out := c.DeleteAutomaticTapeCreationPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteBandwidthRateLimit = "DeleteBandwidthRateLimit" // DeleteBandwidthRateLimitRequest generates a "aws/request.Request" representing the // client's request for the DeleteBandwidthRateLimit operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteBandwidthRateLimit for more information on using the DeleteBandwidthRateLimit // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteBandwidthRateLimitRequest method. // req, resp := client.DeleteBandwidthRateLimitRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteBandwidthRateLimit func (c *StorageGateway) DeleteBandwidthRateLimitRequest(input *DeleteBandwidthRateLimitInput) (req *request.Request, output *DeleteBandwidthRateLimitOutput) { op := &request.Operation{ Name: opDeleteBandwidthRateLimit, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteBandwidthRateLimitInput{} } output = &DeleteBandwidthRateLimitOutput{} req = c.newRequest(op, input, output) return } // DeleteBandwidthRateLimit API operation for AWS Storage Gateway. // // Deletes the bandwidth rate limits of a gateway. 
// You can delete either the upload or the download bandwidth rate limit, or
// you can delete both. If you delete only one of the limits, the other limit
// remains unchanged. To specify which gateway to work with, use the Amazon
// Resource Name (ARN) of the gateway in your request. This operation is
// supported for the stored volume, cached volume, and tape gateway types.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Storage Gateway's
// API operation DeleteBandwidthRateLimit for usage and error information.
//
// Returned Error Types:
//   * InvalidGatewayRequestException
//   An exception occurred because an invalid gateway request was issued to the
//   service. For more information, see the error and message fields.
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteBandwidthRateLimit
func (c *StorageGateway) DeleteBandwidthRateLimit(input *DeleteBandwidthRateLimitInput) (*DeleteBandwidthRateLimitOutput, error) {
	req, out := c.DeleteBandwidthRateLimitRequest(input)
	return out, req.Send()
}

// DeleteBandwidthRateLimitWithContext is the same as DeleteBandwidthRateLimit with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBandwidthRateLimit for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *StorageGateway) DeleteBandwidthRateLimitWithContext(ctx aws.Context, input *DeleteBandwidthRateLimitInput, opts ...request.Option) (*DeleteBandwidthRateLimitOutput, error) {
	req, out := c.DeleteBandwidthRateLimitRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteChapCredentials = "DeleteChapCredentials"

// DeleteChapCredentialsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteChapCredentials operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteChapCredentials for more information on using the DeleteChapCredentials
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteChapCredentialsRequest method.
// req, resp := client.DeleteChapCredentialsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteChapCredentials func (c *StorageGateway) DeleteChapCredentialsRequest(input *DeleteChapCredentialsInput) (req *request.Request, output *DeleteChapCredentialsOutput) { op := &request.Operation{ Name: opDeleteChapCredentials, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteChapCredentialsInput{} } output = &DeleteChapCredentialsOutput{} req = c.newRequest(op, input, output) return } // DeleteChapCredentials API operation for AWS Storage Gateway. // // Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for // a specified iSCSI target and initiator pair. This operation is supported // in volume and tape gateway types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteChapCredentials for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteChapCredentials func (c *StorageGateway) DeleteChapCredentials(input *DeleteChapCredentialsInput) (*DeleteChapCredentialsOutput, error) { req, out := c.DeleteChapCredentialsRequest(input) return out, req.Send() } // DeleteChapCredentialsWithContext is the same as DeleteChapCredentials with the addition of // the ability to pass a context and additional request options. // // See DeleteChapCredentials for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DeleteChapCredentialsWithContext(ctx aws.Context, input *DeleteChapCredentialsInput, opts ...request.Option) (*DeleteChapCredentialsOutput, error) { req, out := c.DeleteChapCredentialsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteFileShare = "DeleteFileShare" // DeleteFileShareRequest generates a "aws/request.Request" representing the // client's request for the DeleteFileShare operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteFileShare for more information on using the DeleteFileShare // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteFileShareRequest method. 
// req, resp := client.DeleteFileShareRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteFileShare func (c *StorageGateway) DeleteFileShareRequest(input *DeleteFileShareInput) (req *request.Request, output *DeleteFileShareOutput) { op := &request.Operation{ Name: opDeleteFileShare, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteFileShareInput{} } output = &DeleteFileShareOutput{} req = c.newRequest(op, input, output) return } // DeleteFileShare API operation for AWS Storage Gateway. // // Deletes a file share from a file gateway. This operation is only supported // for file gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteFileShare for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteFileShare func (c *StorageGateway) DeleteFileShare(input *DeleteFileShareInput) (*DeleteFileShareOutput, error) { req, out := c.DeleteFileShareRequest(input) return out, req.Send() } // DeleteFileShareWithContext is the same as DeleteFileShare with the addition of // the ability to pass a context and additional request options. // // See DeleteFileShare for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DeleteFileShareWithContext(ctx aws.Context, input *DeleteFileShareInput, opts ...request.Option) (*DeleteFileShareOutput, error) { req, out := c.DeleteFileShareRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteGateway = "DeleteGateway" // DeleteGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteGateway operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteGateway for more information on using the DeleteGateway // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteGatewayRequest method. 
// req, resp := client.DeleteGatewayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteGateway func (c *StorageGateway) DeleteGatewayRequest(input *DeleteGatewayInput) (req *request.Request, output *DeleteGatewayOutput) { op := &request.Operation{ Name: opDeleteGateway, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteGatewayInput{} } output = &DeleteGatewayOutput{} req = c.newRequest(op, input, output) return } // DeleteGateway API operation for AWS Storage Gateway. // // Deletes a gateway. To specify which gateway to delete, use the Amazon Resource // Name (ARN) of the gateway in your request. The operation deletes the gateway; // however, it does not delete the gateway virtual machine (VM) from your host // computer. // // After you delete a gateway, you cannot reactivate it. Completed snapshots // of the gateway volumes are not deleted upon deleting the gateway, however, // pending snapshots will not complete. After you delete a gateway, your next // step is to remove it from your environment. // // You no longer pay software charges after the gateway is deleted; however, // your existing Amazon EBS snapshots persist and you will continue to be billed // for these snapshots. You can choose to remove all remaining Amazon EBS snapshots // by canceling your Amazon EC2 subscription. If you prefer not to cancel your // Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 // console. For more information, see the AWS Storage Gateway detail page (http://aws.amazon.com/storagegateway). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteGateway for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteGateway func (c *StorageGateway) DeleteGateway(input *DeleteGatewayInput) (*DeleteGatewayOutput, error) { req, out := c.DeleteGatewayRequest(input) return out, req.Send() } // DeleteGatewayWithContext is the same as DeleteGateway with the addition of // the ability to pass a context and additional request options. // // See DeleteGateway for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DeleteGatewayWithContext(ctx aws.Context, input *DeleteGatewayInput, opts ...request.Option) (*DeleteGatewayOutput, error) { req, out := c.DeleteGatewayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
	return out, req.Send()
}

const opDeleteSnapshotSchedule = "DeleteSnapshotSchedule"

// DeleteSnapshotScheduleRequest generates a "aws/request.Request" representing the
// client's request for the DeleteSnapshotSchedule operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See DeleteSnapshotSchedule for more information on using the DeleteSnapshotSchedule
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the DeleteSnapshotScheduleRequest method.
//    req, resp := client.DeleteSnapshotScheduleRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteSnapshotSchedule
func (c *StorageGateway) DeleteSnapshotScheduleRequest(input *DeleteSnapshotScheduleInput) (req *request.Request, output *DeleteSnapshotScheduleOutput) {
	op := &request.Operation{
		Name:       opDeleteSnapshotSchedule,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteSnapshotScheduleInput{}
	}

	output = &DeleteSnapshotScheduleOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteSnapshotSchedule API operation for AWS Storage Gateway.
//
// Deletes a snapshot of a volume.
//
// You can take snapshots of your gateway volumes on a scheduled or ad hoc basis.
// This API action enables you to delete a snapshot schedule for a volume. For
// more information, see Backing up your volumes (https://docs.aws.amazon.com/storagegateway/latest/userguide/backing-up-volumes.html).
// In the DeleteSnapshotSchedule request, you identify the volume by providing
// its Amazon Resource Name (ARN). This operation is only supported in stored
// and cached volume gateway types.
//
// To list or delete a snapshot, you must use the Amazon EC2 API. For more information,
// go to DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html)
// in the Amazon Elastic Compute Cloud API Reference.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Storage Gateway's
// API operation DeleteSnapshotSchedule for usage and error information.
//
// Returned Error Types:
//   * InvalidGatewayRequestException
//   An exception occurred because an invalid gateway request was issued to the
//   service. For more information, see the error and message fields.
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteSnapshotSchedule
func (c *StorageGateway) DeleteSnapshotSchedule(input *DeleteSnapshotScheduleInput) (*DeleteSnapshotScheduleOutput, error) {
	req, out := c.DeleteSnapshotScheduleRequest(input)
	return out, req.Send()
}

// DeleteSnapshotScheduleWithContext is the same as DeleteSnapshotSchedule with the addition of
// the ability to pass a context and additional request options.
// // See DeleteSnapshotSchedule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DeleteSnapshotScheduleWithContext(ctx aws.Context, input *DeleteSnapshotScheduleInput, opts ...request.Option) (*DeleteSnapshotScheduleOutput, error) { req, out := c.DeleteSnapshotScheduleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteTape = "DeleteTape" // DeleteTapeRequest generates a "aws/request.Request" representing the // client's request for the DeleteTape operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteTape for more information on using the DeleteTape // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteTapeRequest method. // req, resp := client.DeleteTapeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTape func (c *StorageGateway) DeleteTapeRequest(input *DeleteTapeInput) (req *request.Request, output *DeleteTapeOutput) { op := &request.Operation{ Name: opDeleteTape, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteTapeInput{} } output = &DeleteTapeOutput{} req = c.newRequest(op, input, output) return } // DeleteTape API operation for AWS Storage Gateway. // // Deletes the specified virtual tape. This operation is only supported in the // tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteTape for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTape func (c *StorageGateway) DeleteTape(input *DeleteTapeInput) (*DeleteTapeOutput, error) { req, out := c.DeleteTapeRequest(input) return out, req.Send() } // DeleteTapeWithContext is the same as DeleteTape with the addition of // the ability to pass a context and additional request options. // // See DeleteTape for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
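//
// A minimal, hypothetical sketch (the ARNs below are invented placeholders,
// and svc is assumed to be a configured *storagegateway.StorageGateway):
//
//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//    defer cancel()
//    _, err := svc.DeleteTapeWithContext(ctx, &storagegateway.DeleteTapeInput{
//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
//        TapeARN:    aws.String("arn:aws:storagegateway:us-east-1:111122223333:tape/TEST05A2A0"),
//    })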
func (c *StorageGateway) DeleteTapeWithContext(ctx aws.Context, input *DeleteTapeInput, opts ...request.Option) (*DeleteTapeOutput, error) { req, out := c.DeleteTapeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteTapeArchive = "DeleteTapeArchive" // DeleteTapeArchiveRequest generates a "aws/request.Request" representing the // client's request for the DeleteTapeArchive operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteTapeArchive for more information on using the DeleteTapeArchive // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteTapeArchiveRequest method. // req, resp := client.DeleteTapeArchiveRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTapeArchive func (c *StorageGateway) DeleteTapeArchiveRequest(input *DeleteTapeArchiveInput) (req *request.Request, output *DeleteTapeArchiveOutput) { op := &request.Operation{ Name: opDeleteTapeArchive, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteTapeArchiveInput{} } output = &DeleteTapeArchiveOutput{} req = c.newRequest(op, input, output) return } // DeleteTapeArchive API operation for AWS Storage Gateway. // // Deletes the specified virtual tape from the virtual tape shelf (VTS). This // operation is only supported in the tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteTapeArchive for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTapeArchive func (c *StorageGateway) DeleteTapeArchive(input *DeleteTapeArchiveInput) (*DeleteTapeArchiveOutput, error) { req, out := c.DeleteTapeArchiveRequest(input) return out, req.Send() } // DeleteTapeArchiveWithContext is the same as DeleteTapeArchive with the addition of // the ability to pass a context and additional request options. // // See DeleteTapeArchive for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
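//
// The variadic options apply to this request only; for example (a sketch,
// assuming ctx, input, and svc are already defined):
//
//    _, err := svc.DeleteTapeArchiveWithContext(ctx, input,
//        request.WithLogLevel(aws.LogDebugWithHTTPBody))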
func (c *StorageGateway) DeleteTapeArchiveWithContext(ctx aws.Context, input *DeleteTapeArchiveInput, opts ...request.Option) (*DeleteTapeArchiveOutput, error) { req, out := c.DeleteTapeArchiveRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteVolume = "DeleteVolume" // DeleteVolumeRequest generates a "aws/request.Request" representing the // client's request for the DeleteVolume operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DeleteVolume for more information on using the DeleteVolume // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DeleteVolumeRequest method. // req, resp := client.DeleteVolumeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteVolume func (c *StorageGateway) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { op := &request.Operation{ Name: opDeleteVolume, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteVolumeInput{} } output = &DeleteVolumeOutput{} req = c.newRequest(op, input, output) return } // DeleteVolume API operation for AWS Storage Gateway. // // Deletes the specified storage volume that you previously created using the // CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is // only supported in the cached volume and stored volume types. For stored volume // gateways, the local disk that was configured as the storage volume is not // deleted. You can reuse the local disk to create another storage volume. // // Before you delete a volume, make sure there are no iSCSI connections to the // volume you are deleting. You should also make sure there is no snapshot in // progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to // query snapshots on the volume you are deleting and check the snapshot status. // For more information, go to DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) // in the Amazon Elastic Compute Cloud API Reference. // // In the request, you must provide the Amazon Resource Name (ARN) of the storage // volume you want to delete. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DeleteVolume for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. 
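//
// A minimal illustrative sketch of distinguishing these error types with
// awserr; the svc client and VolumeARN value are assumptions, and the
// ErrCode constants are the package's generated error-code strings.
//
//    _, err := svc.DeleteVolume(&storagegateway.DeleteVolumeInput{
//        VolumeARN: aws.String("arn:aws:storagegateway:..."),
//    })
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case storagegateway.ErrCodeInvalidGatewayRequestException:
//            fmt.Println(aerr.Code(), aerr.Message())
//        case storagegateway.ErrCodeInternalServerError:
//            fmt.Println(aerr.Code(), aerr.Message())
//        }
//    }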
// // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteVolume func (c *StorageGateway) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { req, out := c.DeleteVolumeRequest(input) return out, req.Send() } // DeleteVolumeWithContext is the same as DeleteVolume with the addition of // the ability to pass a context and additional request options. // // See DeleteVolume for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DeleteVolumeWithContext(ctx aws.Context, input *DeleteVolumeInput, opts ...request.Option) (*DeleteVolumeOutput, error) { req, out := c.DeleteVolumeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeAvailabilityMonitorTest = "DescribeAvailabilityMonitorTest" // DescribeAvailabilityMonitorTestRequest generates a "aws/request.Request" representing the // client's request for the DescribeAvailabilityMonitorTest operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeAvailabilityMonitorTest for more information on using the DescribeAvailabilityMonitorTest // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeAvailabilityMonitorTestRequest method. // req, resp := client.DescribeAvailabilityMonitorTestRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeAvailabilityMonitorTest func (c *StorageGateway) DescribeAvailabilityMonitorTestRequest(input *DescribeAvailabilityMonitorTestInput) (req *request.Request, output *DescribeAvailabilityMonitorTestOutput) { op := &request.Operation{ Name: opDescribeAvailabilityMonitorTest, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeAvailabilityMonitorTestInput{} } output = &DescribeAvailabilityMonitorTestOutput{} req = c.newRequest(op, input, output) return } // DescribeAvailabilityMonitorTest API operation for AWS Storage Gateway. // // Returns information about the most recent High Availability monitoring test // that was performed on the host in a cluster. If a test isn't performed, the // status and start time in the response would be null. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeAvailabilityMonitorTest for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. 
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeAvailabilityMonitorTest
func (c *StorageGateway) DescribeAvailabilityMonitorTest(input *DescribeAvailabilityMonitorTestInput) (*DescribeAvailabilityMonitorTestOutput, error) {
	req, out := c.DescribeAvailabilityMonitorTestRequest(input)
	return out, req.Send()
}

// DescribeAvailabilityMonitorTestWithContext is the same as DescribeAvailabilityMonitorTest with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeAvailabilityMonitorTest for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *StorageGateway) DescribeAvailabilityMonitorTestWithContext(ctx aws.Context, input *DescribeAvailabilityMonitorTestInput, opts ...request.Option) (*DescribeAvailabilityMonitorTestOutput, error) {
	req, out := c.DescribeAvailabilityMonitorTestRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDescribeBandwidthRateLimit = "DescribeBandwidthRateLimit"

// DescribeBandwidthRateLimitRequest generates a "aws/request.Request" representing the
// client's request for the DescribeBandwidthRateLimit operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeBandwidthRateLimit for more information on using the DescribeBandwidthRateLimit
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the DescribeBandwidthRateLimitRequest method.
//    req, resp := client.DescribeBandwidthRateLimitRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeBandwidthRateLimit
func (c *StorageGateway) DescribeBandwidthRateLimitRequest(input *DescribeBandwidthRateLimitInput) (req *request.Request, output *DescribeBandwidthRateLimitOutput) {
	op := &request.Operation{
		Name:       opDescribeBandwidthRateLimit,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeBandwidthRateLimitInput{}
	}

	output = &DescribeBandwidthRateLimitOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DescribeBandwidthRateLimit API operation for AWS Storage Gateway.
//
// Returns the bandwidth rate limits of a gateway. By default, these limits
// are not set, which means no bandwidth rate limiting is in effect. This operation
// is supported for the stored volume, cached volume, and tape gateway types.
//
// This operation returns a value for a bandwidth rate limit only if the
// limit is set. If no limits are set for the gateway, then this operation returns
// only the gateway ARN in the response body.
To specify which gateway to describe, // use the Amazon Resource Name (ARN) of the gateway in your request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeBandwidthRateLimit for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeBandwidthRateLimit func (c *StorageGateway) DescribeBandwidthRateLimit(input *DescribeBandwidthRateLimitInput) (*DescribeBandwidthRateLimitOutput, error) { req, out := c.DescribeBandwidthRateLimitRequest(input) return out, req.Send() } // DescribeBandwidthRateLimitWithContext is the same as DescribeBandwidthRateLimit with the addition of // the ability to pass a context and additional request options. // // See DescribeBandwidthRateLimit for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeBandwidthRateLimitWithContext(ctx aws.Context, input *DescribeBandwidthRateLimitInput, opts ...request.Option) (*DescribeBandwidthRateLimitOutput, error) { req, out := c.DescribeBandwidthRateLimitRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeCache = "DescribeCache" // DescribeCacheRequest generates a "aws/request.Request" representing the // client's request for the DescribeCache operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeCache for more information on using the DescribeCache // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeCacheRequest method. // req, resp := client.DescribeCacheRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeCache func (c *StorageGateway) DescribeCacheRequest(input *DescribeCacheInput) (req *request.Request, output *DescribeCacheOutput) { op := &request.Operation{ Name: opDescribeCache, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeCacheInput{} } output = &DescribeCacheOutput{} req = c.newRequest(op, input, output) return } // DescribeCache API operation for AWS Storage Gateway. // // Returns information about the cache of a gateway. This operation is only // supported in the cached volume, tape, and file gateway types. 
// // The response includes disk IDs that are configured as cache, and it includes // the amount of cache allocated and used. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeCache for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeCache func (c *StorageGateway) DescribeCache(input *DescribeCacheInput) (*DescribeCacheOutput, error) { req, out := c.DescribeCacheRequest(input) return out, req.Send() } // DescribeCacheWithContext is the same as DescribeCache with the addition of // the ability to pass a context and additional request options. // // See DescribeCache for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeCacheWithContext(ctx aws.Context, input *DescribeCacheInput, opts ...request.Option) (*DescribeCacheOutput, error) { req, out := c.DescribeCacheRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeCachediSCSIVolumes = "DescribeCachediSCSIVolumes" // DescribeCachediSCSIVolumesRequest generates a "aws/request.Request" representing the // client's request for the DescribeCachediSCSIVolumes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeCachediSCSIVolumes for more information on using the DescribeCachediSCSIVolumes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeCachediSCSIVolumesRequest method. // req, resp := client.DescribeCachediSCSIVolumesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeCachediSCSIVolumes func (c *StorageGateway) DescribeCachediSCSIVolumesRequest(input *DescribeCachediSCSIVolumesInput) (req *request.Request, output *DescribeCachediSCSIVolumesOutput) { op := &request.Operation{ Name: opDescribeCachediSCSIVolumes, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeCachediSCSIVolumesInput{} } output = &DescribeCachediSCSIVolumesOutput{} req = c.newRequest(op, input, output) return } // DescribeCachediSCSIVolumes API operation for AWS Storage Gateway. // // Returns a description of the gateway volumes specified in the request. 
This // operation is only supported in the cached volume gateway types. // // The list of gateway volumes in the request must be from one gateway. In the // response, AWS Storage Gateway returns volume information sorted by volume // Amazon Resource Name (ARN). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeCachediSCSIVolumes for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeCachediSCSIVolumes func (c *StorageGateway) DescribeCachediSCSIVolumes(input *DescribeCachediSCSIVolumesInput) (*DescribeCachediSCSIVolumesOutput, error) { req, out := c.DescribeCachediSCSIVolumesRequest(input) return out, req.Send() } // DescribeCachediSCSIVolumesWithContext is the same as DescribeCachediSCSIVolumes with the addition of // the ability to pass a context and additional request options. // // See DescribeCachediSCSIVolumes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeCachediSCSIVolumesWithContext(ctx aws.Context, input *DescribeCachediSCSIVolumesInput, opts ...request.Option) (*DescribeCachediSCSIVolumesOutput, error) { req, out := c.DescribeCachediSCSIVolumesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeChapCredentials = "DescribeChapCredentials" // DescribeChapCredentialsRequest generates a "aws/request.Request" representing the // client's request for the DescribeChapCredentials operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeChapCredentials for more information on using the DescribeChapCredentials // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeChapCredentialsRequest method. 
// req, resp := client.DescribeChapCredentialsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeChapCredentials func (c *StorageGateway) DescribeChapCredentialsRequest(input *DescribeChapCredentialsInput) (req *request.Request, output *DescribeChapCredentialsOutput) { op := &request.Operation{ Name: opDescribeChapCredentials, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeChapCredentialsInput{} } output = &DescribeChapCredentialsOutput{} req = c.newRequest(op, input, output) return } // DescribeChapCredentials API operation for AWS Storage Gateway. // // Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials // information for a specified iSCSI target, one for each target-initiator pair. // This operation is supported in the volume and tape gateway types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeChapCredentials for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeChapCredentials func (c *StorageGateway) DescribeChapCredentials(input *DescribeChapCredentialsInput) (*DescribeChapCredentialsOutput, error) { req, out := c.DescribeChapCredentialsRequest(input) return out, req.Send() } // DescribeChapCredentialsWithContext is the same as DescribeChapCredentials with the addition of // the ability to pass a context and additional request options. // // See DescribeChapCredentials for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeChapCredentialsWithContext(ctx aws.Context, input *DescribeChapCredentialsInput, opts ...request.Option) (*DescribeChapCredentialsOutput, error) { req, out := c.DescribeChapCredentialsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeGatewayInformation = "DescribeGatewayInformation" // DescribeGatewayInformationRequest generates a "aws/request.Request" representing the // client's request for the DescribeGatewayInformation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeGatewayInformation for more information on using the DescribeGatewayInformation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeGatewayInformationRequest method. // req, resp := client.DescribeGatewayInformationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeGatewayInformation func (c *StorageGateway) DescribeGatewayInformationRequest(input *DescribeGatewayInformationInput) (req *request.Request, output *DescribeGatewayInformationOutput) { op := &request.Operation{ Name: opDescribeGatewayInformation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeGatewayInformationInput{} } output = &DescribeGatewayInformationOutput{} req = c.newRequest(op, input, output) return } // DescribeGatewayInformation API operation for AWS Storage Gateway. // // Returns metadata about a gateway such as its name, network interfaces, configured // time zone, and the state (whether the gateway is running or not). To specify // which gateway to describe, use the Amazon Resource Name (ARN) of the gateway // in your request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeGatewayInformation for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeGatewayInformation func (c *StorageGateway) DescribeGatewayInformation(input *DescribeGatewayInformationInput) (*DescribeGatewayInformationOutput, error) { req, out := c.DescribeGatewayInformationRequest(input) return out, req.Send() } // DescribeGatewayInformationWithContext is the same as DescribeGatewayInformation with the addition of // the ability to pass a context and additional request options. // // See DescribeGatewayInformation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeGatewayInformationWithContext(ctx aws.Context, input *DescribeGatewayInformationInput, opts ...request.Option) (*DescribeGatewayInformationOutput, error) { req, out := c.DescribeGatewayInformationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeMaintenanceStartTime = "DescribeMaintenanceStartTime" // DescribeMaintenanceStartTimeRequest generates a "aws/request.Request" representing the // client's request for the DescribeMaintenanceStartTime operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// // See DescribeMaintenanceStartTime for more information on using the DescribeMaintenanceStartTime // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeMaintenanceStartTimeRequest method. // req, resp := client.DescribeMaintenanceStartTimeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeMaintenanceStartTime func (c *StorageGateway) DescribeMaintenanceStartTimeRequest(input *DescribeMaintenanceStartTimeInput) (req *request.Request, output *DescribeMaintenanceStartTimeOutput) { op := &request.Operation{ Name: opDescribeMaintenanceStartTime, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeMaintenanceStartTimeInput{} } output = &DescribeMaintenanceStartTimeOutput{} req = c.newRequest(op, input, output) return } // DescribeMaintenanceStartTime API operation for AWS Storage Gateway. // // Returns your gateway's weekly maintenance start time including the day and // time of the week. Note that values are in terms of the gateway's time zone. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeMaintenanceStartTime for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeMaintenanceStartTime func (c *StorageGateway) DescribeMaintenanceStartTime(input *DescribeMaintenanceStartTimeInput) (*DescribeMaintenanceStartTimeOutput, error) { req, out := c.DescribeMaintenanceStartTimeRequest(input) return out, req.Send() } // DescribeMaintenanceStartTimeWithContext is the same as DescribeMaintenanceStartTime with the addition of // the ability to pass a context and additional request options. // // See DescribeMaintenanceStartTime for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeMaintenanceStartTimeWithContext(ctx aws.Context, input *DescribeMaintenanceStartTimeInput, opts ...request.Option) (*DescribeMaintenanceStartTimeOutput, error) { req, out := c.DescribeMaintenanceStartTimeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeNFSFileShares = "DescribeNFSFileShares" // DescribeNFSFileSharesRequest generates a "aws/request.Request" representing the // client's request for the DescribeNFSFileShares operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeNFSFileShares for more information on using the DescribeNFSFileShares // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeNFSFileSharesRequest method. // req, resp := client.DescribeNFSFileSharesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeNFSFileShares func (c *StorageGateway) DescribeNFSFileSharesRequest(input *DescribeNFSFileSharesInput) (req *request.Request, output *DescribeNFSFileSharesOutput) { op := &request.Operation{ Name: opDescribeNFSFileShares, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeNFSFileSharesInput{} } output = &DescribeNFSFileSharesOutput{} req = c.newRequest(op, input, output) return } // DescribeNFSFileShares API operation for AWS Storage Gateway. // // Gets a description for one or more Network File System (NFS) file shares // from a file gateway. This operation is only supported for file gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeNFSFileShares for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeNFSFileShares func (c *StorageGateway) DescribeNFSFileShares(input *DescribeNFSFileSharesInput) (*DescribeNFSFileSharesOutput, error) { req, out := c.DescribeNFSFileSharesRequest(input) return out, req.Send() } // DescribeNFSFileSharesWithContext is the same as DescribeNFSFileShares with the addition of // the ability to pass a context and additional request options. // // See DescribeNFSFileShares for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeNFSFileSharesWithContext(ctx aws.Context, input *DescribeNFSFileSharesInput, opts ...request.Option) (*DescribeNFSFileSharesOutput, error) { req, out := c.DescribeNFSFileSharesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeSMBFileShares = "DescribeSMBFileShares" // DescribeSMBFileSharesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSMBFileShares operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeSMBFileShares for more information on using the DescribeSMBFileShares // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeSMBFileSharesRequest method. // req, resp := client.DescribeSMBFileSharesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSMBFileShares func (c *StorageGateway) DescribeSMBFileSharesRequest(input *DescribeSMBFileSharesInput) (req *request.Request, output *DescribeSMBFileSharesOutput) { op := &request.Operation{ Name: opDescribeSMBFileShares, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeSMBFileSharesInput{} } output = &DescribeSMBFileSharesOutput{} req = c.newRequest(op, input, output) return } // DescribeSMBFileShares API operation for AWS Storage Gateway. // // Gets a description for one or more Server Message Block (SMB) file shares // from a file gateway. This operation is only supported for file gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeSMBFileShares for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSMBFileShares func (c *StorageGateway) DescribeSMBFileShares(input *DescribeSMBFileSharesInput) (*DescribeSMBFileSharesOutput, error) { req, out := c.DescribeSMBFileSharesRequest(input) return out, req.Send() } // DescribeSMBFileSharesWithContext is the same as DescribeSMBFileShares with the addition of // the ability to pass a context and additional request options. // // See DescribeSMBFileShares for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeSMBFileSharesWithContext(ctx aws.Context, input *DescribeSMBFileSharesInput, opts ...request.Option) (*DescribeSMBFileSharesOutput, error) { req, out := c.DescribeSMBFileSharesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeSMBSettings = "DescribeSMBSettings" // DescribeSMBSettingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSMBSettings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeSMBSettings for more information on using the DescribeSMBSettings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeSMBSettingsRequest method. // req, resp := client.DescribeSMBSettingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSMBSettings func (c *StorageGateway) DescribeSMBSettingsRequest(input *DescribeSMBSettingsInput) (req *request.Request, output *DescribeSMBSettingsOutput) { op := &request.Operation{ Name: opDescribeSMBSettings, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeSMBSettingsInput{} } output = &DescribeSMBSettingsOutput{} req = c.newRequest(op, input, output) return } // DescribeSMBSettings API operation for AWS Storage Gateway. // // Gets a description of a Server Message Block (SMB) file share settings from // a file gateway. This operation is only supported for file gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeSMBSettings for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSMBSettings func (c *StorageGateway) DescribeSMBSettings(input *DescribeSMBSettingsInput) (*DescribeSMBSettingsOutput, error) { req, out := c.DescribeSMBSettingsRequest(input) return out, req.Send() } // DescribeSMBSettingsWithContext is the same as DescribeSMBSettings with the addition of // the ability to pass a context and additional request options. // // See DescribeSMBSettings for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeSMBSettingsWithContext(ctx aws.Context, input *DescribeSMBSettingsInput, opts ...request.Option) (*DescribeSMBSettingsOutput, error) { req, out := c.DescribeSMBSettingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeSnapshotSchedule = "DescribeSnapshotSchedule" // DescribeSnapshotScheduleRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshotSchedule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // // See DescribeSnapshotSchedule for more information on using the DescribeSnapshotSchedule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeSnapshotScheduleRequest method. // req, resp := client.DescribeSnapshotScheduleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSnapshotSchedule func (c *StorageGateway) DescribeSnapshotScheduleRequest(input *DescribeSnapshotScheduleInput) (req *request.Request, output *DescribeSnapshotScheduleOutput) { op := &request.Operation{ Name: opDescribeSnapshotSchedule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeSnapshotScheduleInput{} } output = &DescribeSnapshotScheduleOutput{} req = c.newRequest(op, input, output) return } // DescribeSnapshotSchedule API operation for AWS Storage Gateway. // // Describes the snapshot schedule for the specified gateway volume. The snapshot // schedule information includes intervals at which snapshots are automatically // initiated on the volume. This operation is only supported in the cached volume // and stored volume types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeSnapshotSchedule for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeSnapshotSchedule func (c *StorageGateway) DescribeSnapshotSchedule(input *DescribeSnapshotScheduleInput) (*DescribeSnapshotScheduleOutput, error) { req, out := c.DescribeSnapshotScheduleRequest(input) return out, req.Send() } // DescribeSnapshotScheduleWithContext is the same as DescribeSnapshotSchedule with the addition of // the ability to pass a context and additional request options. // // See DescribeSnapshotSchedule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeSnapshotScheduleWithContext(ctx aws.Context, input *DescribeSnapshotScheduleInput, opts ...request.Option) (*DescribeSnapshotScheduleOutput, error) { req, out := c.DescribeSnapshotScheduleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeStorediSCSIVolumes = "DescribeStorediSCSIVolumes" // DescribeStorediSCSIVolumesRequest generates a "aws/request.Request" representing the // client's request for the DescribeStorediSCSIVolumes operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeStorediSCSIVolumes for more information on using the DescribeStorediSCSIVolumes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeStorediSCSIVolumesRequest method. // req, resp := client.DescribeStorediSCSIVolumesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeStorediSCSIVolumes func (c *StorageGateway) DescribeStorediSCSIVolumesRequest(input *DescribeStorediSCSIVolumesInput) (req *request.Request, output *DescribeStorediSCSIVolumesOutput) { op := &request.Operation{ Name: opDescribeStorediSCSIVolumes, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeStorediSCSIVolumesInput{} } output = &DescribeStorediSCSIVolumesOutput{} req = c.newRequest(op, input, output) return } // DescribeStorediSCSIVolumes API operation for AWS Storage Gateway. // // Returns the description of the gateway volumes specified in the request. // The list of gateway volumes in the request must be from one gateway. In the // response, AWS Storage Gateway returns volume information sorted by volume // ARNs. This operation is only supported in stored volume gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeStorediSCSIVolumes for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeStorediSCSIVolumes func (c *StorageGateway) DescribeStorediSCSIVolumes(input *DescribeStorediSCSIVolumesInput) (*DescribeStorediSCSIVolumesOutput, error) { req, out := c.DescribeStorediSCSIVolumesRequest(input) return out, req.Send() } // DescribeStorediSCSIVolumesWithContext is the same as DescribeStorediSCSIVolumes with the addition of // the ability to pass a context and additional request options. // // See DescribeStorediSCSIVolumes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeStorediSCSIVolumesWithContext(ctx aws.Context, input *DescribeStorediSCSIVolumesInput, opts ...request.Option) (*DescribeStorediSCSIVolumesOutput, error) { req, out := c.DescribeStorediSCSIVolumesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } const opDescribeTapeArchives = "DescribeTapeArchives" // DescribeTapeArchivesRequest generates a "aws/request.Request" representing the // client's request for the DescribeTapeArchives operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeTapeArchives for more information on using the DescribeTapeArchives // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeTapeArchivesRequest method. // req, resp := client.DescribeTapeArchivesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapeArchives func (c *StorageGateway) DescribeTapeArchivesRequest(input *DescribeTapeArchivesInput) (req *request.Request, output *DescribeTapeArchivesOutput) { op := &request.Operation{ Name: opDescribeTapeArchives, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeTapeArchivesInput{} } output = &DescribeTapeArchivesOutput{} req = c.newRequest(op, input, output) return } // DescribeTapeArchives API operation for AWS Storage Gateway. // // Returns a description of specified virtual tapes in the virtual tape shelf // (VTS). This operation is only supported in the tape gateway type. // // If a specific TapeARN is not specified, AWS Storage Gateway returns a description // of all virtual tapes found in the VTS associated with your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeTapeArchives for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapeArchives func (c *StorageGateway) DescribeTapeArchives(input *DescribeTapeArchivesInput) (*DescribeTapeArchivesOutput, error) { req, out := c.DescribeTapeArchivesRequest(input) return out, req.Send() } // DescribeTapeArchivesWithContext is the same as DescribeTapeArchives with the addition of // the ability to pass a context and additional request options. // // See DescribeTapeArchives for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
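//
// A minimal illustrative sketch of injecting a custom header through a
// request.Option; the svc client and header name are assumptions.
//
//    withHeader := func(r *request.Request) {
//        r.HTTPRequest.Header.Set("X-Example-Header", "example")
//    }
//    out, err := svc.DescribeTapeArchivesWithContext(aws.BackgroundContext(),
//        &storagegateway.DescribeTapeArchivesInput{}, withHeader)
//    if err == nil {
//        fmt.Println(out)
//    }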
func (c *StorageGateway) DescribeTapeArchivesWithContext(ctx aws.Context, input *DescribeTapeArchivesInput, opts ...request.Option) (*DescribeTapeArchivesOutput, error) { req, out := c.DescribeTapeArchivesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeTapeArchivesPages iterates over the pages of a DescribeTapeArchives operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeTapeArchives method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeTapeArchives operation. // pageNum := 0 // err := client.DescribeTapeArchivesPages(params, // func(page *storagegateway.DescribeTapeArchivesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) DescribeTapeArchivesPages(input *DescribeTapeArchivesInput, fn func(*DescribeTapeArchivesOutput, bool) bool) error { return c.DescribeTapeArchivesPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeTapeArchivesPagesWithContext same as DescribeTapeArchivesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeTapeArchivesPagesWithContext(ctx aws.Context, input *DescribeTapeArchivesInput, fn func(*DescribeTapeArchivesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *DescribeTapeArchivesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.DescribeTapeArchivesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*DescribeTapeArchivesOutput), !p.HasNextPage()) { break } } return p.Err() } const opDescribeTapeRecoveryPoints = "DescribeTapeRecoveryPoints" // DescribeTapeRecoveryPointsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTapeRecoveryPoints operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeTapeRecoveryPoints for more information on using the DescribeTapeRecoveryPoints // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeTapeRecoveryPointsRequest method. 
// req, resp := client.DescribeTapeRecoveryPointsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapeRecoveryPoints func (c *StorageGateway) DescribeTapeRecoveryPointsRequest(input *DescribeTapeRecoveryPointsInput) (req *request.Request, output *DescribeTapeRecoveryPointsOutput) { op := &request.Operation{ Name: opDescribeTapeRecoveryPoints, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeTapeRecoveryPointsInput{} } output = &DescribeTapeRecoveryPointsOutput{} req = c.newRequest(op, input, output) return } // DescribeTapeRecoveryPoints API operation for AWS Storage Gateway. // // Returns a list of virtual tape recovery points that are available for the // specified tape gateway. // // A recovery point is a point-in-time view of a virtual tape at which all the // data on the virtual tape is consistent. If your gateway crashes, virtual // tapes that have recovery points can be recovered to a new gateway. This operation // is only supported in the tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeTapeRecoveryPoints for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapeRecoveryPoints func (c *StorageGateway) DescribeTapeRecoveryPoints(input *DescribeTapeRecoveryPointsInput) (*DescribeTapeRecoveryPointsOutput, error) { req, out := c.DescribeTapeRecoveryPointsRequest(input) return out, req.Send() } // DescribeTapeRecoveryPointsWithContext is the same as DescribeTapeRecoveryPoints with the addition of // the ability to pass a context and additional request options. // // See DescribeTapeRecoveryPoints for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeTapeRecoveryPointsWithContext(ctx aws.Context, input *DescribeTapeRecoveryPointsInput, opts ...request.Option) (*DescribeTapeRecoveryPointsOutput, error) { req, out := c.DescribeTapeRecoveryPointsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeTapeRecoveryPointsPages iterates over the pages of a DescribeTapeRecoveryPoints operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeTapeRecoveryPoints method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. 
// // // Example iterating over at most 3 pages of a DescribeTapeRecoveryPoints operation. // pageNum := 0 // err := client.DescribeTapeRecoveryPointsPages(params, // func(page *storagegateway.DescribeTapeRecoveryPointsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) DescribeTapeRecoveryPointsPages(input *DescribeTapeRecoveryPointsInput, fn func(*DescribeTapeRecoveryPointsOutput, bool) bool) error { return c.DescribeTapeRecoveryPointsPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeTapeRecoveryPointsPagesWithContext same as DescribeTapeRecoveryPointsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeTapeRecoveryPointsPagesWithContext(ctx aws.Context, input *DescribeTapeRecoveryPointsInput, fn func(*DescribeTapeRecoveryPointsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *DescribeTapeRecoveryPointsInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.DescribeTapeRecoveryPointsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*DescribeTapeRecoveryPointsOutput), !p.HasNextPage()) { break } } return p.Err() } const opDescribeTapes = "DescribeTapes" // DescribeTapesRequest generates a "aws/request.Request" representing the // client's request for the DescribeTapes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeTapes for more information on using the DescribeTapes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeTapesRequest method. // req, resp := client.DescribeTapesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapes func (c *StorageGateway) DescribeTapesRequest(input *DescribeTapesInput) (req *request.Request, output *DescribeTapesOutput) { op := &request.Operation{ Name: opDescribeTapes, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeTapesInput{} } output = &DescribeTapesOutput{} req = c.newRequest(op, input, output) return } // DescribeTapes API operation for AWS Storage Gateway. // // Returns a description of the specified Amazon Resource Name (ARN) of virtual // tapes. If a TapeARN is not specified, returns a description of all virtual // tapes associated with the specified gateway. This operation is only supported // in the tape gateway type. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeTapes for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeTapes func (c *StorageGateway) DescribeTapes(input *DescribeTapesInput) (*DescribeTapesOutput, error) { req, out := c.DescribeTapesRequest(input) return out, req.Send() } // DescribeTapesWithContext is the same as DescribeTapes with the addition of // the ability to pass a context and additional request options. // // See DescribeTapes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeTapesWithContext(ctx aws.Context, input *DescribeTapesInput, opts ...request.Option) (*DescribeTapesOutput, error) { req, out := c.DescribeTapesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeTapesPages iterates over the pages of a DescribeTapes operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeTapes method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeTapes operation. // pageNum := 0 // err := client.DescribeTapesPages(params, // func(page *storagegateway.DescribeTapesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) DescribeTapesPages(input *DescribeTapesInput, fn func(*DescribeTapesOutput, bool) bool) error { return c.DescribeTapesPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeTapesPagesWithContext same as DescribeTapesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeTapesPagesWithContext(ctx aws.Context, input *DescribeTapesInput, fn func(*DescribeTapesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *DescribeTapesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.DescribeTapesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
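			// Hand the prepared request back to the paginator, which sends it
			// and advances the Marker token between pages.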
return req, nil }, } for p.Next() { if !fn(p.Page().(*DescribeTapesOutput), !p.HasNextPage()) { break } } return p.Err() } const opDescribeUploadBuffer = "DescribeUploadBuffer" // DescribeUploadBufferRequest generates a "aws/request.Request" representing the // client's request for the DescribeUploadBuffer operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeUploadBuffer for more information on using the DescribeUploadBuffer // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeUploadBufferRequest method. // req, resp := client.DescribeUploadBufferRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeUploadBuffer func (c *StorageGateway) DescribeUploadBufferRequest(input *DescribeUploadBufferInput) (req *request.Request, output *DescribeUploadBufferOutput) { op := &request.Operation{ Name: opDescribeUploadBuffer, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeUploadBufferInput{} } output = &DescribeUploadBufferOutput{} req = c.newRequest(op, input, output) return } // DescribeUploadBuffer API operation for AWS Storage Gateway. // // Returns information about the upload buffer of a gateway. This operation // is supported for the stored volume, cached volume, and tape gateway types. // // The response includes disk IDs that are configured as upload buffer space, // and it includes the amount of upload buffer space allocated and used. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeUploadBuffer for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeUploadBuffer func (c *StorageGateway) DescribeUploadBuffer(input *DescribeUploadBufferInput) (*DescribeUploadBufferOutput, error) { req, out := c.DescribeUploadBufferRequest(input) return out, req.Send() } // DescribeUploadBufferWithContext is the same as DescribeUploadBuffer with the addition of // the ability to pass a context and additional request options. // // See DescribeUploadBuffer for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
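//
// A minimal sketch, assuming an initialized client and a placeholder gateway
// ARN; the timeout bounds the underlying HTTP request:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := client.DescribeUploadBufferWithContext(ctx,
//        &storagegateway.DescribeUploadBufferInput{
//            GatewayARN: aws.String("arn:aws:storagegateway:..."),
//        })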
func (c *StorageGateway) DescribeUploadBufferWithContext(ctx aws.Context, input *DescribeUploadBufferInput, opts ...request.Option) (*DescribeUploadBufferOutput, error) { req, out := c.DescribeUploadBufferRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDescribeVTLDevices = "DescribeVTLDevices" // DescribeVTLDevicesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVTLDevices operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeVTLDevices for more information on using the DescribeVTLDevices // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeVTLDevicesRequest method. // req, resp := client.DescribeVTLDevicesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeVTLDevices func (c *StorageGateway) DescribeVTLDevicesRequest(input *DescribeVTLDevicesInput) (req *request.Request, output *DescribeVTLDevicesOutput) { op := &request.Operation{ Name: opDescribeVTLDevices, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &DescribeVTLDevicesInput{} } output = &DescribeVTLDevicesOutput{} req = c.newRequest(op, input, output) return } // DescribeVTLDevices API operation for AWS Storage Gateway. // // Returns a description of virtual tape library (VTL) devices for the specified // tape gateway. In the response, AWS Storage Gateway returns VTL device information. // // This operation is only supported in the tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeVTLDevices for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeVTLDevices func (c *StorageGateway) DescribeVTLDevices(input *DescribeVTLDevicesInput) (*DescribeVTLDevicesOutput, error) { req, out := c.DescribeVTLDevicesRequest(input) return out, req.Send() } // DescribeVTLDevicesWithContext is the same as DescribeVTLDevices with the addition of // the ability to pass a context and additional request options. // // See DescribeVTLDevices for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeVTLDevicesWithContext(ctx aws.Context, input *DescribeVTLDevicesInput, opts ...request.Option) (*DescribeVTLDevicesOutput, error) { req, out := c.DescribeVTLDevicesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // DescribeVTLDevicesPages iterates over the pages of a DescribeVTLDevices operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See DescribeVTLDevices method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a DescribeVTLDevices operation. // pageNum := 0 // err := client.DescribeVTLDevicesPages(params, // func(page *storagegateway.DescribeVTLDevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) DescribeVTLDevicesPages(input *DescribeVTLDevicesInput, fn func(*DescribeVTLDevicesOutput, bool) bool) error { return c.DescribeVTLDevicesPagesWithContext(aws.BackgroundContext(), input, fn) } // DescribeVTLDevicesPagesWithContext same as DescribeVTLDevicesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeVTLDevicesPagesWithContext(ctx aws.Context, input *DescribeVTLDevicesInput, fn func(*DescribeVTLDevicesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *DescribeVTLDevicesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.DescribeVTLDevicesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*DescribeVTLDevicesOutput), !p.HasNextPage()) { break } } return p.Err() } const opDescribeWorkingStorage = "DescribeWorkingStorage" // DescribeWorkingStorageRequest generates a "aws/request.Request" representing the // client's request for the DescribeWorkingStorage operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See DescribeWorkingStorage for more information on using the DescribeWorkingStorage // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DescribeWorkingStorageRequest method. 
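//    // params is a hypothetical, caller-populated *storagegateway.DescribeWorkingStorageInput.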
// req, resp := client.DescribeWorkingStorageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeWorkingStorage func (c *StorageGateway) DescribeWorkingStorageRequest(input *DescribeWorkingStorageInput) (req *request.Request, output *DescribeWorkingStorageOutput) { op := &request.Operation{ Name: opDescribeWorkingStorage, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DescribeWorkingStorageInput{} } output = &DescribeWorkingStorageOutput{} req = c.newRequest(op, input, output) return } // DescribeWorkingStorage API operation for AWS Storage Gateway. // // Returns information about the working storage of a gateway. This operation // is only supported in the stored volumes gateway type. This operation is deprecated // in cached volumes API version (20120630). Use DescribeUploadBuffer instead. // // Working storage is also referred to as upload buffer. You can also use the // DescribeUploadBuffer operation to add upload buffer to a stored volume gateway. // // The response includes disk IDs that are configured as working storage, and // it includes the amount of working storage allocated and used. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DescribeWorkingStorage for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeWorkingStorage func (c *StorageGateway) DescribeWorkingStorage(input *DescribeWorkingStorageInput) (*DescribeWorkingStorageOutput, error) { req, out := c.DescribeWorkingStorageRequest(input) return out, req.Send() } // DescribeWorkingStorageWithContext is the same as DescribeWorkingStorage with the addition of // the ability to pass a context and additional request options. // // See DescribeWorkingStorage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DescribeWorkingStorageWithContext(ctx aws.Context, input *DescribeWorkingStorageInput, opts ...request.Option) (*DescribeWorkingStorageOutput, error) { req, out := c.DescribeWorkingStorageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDetachVolume = "DetachVolume" // DetachVolumeRequest generates a "aws/request.Request" representing the // client's request for the DetachVolume operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
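//
// For the common case, a minimal sketch of the one-shot DetachVolume call
// (the volume ARN is a placeholder):
//
//    out, err := client.DetachVolume(&storagegateway.DetachVolumeInput{
//        VolumeARN: aws.String("arn:aws:storagegateway:..."),
//    })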
// // See DetachVolume for more information on using the DetachVolume // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DetachVolumeRequest method. // req, resp := client.DetachVolumeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DetachVolume func (c *StorageGateway) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *DetachVolumeOutput) { op := &request.Operation{ Name: opDetachVolume, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DetachVolumeInput{} } output = &DetachVolumeOutput{} req = c.newRequest(op, input, output) return } // DetachVolume API operation for AWS Storage Gateway. // // Disconnects a volume from an iSCSI connection and then detaches the volume // from the specified gateway. Detaching and attaching a volume enables you // to recover your data from one gateway to a different gateway without creating // a snapshot. It also makes it easier to move your volumes from an on-premises // gateway to a gateway hosted on an Amazon EC2 instance. This operation is // only supported in the volume gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DetachVolume for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DetachVolume func (c *StorageGateway) DetachVolume(input *DetachVolumeInput) (*DetachVolumeOutput, error) { req, out := c.DetachVolumeRequest(input) return out, req.Send() } // DetachVolumeWithContext is the same as DetachVolume with the addition of // the ability to pass a context and additional request options. // // See DetachVolume for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DetachVolumeWithContext(ctx aws.Context, input *DetachVolumeInput, opts ...request.Option) (*DetachVolumeOutput, error) { req, out := c.DetachVolumeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDisableGateway = "DisableGateway" // DisableGatewayRequest generates a "aws/request.Request" representing the // client's request for the DisableGateway operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
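//
// A minimal sketch of the one-shot call (placeholder ARN); note that disabling
// a gateway cannot be undone:
//
//    out, err := client.DisableGateway(&storagegateway.DisableGatewayInput{
//        GatewayARN: aws.String("arn:aws:storagegateway:..."),
//    })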
// // See DisableGateway for more information on using the DisableGateway // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the DisableGatewayRequest method. // req, resp := client.DisableGatewayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DisableGateway func (c *StorageGateway) DisableGatewayRequest(input *DisableGatewayInput) (req *request.Request, output *DisableGatewayOutput) { op := &request.Operation{ Name: opDisableGateway, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DisableGatewayInput{} } output = &DisableGatewayOutput{} req = c.newRequest(op, input, output) return } // DisableGateway API operation for AWS Storage Gateway. // // Disables a tape gateway when the gateway is no longer functioning. For example, // if your gateway VM is damaged, you can disable the gateway so you can recover // virtual tapes. // // Use this operation for a tape gateway that is not reachable or not functioning. // This operation is only supported in the tape gateway type. // // After a gateway is disabled, it cannot be enabled. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation DisableGateway for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DisableGateway func (c *StorageGateway) DisableGateway(input *DisableGatewayInput) (*DisableGatewayOutput, error) { req, out := c.DisableGatewayRequest(input) return out, req.Send() } // DisableGatewayWithContext is the same as DisableGateway with the addition of // the ability to pass a context and additional request options. // // See DisableGateway for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) DisableGatewayWithContext(ctx aws.Context, input *DisableGatewayInput, opts ...request.Option) (*DisableGatewayOutput, error) { req, out := c.DisableGatewayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opJoinDomain = "JoinDomain" // JoinDomainRequest generates a "aws/request.Request" representing the // client's request for the JoinDomain operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
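//
// A minimal sketch of the one-shot call; the ARN, domain, and credential
// values are placeholders, and the field names are assumed from the
// JoinDomain API reference:
//
//    out, err := client.JoinDomain(&storagegateway.JoinDomainInput{
//        GatewayARN: aws.String("arn:aws:storagegateway:..."),
//        DomainName: aws.String("corp.example.com"),
//        UserName:   aws.String("admin"),
//        Password:   aws.String("..."),
//    })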
// // See JoinDomain for more information on using the JoinDomain // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the JoinDomainRequest method. // req, resp := client.JoinDomainRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/JoinDomain func (c *StorageGateway) JoinDomainRequest(input *JoinDomainInput) (req *request.Request, output *JoinDomainOutput) { op := &request.Operation{ Name: opJoinDomain, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &JoinDomainInput{} } output = &JoinDomainOutput{} req = c.newRequest(op, input, output) return } // JoinDomain API operation for AWS Storage Gateway. // // Adds a file gateway to an Active Directory domain. This operation is only // supported for file gateways that support the SMB file protocol. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation JoinDomain for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/JoinDomain func (c *StorageGateway) JoinDomain(input *JoinDomainInput) (*JoinDomainOutput, error) { req, out := c.JoinDomainRequest(input) return out, req.Send() } // JoinDomainWithContext is the same as JoinDomain with the addition of // the ability to pass a context and additional request options. // // See JoinDomain for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) JoinDomainWithContext(ctx aws.Context, input *JoinDomainInput, opts ...request.Option) (*JoinDomainOutput, error) { req, out := c.JoinDomainRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListAutomaticTapeCreationPolicies = "ListAutomaticTapeCreationPolicies" // ListAutomaticTapeCreationPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListAutomaticTapeCreationPolicies operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListAutomaticTapeCreationPolicies for more information on using the ListAutomaticTapeCreationPolicies // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // // // Example sending a request using the ListAutomaticTapeCreationPoliciesRequest method. // req, resp := client.ListAutomaticTapeCreationPoliciesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListAutomaticTapeCreationPolicies func (c *StorageGateway) ListAutomaticTapeCreationPoliciesRequest(input *ListAutomaticTapeCreationPoliciesInput) (req *request.Request, output *ListAutomaticTapeCreationPoliciesOutput) { op := &request.Operation{ Name: opListAutomaticTapeCreationPolicies, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ListAutomaticTapeCreationPoliciesInput{} } output = &ListAutomaticTapeCreationPoliciesOutput{} req = c.newRequest(op, input, output) return } // ListAutomaticTapeCreationPolicies API operation for AWS Storage Gateway. // // Lists the automatic tape creation policies for a gateway. If there are no // automatic tape creation policies for the gateway, it returns an empty list. // // This operation is only supported for tape gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListAutomaticTapeCreationPolicies for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListAutomaticTapeCreationPolicies func (c *StorageGateway) ListAutomaticTapeCreationPolicies(input *ListAutomaticTapeCreationPoliciesInput) (*ListAutomaticTapeCreationPoliciesOutput, error) { req, out := c.ListAutomaticTapeCreationPoliciesRequest(input) return out, req.Send() } // ListAutomaticTapeCreationPoliciesWithContext is the same as ListAutomaticTapeCreationPolicies with the addition of // the ability to pass a context and additional request options. // // See ListAutomaticTapeCreationPolicies for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListAutomaticTapeCreationPoliciesWithContext(ctx aws.Context, input *ListAutomaticTapeCreationPoliciesInput, opts ...request.Option) (*ListAutomaticTapeCreationPoliciesOutput, error) { req, out := c.ListAutomaticTapeCreationPoliciesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListFileShares = "ListFileShares" // ListFileSharesRequest generates a "aws/request.Request" representing the // client's request for the ListFileShares operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // // See ListFileShares for more information on using the ListFileShares // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListFileSharesRequest method. // req, resp := client.ListFileSharesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares func (c *StorageGateway) ListFileSharesRequest(input *ListFileSharesInput) (req *request.Request, output *ListFileSharesOutput) { op := &request.Operation{ Name: opListFileShares, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"NextMarker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &ListFileSharesInput{} } output = &ListFileSharesOutput{} req = c.newRequest(op, input, output) return } // ListFileShares API operation for AWS Storage Gateway. // // Gets a list of the file shares for a specific file gateway, or the list of // file shares that belong to the calling user account. This operation is only // supported for file gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListFileShares for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares func (c *StorageGateway) ListFileShares(input *ListFileSharesInput) (*ListFileSharesOutput, error) { req, out := c.ListFileSharesRequest(input) return out, req.Send() } // ListFileSharesWithContext is the same as ListFileShares with the addition of // the ability to pass a context and additional request options. // // See ListFileShares for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListFileSharesWithContext(ctx aws.Context, input *ListFileSharesInput, opts ...request.Option) (*ListFileSharesOutput, error) { req, out := c.ListFileSharesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListFileSharesPages iterates over the pages of a ListFileShares operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListFileShares method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListFileShares operation. 
// pageNum := 0 // err := client.ListFileSharesPages(params, // func(page *storagegateway.ListFileSharesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) ListFileSharesPages(input *ListFileSharesInput, fn func(*ListFileSharesOutput, bool) bool) error { return c.ListFileSharesPagesWithContext(aws.BackgroundContext(), input, fn) } // ListFileSharesPagesWithContext same as ListFileSharesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListFileSharesPagesWithContext(ctx aws.Context, input *ListFileSharesInput, fn func(*ListFileSharesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListFileSharesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListFileSharesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*ListFileSharesOutput), !p.HasNextPage()) { break } } return p.Err() } const opListGateways = "ListGateways" // ListGatewaysRequest generates a "aws/request.Request" representing the // client's request for the ListGateways operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListGateways for more information on using the ListGateways // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListGatewaysRequest method. // req, resp := client.ListGatewaysRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListGateways func (c *StorageGateway) ListGatewaysRequest(input *ListGatewaysInput) (req *request.Request, output *ListGatewaysOutput) { op := &request.Operation{ Name: opListGateways, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &ListGatewaysInput{} } output = &ListGatewaysOutput{} req = c.newRequest(op, input, output) return } // ListGateways API operation for AWS Storage Gateway. // // Lists gateways owned by an AWS account in an AWS Region specified in the // request. The returned list is ordered by gateway Amazon Resource Name (ARN). // // By default, the operation returns a maximum of 100 gateways. This operation // supports pagination that allows you to optionally reduce the number of gateways // returned in a response. // // If you have more gateways than are returned in a response (that is, the response // returns only a truncated list of your gateways), the response contains a // marker that you can specify in your next request to fetch the next page of // gateways. 
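//
// A minimal sketch of driving the marker manually (the Pages helpers that
// follow automate this; field names are taken from the ListGateways input
// and output shapes):
//
//    in := &storagegateway.ListGatewaysInput{Limit: aws.Int64(25)}
//    for {
//        out, err := client.ListGateways(in)
//        if err != nil {
//            break
//        }
//        // ... use out.Gateways ...
//        if out.Marker == nil {
//            break
//        }
//        in.Marker = out.Marker
//    }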
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListGateways for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListGateways func (c *StorageGateway) ListGateways(input *ListGatewaysInput) (*ListGatewaysOutput, error) { req, out := c.ListGatewaysRequest(input) return out, req.Send() } // ListGatewaysWithContext is the same as ListGateways with the addition of // the ability to pass a context and additional request options. // // See ListGateways for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListGatewaysWithContext(ctx aws.Context, input *ListGatewaysInput, opts ...request.Option) (*ListGatewaysOutput, error) { req, out := c.ListGatewaysRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListGatewaysPages iterates over the pages of a ListGateways operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListGateways method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListGateways operation. // pageNum := 0 // err := client.ListGatewaysPages(params, // func(page *storagegateway.ListGatewaysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) ListGatewaysPages(input *ListGatewaysInput, fn func(*ListGatewaysOutput, bool) bool) error { return c.ListGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) } // ListGatewaysPagesWithContext same as ListGatewaysPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListGatewaysPagesWithContext(ctx aws.Context, input *ListGatewaysInput, fn func(*ListGatewaysOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListGatewaysInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListGatewaysRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
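			// Return the configured request; the paginator sends it and reads
			// the next Marker from each response to decide whether to continue.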
return req, nil }, } for p.Next() { if !fn(p.Page().(*ListGatewaysOutput), !p.HasNextPage()) { break } } return p.Err() } const opListLocalDisks = "ListLocalDisks" // ListLocalDisksRequest generates a "aws/request.Request" representing the // client's request for the ListLocalDisks operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListLocalDisks for more information on using the ListLocalDisks // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListLocalDisksRequest method. // req, resp := client.ListLocalDisksRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListLocalDisks func (c *StorageGateway) ListLocalDisksRequest(input *ListLocalDisksInput) (req *request.Request, output *ListLocalDisksOutput) { op := &request.Operation{ Name: opListLocalDisks, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ListLocalDisksInput{} } output = &ListLocalDisksOutput{} req = c.newRequest(op, input, output) return } // ListLocalDisks API operation for AWS Storage Gateway. // // Returns a list of the gateway's local disks. To specify which gateway to // describe, you use the Amazon Resource Name (ARN) of the gateway in the body // of the request. // // The request returns a list of all disks, specifying which are configured // as working storage, cache storage, or stored volume or not configured at // all. The response includes a DiskStatus field. This field can have a value // of present (the disk is available to use), missing (the disk is no longer // connected to the gateway), or mismatch (the disk node is occupied by a disk // that has incorrect metadata or the disk content is corrupted). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListLocalDisks for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListLocalDisks func (c *StorageGateway) ListLocalDisks(input *ListLocalDisksInput) (*ListLocalDisksOutput, error) { req, out := c.ListLocalDisksRequest(input) return out, req.Send() } // ListLocalDisksWithContext is the same as ListLocalDisks with the addition of // the ability to pass a context and additional request options. // // See ListLocalDisks for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListLocalDisksWithContext(ctx aws.Context, input *ListLocalDisksInput, opts ...request.Option) (*ListLocalDisksOutput, error) { req, out := c.ListLocalDisksRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the // client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListTagsForResourceRequest method. // req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTagsForResource func (c *StorageGateway) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ Name: opListTagsForResource, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &ListTagsForResourceInput{} } output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } // ListTagsForResource API operation for AWS Storage Gateway. // // Lists the tags that have been added to the specified resource. This operation // is supported in storage gateways of all types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListTagsForResource for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTagsForResource func (c *StorageGateway) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } // ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // // See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListTagsForResource method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListTagsForResource operation. // pageNum := 0 // err := client.ListTagsForResourcePages(params, // func(page *storagegateway.ListTagsForResourceOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) } // ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListTagsForResourceInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListTagsForResourceRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { break } } return p.Err() } const opListTapes = "ListTapes" // ListTapesRequest generates a "aws/request.Request" representing the // client's request for the ListTapes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListTapes for more information on using the ListTapes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListTapesRequest method. 
// req, resp := client.ListTapesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTapes func (c *StorageGateway) ListTapesRequest(input *ListTapesInput) (req *request.Request, output *ListTapesOutput) { op := &request.Operation{ Name: opListTapes, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &ListTapesInput{} } output = &ListTapesOutput{} req = c.newRequest(op, input, output) return } // ListTapes API operation for AWS Storage Gateway. // // Lists virtual tapes in your virtual tape library (VTL) and your virtual tape // shelf (VTS). You specify the tapes to list by specifying one or more tape // Amazon Resource Names (ARNs). If you don't specify a tape ARN, the operation // lists all virtual tapes in both your VTL and VTS. // // This operation supports pagination. By default, the operation returns a maximum // of up to 100 tapes. You can optionally specify the Limit parameter in the // body to limit the number of tapes in the response. If the number of tapes // returned in the response is truncated, the response includes a Marker element // that you can use in your subsequent request to retrieve the next set of tapes. // This operation is only supported in the tape gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListTapes for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTapes func (c *StorageGateway) ListTapes(input *ListTapesInput) (*ListTapesOutput, error) { req, out := c.ListTapesRequest(input) return out, req.Send() } // ListTapesWithContext is the same as ListTapes with the addition of // the ability to pass a context and additional request options. // // See ListTapes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListTapesWithContext(ctx aws.Context, input *ListTapesInput, opts ...request.Option) (*ListTapesOutput, error) { req, out := c.ListTapesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListTapesPages iterates over the pages of a ListTapes operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListTapes method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListTapes operation. 
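//    // Returning false after three pages stops the iteration early.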
// pageNum := 0 // err := client.ListTapesPages(params, // func(page *storagegateway.ListTapesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) ListTapesPages(input *ListTapesInput, fn func(*ListTapesOutput, bool) bool) error { return c.ListTapesPagesWithContext(aws.BackgroundContext(), input, fn) } // ListTapesPagesWithContext is the same as ListTapesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListTapesPagesWithContext(ctx aws.Context, input *ListTapesInput, fn func(*ListTapesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListTapesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListTapesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*ListTapesOutput), !p.HasNextPage()) { break } } return p.Err() } const opListVolumeInitiators = "ListVolumeInitiators" // ListVolumeInitiatorsRequest generates a "aws/request.Request" representing the // client's request for the ListVolumeInitiators operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListVolumeInitiators for more information on using the ListVolumeInitiators // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListVolumeInitiatorsRequest method. // req, resp := client.ListVolumeInitiatorsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumeInitiators func (c *StorageGateway) ListVolumeInitiatorsRequest(input *ListVolumeInitiatorsInput) (req *request.Request, output *ListVolumeInitiatorsOutput) { op := &request.Operation{ Name: opListVolumeInitiators, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ListVolumeInitiatorsInput{} } output = &ListVolumeInitiatorsOutput{} req = c.newRequest(op, input, output) return } // ListVolumeInitiators API operation for AWS Storage Gateway. // // Lists iSCSI initiators that are connected to a volume. You can use this operation // to determine whether a volume is being used or not. This operation is only // supported in the cached volume and stored volume gateway types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListVolumeInitiators for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service.
For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumeInitiators func (c *StorageGateway) ListVolumeInitiators(input *ListVolumeInitiatorsInput) (*ListVolumeInitiatorsOutput, error) { req, out := c.ListVolumeInitiatorsRequest(input) return out, req.Send() } // ListVolumeInitiatorsWithContext is the same as ListVolumeInitiators with the addition of // the ability to pass a context and additional request options. // // See ListVolumeInitiators for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListVolumeInitiatorsWithContext(ctx aws.Context, input *ListVolumeInitiatorsInput, opts ...request.Option) (*ListVolumeInitiatorsOutput, error) { req, out := c.ListVolumeInitiatorsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListVolumeRecoveryPoints = "ListVolumeRecoveryPoints" // ListVolumeRecoveryPointsRequest generates a "aws/request.Request" representing the // client's request for the ListVolumeRecoveryPoints operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListVolumeRecoveryPoints for more information on using the ListVolumeRecoveryPoints // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListVolumeRecoveryPointsRequest method. // req, resp := client.ListVolumeRecoveryPointsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumeRecoveryPoints func (c *StorageGateway) ListVolumeRecoveryPointsRequest(input *ListVolumeRecoveryPointsInput) (req *request.Request, output *ListVolumeRecoveryPointsOutput) { op := &request.Operation{ Name: opListVolumeRecoveryPoints, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ListVolumeRecoveryPointsInput{} } output = &ListVolumeRecoveryPointsOutput{} req = c.newRequest(op, input, output) return } // ListVolumeRecoveryPoints API operation for AWS Storage Gateway. // // Lists the recovery points for a specified gateway. This operation is only // supported in the cached volume gateway type. // // Each cache volume has one recovery point. A volume recovery point is a point // in time at which all data of the volume is consistent and from which you // can create a snapshot or clone a new cached volume from a source volume. // To create a snapshot from a volume recovery point use the CreateSnapshotFromVolumeRecoveryPoint // operation. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListVolumeRecoveryPoints for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumeRecoveryPoints func (c *StorageGateway) ListVolumeRecoveryPoints(input *ListVolumeRecoveryPointsInput) (*ListVolumeRecoveryPointsOutput, error) { req, out := c.ListVolumeRecoveryPointsRequest(input) return out, req.Send() } // ListVolumeRecoveryPointsWithContext is the same as ListVolumeRecoveryPoints with the addition of // the ability to pass a context and additional request options. // // See ListVolumeRecoveryPoints for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListVolumeRecoveryPointsWithContext(ctx aws.Context, input *ListVolumeRecoveryPointsInput, opts ...request.Option) (*ListVolumeRecoveryPointsOutput, error) { req, out := c.ListVolumeRecoveryPointsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListVolumes = "ListVolumes" // ListVolumesRequest generates a "aws/request.Request" representing the // client's request for the ListVolumes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ListVolumes for more information on using the ListVolumes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ListVolumesRequest method. // req, resp := client.ListVolumesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumes func (c *StorageGateway) ListVolumesRequest(input *ListVolumesInput) (req *request.Request, output *ListVolumesOutput) { op := &request.Operation{ Name: opListVolumes, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"Marker"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { input = &ListVolumesInput{} } output = &ListVolumesOutput{} req = c.newRequest(op, input, output) return } // ListVolumes API operation for AWS Storage Gateway. // // Lists the iSCSI stored volumes of a gateway. Results are sorted by volume // ARN. The response includes only the volume ARNs. 
If you want additional volume // information, use the DescribeStorediSCSIVolumes or the DescribeCachediSCSIVolumes // API. // // The operation supports pagination. By default, the operation returns a maximum // of up to 100 volumes. You can optionally specify the Limit field in the body // to limit the number of volumes in the response. If the number of volumes // returned in the response is truncated, the response includes a Marker field. // You can use this Marker value in your subsequent request to retrieve the // next set of volumes. This operation is only supported in the cached volume // and stored volume gateway types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ListVolumes for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListVolumes func (c *StorageGateway) ListVolumes(input *ListVolumesInput) (*ListVolumesOutput, error) { req, out := c.ListVolumesRequest(input) return out, req.Send() } // ListVolumesWithContext is the same as ListVolumes with the addition of // the ability to pass a context and additional request options. // // See ListVolumes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ListVolumesWithContext(ctx aws.Context, input *ListVolumesInput, opts ...request.Option) (*ListVolumesOutput, error) { req, out := c.ListVolumesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListVolumesPages iterates over the pages of a ListVolumes operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListVolumes method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListVolumes operation. // pageNum := 0 // err := client.ListVolumesPages(params, // func(page *storagegateway.ListVolumesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *StorageGateway) ListVolumesPages(input *ListVolumesInput, fn func(*ListVolumesOutput, bool) bool) error { return c.ListVolumesPagesWithContext(aws.BackgroundContext(), input, fn) } // ListVolumesPagesWithContext is the same as ListVolumesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts.
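//
// A minimal sketch (not part of the generated documentation) of paginating
// ListVolumes under a deadline; it assumes client is an initialized
// *storagegateway.StorageGateway and the gateway ARN is a placeholder:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := client.ListVolumesPagesWithContext(ctx,
//        &storagegateway.ListVolumesInput{
//            GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
//        },
//        func(page *storagegateway.ListVolumesOutput, lastPage bool) bool {
//            for _, v := range page.VolumeInfos {
//                fmt.Println(aws.StringValue(v.VolumeARN))
//            }
//            return true // keep iterating until the last page
//        })
//    if err != nil {
//        // handle error
//    }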
func (c *StorageGateway) ListVolumesPagesWithContext(ctx aws.Context, input *ListVolumesInput, fn func(*ListVolumesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListVolumesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListVolumesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { if !fn(p.Page().(*ListVolumesOutput), !p.HasNextPage()) { break } } return p.Err() } const opNotifyWhenUploaded = "NotifyWhenUploaded" // NotifyWhenUploadedRequest generates a "aws/request.Request" representing the // client's request for the NotifyWhenUploaded operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See NotifyWhenUploaded for more information on using the NotifyWhenUploaded // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the NotifyWhenUploadedRequest method. // req, resp := client.NotifyWhenUploadedRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/NotifyWhenUploaded func (c *StorageGateway) NotifyWhenUploadedRequest(input *NotifyWhenUploadedInput) (req *request.Request, output *NotifyWhenUploadedOutput) { op := &request.Operation{ Name: opNotifyWhenUploaded, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &NotifyWhenUploadedInput{} } output = &NotifyWhenUploadedOutput{} req = c.newRequest(op, input, output) return } // NotifyWhenUploaded API operation for AWS Storage Gateway. // // Sends you notification through CloudWatch Events when all files written to // your file share have been uploaded to Amazon S3. // // AWS Storage Gateway can send a notification through Amazon CloudWatch Events // when all files written to your file share up to that point in time have been // uploaded to Amazon S3. These files include files written to the file share // up to the time that you make a request for notification. When the upload // is done, Storage Gateway sends you notification through an Amazon CloudWatch // Event. You can configure CloudWatch Events to send the notification through // event targets such as Amazon SNS or AWS Lambda function. This operation is // only supported for file gateways. // // For more information, see Getting file upload notification (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification) // in the AWS Storage Gateway User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation NotifyWhenUploaded for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. 
// // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/NotifyWhenUploaded func (c *StorageGateway) NotifyWhenUploaded(input *NotifyWhenUploadedInput) (*NotifyWhenUploadedOutput, error) { req, out := c.NotifyWhenUploadedRequest(input) return out, req.Send() } // NotifyWhenUploadedWithContext is the same as NotifyWhenUploaded with the addition of // the ability to pass a context and additional request options. // // See NotifyWhenUploaded for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) NotifyWhenUploadedWithContext(ctx aws.Context, input *NotifyWhenUploadedInput, opts ...request.Option) (*NotifyWhenUploadedOutput, error) { req, out := c.NotifyWhenUploadedRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opRefreshCache = "RefreshCache" // RefreshCacheRequest generates a "aws/request.Request" representing the // client's request for the RefreshCache operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See RefreshCache for more information on using the RefreshCache // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the RefreshCacheRequest method. // req, resp := client.RefreshCacheRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RefreshCache func (c *StorageGateway) RefreshCacheRequest(input *RefreshCacheInput) (req *request.Request, output *RefreshCacheOutput) { op := &request.Operation{ Name: opRefreshCache, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &RefreshCacheInput{} } output = &RefreshCacheOutput{} req = c.newRequest(op, input, output) return } // RefreshCache API operation for AWS Storage Gateway. // // Refreshes the cache for the specified file share. This operation finds objects // in the Amazon S3 bucket that were added, removed or replaced since the gateway // last listed the bucket's contents and cached the results. This operation // is only supported in the file gateway type. You can subscribe to be notified // through an Amazon CloudWatch event when your RefreshCache operation completes. // For more information, see Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) // in the AWS Storage Gateway User Guide. // // When this API is called, it only initiates the refresh operation. When the // API call completes and returns a success code, it doesn't necessarily mean // that the file refresh has completed. 
You should use the refresh-complete // notification to determine that the operation has completed before you check // for new files on the gateway file share. You can subscribe to be notified // through a CloudWatch event when your RefreshCache operation completes. // // Throttle limit: This API is asynchronous, so the gateway will accept no more // than two refreshes at any time. We recommend using the refresh-complete CloudWatch // event notification before issuing additional requests. For more information, // see Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) // in the AWS Storage Gateway User Guide. // // If you invoke the RefreshCache API when two requests are already being processed, // any new request will cause an InvalidGatewayRequestException error because // too many requests were sent to the server. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation RefreshCache for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RefreshCache func (c *StorageGateway) RefreshCache(input *RefreshCacheInput) (*RefreshCacheOutput, error) { req, out := c.RefreshCacheRequest(input) return out, req.Send() } // RefreshCacheWithContext is the same as RefreshCache with the addition of // the ability to pass a context and additional request options. // // See RefreshCache for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) RefreshCacheWithContext(ctx aws.Context, input *RefreshCacheInput, opts ...request.Option) (*RefreshCacheOutput, error) { req, out := c.RefreshCacheRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opRemoveTagsFromResource = "RemoveTagsFromResource" // RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the // client's request for the RemoveTagsFromResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See RemoveTagsFromResource for more information on using the RemoveTagsFromResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle.
Such as custom headers, or retry logic. // // // // Example sending a request using the RemoveTagsFromResourceRequest method. // req, resp := client.RemoveTagsFromResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RemoveTagsFromResource func (c *StorageGateway) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { op := &request.Operation{ Name: opRemoveTagsFromResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &RemoveTagsFromResourceInput{} } output = &RemoveTagsFromResourceOutput{} req = c.newRequest(op, input, output) return } // RemoveTagsFromResource API operation for AWS Storage Gateway. // // Removes one or more tags from the specified resource. This operation is supported // in storage gateways of all types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation RemoveTagsFromResource for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RemoveTagsFromResource func (c *StorageGateway) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { req, out := c.RemoveTagsFromResourceRequest(input) return out, req.Send() } // RemoveTagsFromResourceWithContext is the same as RemoveTagsFromResource with the addition of // the ability to pass a context and additional request options. // // See RemoveTagsFromResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) RemoveTagsFromResourceWithContext(ctx aws.Context, input *RemoveTagsFromResourceInput, opts ...request.Option) (*RemoveTagsFromResourceOutput, error) { req, out := c.RemoveTagsFromResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opResetCache = "ResetCache" // ResetCacheRequest generates a "aws/request.Request" representing the // client's request for the ResetCache operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ResetCache for more information on using the ResetCache // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ResetCacheRequest method. 
// req, resp := client.ResetCacheRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ResetCache func (c *StorageGateway) ResetCacheRequest(input *ResetCacheInput) (req *request.Request, output *ResetCacheOutput) { op := &request.Operation{ Name: opResetCache, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ResetCacheInput{} } output = &ResetCacheOutput{} req = c.newRequest(op, input, output) return } // ResetCache API operation for AWS Storage Gateway. // // Resets all cache disks that have encountered an error and makes the disks // available for reconfiguration as cache storage. If your cache disk encounters // an error, the gateway prevents read and write operations on virtual tapes // in the gateway. For example, an error can occur when a disk is corrupted // or removed from the gateway. When a cache is reset, the gateway loses its // cache storage. At this point, you can reconfigure the disks as cache disks. // This operation is only supported in the cached volume and tape types. // // If the cache disk you are resetting contains data that has not been uploaded // to Amazon S3 yet, that data can be lost. After you reset cache disks, there // will be no configured cache disks left in the gateway, so you must configure // at least one new cache disk for your gateway to function properly. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ResetCache for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ResetCache func (c *StorageGateway) ResetCache(input *ResetCacheInput) (*ResetCacheOutput, error) { req, out := c.ResetCacheRequest(input) return out, req.Send() } // ResetCacheWithContext is the same as ResetCache with the addition of // the ability to pass a context and additional request options. // // See ResetCache for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ResetCacheWithContext(ctx aws.Context, input *ResetCacheInput, opts ...request.Option) (*ResetCacheOutput, error) { req, out := c.ResetCacheRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opRetrieveTapeArchive = "RetrieveTapeArchive" // RetrieveTapeArchiveRequest generates a "aws/request.Request" representing the // client's request for the RetrieveTapeArchive operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // // See RetrieveTapeArchive for more information on using the RetrieveTapeArchive // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the RetrieveTapeArchiveRequest method. // req, resp := client.RetrieveTapeArchiveRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RetrieveTapeArchive func (c *StorageGateway) RetrieveTapeArchiveRequest(input *RetrieveTapeArchiveInput) (req *request.Request, output *RetrieveTapeArchiveOutput) { op := &request.Operation{ Name: opRetrieveTapeArchive, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &RetrieveTapeArchiveInput{} } output = &RetrieveTapeArchiveOutput{} req = c.newRequest(op, input, output) return } // RetrieveTapeArchive API operation for AWS Storage Gateway. // // Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a // tape gateway. Virtual tapes archived in the VTS are not associated with any // gateway. However after a tape is retrieved, it is associated with a gateway, // even though it is also listed in the VTS, that is, archive. This operation // is only supported in the tape gateway type. // // Once a tape is successfully retrieved to a gateway, it cannot be retrieved // again to another gateway. You must archive the tape again before you can // retrieve it to another gateway. This operation is only supported in the tape // gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation RetrieveTapeArchive for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RetrieveTapeArchive func (c *StorageGateway) RetrieveTapeArchive(input *RetrieveTapeArchiveInput) (*RetrieveTapeArchiveOutput, error) { req, out := c.RetrieveTapeArchiveRequest(input) return out, req.Send() } // RetrieveTapeArchiveWithContext is the same as RetrieveTapeArchive with the addition of // the ability to pass a context and additional request options. // // See RetrieveTapeArchive for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) RetrieveTapeArchiveWithContext(ctx aws.Context, input *RetrieveTapeArchiveInput, opts ...request.Option) (*RetrieveTapeArchiveOutput, error) { req, out := c.RetrieveTapeArchiveRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } const opRetrieveTapeRecoveryPoint = "RetrieveTapeRecoveryPoint" // RetrieveTapeRecoveryPointRequest generates a "aws/request.Request" representing the // client's request for the RetrieveTapeRecoveryPoint operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See RetrieveTapeRecoveryPoint for more information on using the RetrieveTapeRecoveryPoint // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the RetrieveTapeRecoveryPointRequest method. // req, resp := client.RetrieveTapeRecoveryPointRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RetrieveTapeRecoveryPoint func (c *StorageGateway) RetrieveTapeRecoveryPointRequest(input *RetrieveTapeRecoveryPointInput) (req *request.Request, output *RetrieveTapeRecoveryPointOutput) { op := &request.Operation{ Name: opRetrieveTapeRecoveryPoint, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &RetrieveTapeRecoveryPointInput{} } output = &RetrieveTapeRecoveryPointOutput{} req = c.newRequest(op, input, output) return } // RetrieveTapeRecoveryPoint API operation for AWS Storage Gateway. // // Retrieves the recovery point for the specified virtual tape. This operation // is only supported in the tape gateway type. // // A recovery point is a point in time view of a virtual tape at which all the // data on the tape is consistent. If your gateway crashes, virtual tapes that // have recovery points can be recovered to a new gateway. // // The virtual tape can be retrieved to only one gateway. The retrieved tape // is read-only. The virtual tape can be retrieved to only a tape gateway. There // is no charge for retrieving recovery points. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation RetrieveTapeRecoveryPoint for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/RetrieveTapeRecoveryPoint func (c *StorageGateway) RetrieveTapeRecoveryPoint(input *RetrieveTapeRecoveryPointInput) (*RetrieveTapeRecoveryPointOutput, error) { req, out := c.RetrieveTapeRecoveryPointRequest(input) return out, req.Send() } // RetrieveTapeRecoveryPointWithContext is the same as RetrieveTapeRecoveryPoint with the addition of // the ability to pass a context and additional request options. // // See RetrieveTapeRecoveryPoint for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) RetrieveTapeRecoveryPointWithContext(ctx aws.Context, input *RetrieveTapeRecoveryPointInput, opts ...request.Option) (*RetrieveTapeRecoveryPointOutput, error) { req, out := c.RetrieveTapeRecoveryPointRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opSetLocalConsolePassword = "SetLocalConsolePassword" // SetLocalConsolePasswordRequest generates a "aws/request.Request" representing the // client's request for the SetLocalConsolePassword operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See SetLocalConsolePassword for more information on using the SetLocalConsolePassword // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the SetLocalConsolePasswordRequest method. // req, resp := client.SetLocalConsolePasswordRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/SetLocalConsolePassword func (c *StorageGateway) SetLocalConsolePasswordRequest(input *SetLocalConsolePasswordInput) (req *request.Request, output *SetLocalConsolePasswordOutput) { op := &request.Operation{ Name: opSetLocalConsolePassword, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &SetLocalConsolePasswordInput{} } output = &SetLocalConsolePasswordOutput{} req = c.newRequest(op, input, output) return } // SetLocalConsolePassword API operation for AWS Storage Gateway. // // Sets the password for your VM local console. When you log in to the local // console for the first time, you log in to the VM with the default credentials. // We recommend that you set a new password. You don't need to know the default // password to set a new password. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation SetLocalConsolePassword for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/SetLocalConsolePassword func (c *StorageGateway) SetLocalConsolePassword(input *SetLocalConsolePasswordInput) (*SetLocalConsolePasswordOutput, error) { req, out := c.SetLocalConsolePasswordRequest(input) return out, req.Send() } // SetLocalConsolePasswordWithContext is the same as SetLocalConsolePassword with the addition of // the ability to pass a context and additional request options. 
// // See SetLocalConsolePassword for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) SetLocalConsolePasswordWithContext(ctx aws.Context, input *SetLocalConsolePasswordInput, opts ...request.Option) (*SetLocalConsolePasswordOutput, error) { req, out := c.SetLocalConsolePasswordRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opSetSMBGuestPassword = "SetSMBGuestPassword" // SetSMBGuestPasswordRequest generates a "aws/request.Request" representing the // client's request for the SetSMBGuestPassword operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See SetSMBGuestPassword for more information on using the SetSMBGuestPassword // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the SetSMBGuestPasswordRequest method. // req, resp := client.SetSMBGuestPasswordRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/SetSMBGuestPassword func (c *StorageGateway) SetSMBGuestPasswordRequest(input *SetSMBGuestPasswordInput) (req *request.Request, output *SetSMBGuestPasswordOutput) { op := &request.Operation{ Name: opSetSMBGuestPassword, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &SetSMBGuestPasswordInput{} } output = &SetSMBGuestPasswordOutput{} req = c.newRequest(op, input, output) return } // SetSMBGuestPassword API operation for AWS Storage Gateway. // // Sets the password for the guest user smbguest. The smbguest user is the user // when the authentication method for the file share is set to GuestAccess. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation SetSMBGuestPassword for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/SetSMBGuestPassword func (c *StorageGateway) SetSMBGuestPassword(input *SetSMBGuestPasswordInput) (*SetSMBGuestPasswordOutput, error) { req, out := c.SetSMBGuestPasswordRequest(input) return out, req.Send() } // SetSMBGuestPasswordWithContext is the same as SetSMBGuestPassword with the addition of // the ability to pass a context and additional request options. // // See SetSMBGuestPassword for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) SetSMBGuestPasswordWithContext(ctx aws.Context, input *SetSMBGuestPasswordInput, opts ...request.Option) (*SetSMBGuestPasswordOutput, error) { req, out := c.SetSMBGuestPasswordRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opShutdownGateway = "ShutdownGateway" // ShutdownGatewayRequest generates a "aws/request.Request" representing the // client's request for the ShutdownGateway operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See ShutdownGateway for more information on using the ShutdownGateway // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the ShutdownGatewayRequest method. // req, resp := client.ShutdownGatewayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ShutdownGateway func (c *StorageGateway) ShutdownGatewayRequest(input *ShutdownGatewayInput) (req *request.Request, output *ShutdownGatewayOutput) { op := &request.Operation{ Name: opShutdownGateway, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &ShutdownGatewayInput{} } output = &ShutdownGatewayOutput{} req = c.newRequest(op, input, output) return } // ShutdownGateway API operation for AWS Storage Gateway. // // Shuts down a gateway. To specify which gateway to shut down, use the Amazon // Resource Name (ARN) of the gateway in the body of your request. // // The operation shuts down the gateway service component running in the gateway's // virtual machine (VM) and not the host VM. // // If you want to shut down the VM, it is recommended that you first shut down // the gateway component in the VM to avoid unpredictable conditions. // // After the gateway is shut down, you cannot call any other API except StartGateway, // DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. // Your applications cannot read from or write to the gateway's storage volumes, // and there are no snapshots taken. // // When you make a shutdown request, you will get a 200 OK success response // immediately. However, it might take some time for the gateway to shut down. // You can call the DescribeGatewayInformation API to check the status. For // more information, see ActivateGateway. // // If you do not intend to use the gateway again, you must delete the gateway (using // DeleteGateway) so that you no longer pay software charges associated with the gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation ShutdownGateway for usage and error information.
// // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ShutdownGateway func (c *StorageGateway) ShutdownGateway(input *ShutdownGatewayInput) (*ShutdownGatewayOutput, error) { req, out := c.ShutdownGatewayRequest(input) return out, req.Send() } // ShutdownGatewayWithContext is the same as ShutdownGateway with the addition of // the ability to pass a context and additional request options. // // See ShutdownGateway for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) ShutdownGatewayWithContext(ctx aws.Context, input *ShutdownGatewayInput, opts ...request.Option) (*ShutdownGatewayOutput, error) { req, out := c.ShutdownGatewayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opStartAvailabilityMonitorTest = "StartAvailabilityMonitorTest" // StartAvailabilityMonitorTestRequest generates a "aws/request.Request" representing the // client's request for the StartAvailabilityMonitorTest operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See StartAvailabilityMonitorTest for more information on using the StartAvailabilityMonitorTest // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the StartAvailabilityMonitorTestRequest method. // req, resp := client.StartAvailabilityMonitorTestRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/StartAvailabilityMonitorTest func (c *StorageGateway) StartAvailabilityMonitorTestRequest(input *StartAvailabilityMonitorTestInput) (req *request.Request, output *StartAvailabilityMonitorTestOutput) { op := &request.Operation{ Name: opStartAvailabilityMonitorTest, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartAvailabilityMonitorTestInput{} } output = &StartAvailabilityMonitorTestOutput{} req = c.newRequest(op, input, output) return } // StartAvailabilityMonitorTest API operation for AWS Storage Gateway. // // Starts a test that verifies that the specified gateway is configured for High // Availability monitoring in your host environment. This request only initiates // the test; a successful response only indicates that the test was // started. It doesn't indicate that the test passed. For the status of the // test, invoke the DescribeAvailabilityMonitorTest API. // // Starting this test will cause your gateway to go offline for a brief period.
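//
// A minimal sketch (not part of the generated documentation) of starting the
// test and then checking its status; it assumes client is an initialized
// *storagegateway.StorageGateway and the gateway ARN is a placeholder:
//
//    arn := aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B")
//    _, err := client.StartAvailabilityMonitorTest(&storagegateway.StartAvailabilityMonitorTestInput{
//        GatewayARN: arn,
//    })
//    if err != nil {
//        // handle error
//    }
//    out, err := client.DescribeAvailabilityMonitorTest(&storagegateway.DescribeAvailabilityMonitorTestInput{
//        GatewayARN: arn,
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Status)) // COMPLETE, FAILED, or PENDING
//    }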
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation StartAvailabilityMonitorTest for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/StartAvailabilityMonitorTest func (c *StorageGateway) StartAvailabilityMonitorTest(input *StartAvailabilityMonitorTestInput) (*StartAvailabilityMonitorTestOutput, error) { req, out := c.StartAvailabilityMonitorTestRequest(input) return out, req.Send() } // StartAvailabilityMonitorTestWithContext is the same as StartAvailabilityMonitorTest with the addition of // the ability to pass a context and additional request options. // // See StartAvailabilityMonitorTest for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) StartAvailabilityMonitorTestWithContext(ctx aws.Context, input *StartAvailabilityMonitorTestInput, opts ...request.Option) (*StartAvailabilityMonitorTestOutput, error) { req, out := c.StartAvailabilityMonitorTestRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opStartGateway = "StartGateway" // StartGatewayRequest generates a "aws/request.Request" representing the // client's request for the StartGateway operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See StartGateway for more information on using the StartGateway // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the StartGatewayRequest method. // req, resp := client.StartGatewayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/StartGateway func (c *StorageGateway) StartGatewayRequest(input *StartGatewayInput) (req *request.Request, output *StartGatewayOutput) { op := &request.Operation{ Name: opStartGateway, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &StartGatewayInput{} } output = &StartGatewayOutput{} req = c.newRequest(op, input, output) return } // StartGateway API operation for AWS Storage Gateway. // // Starts a gateway that you previously shut down (see ShutdownGateway). After // the gateway starts, you can then make other API calls, your applications // can read from or write to the gateway's storage volumes and you will be able // to take snapshot backups. 
// // When you make a request, you will get a 200 OK success response immediately. // However, it might take some time for the gateway to be ready. You should // call DescribeGatewayInformation and check the status before making any additional // API calls. For more information, see ActivateGateway. // // To specify which gateway to start, use the Amazon Resource Name (ARN) of // the gateway in your request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation StartGateway for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/StartGateway func (c *StorageGateway) StartGateway(input *StartGatewayInput) (*StartGatewayOutput, error) { req, out := c.StartGatewayRequest(input) return out, req.Send() } // StartGatewayWithContext is the same as StartGateway with the addition of // the ability to pass a context and additional request options. // // See StartGateway for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) StartGatewayWithContext(ctx aws.Context, input *StartGatewayInput, opts ...request.Option) (*StartGatewayOutput, error) { req, out := c.StartGatewayRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateAutomaticTapeCreationPolicy = "UpdateAutomaticTapeCreationPolicy" // UpdateAutomaticTapeCreationPolicyRequest generates a "aws/request.Request" representing the // client's request for the UpdateAutomaticTapeCreationPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateAutomaticTapeCreationPolicy for more information on using the UpdateAutomaticTapeCreationPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateAutomaticTapeCreationPolicyRequest method. 
// req, resp := client.UpdateAutomaticTapeCreationPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateAutomaticTapeCreationPolicy func (c *StorageGateway) UpdateAutomaticTapeCreationPolicyRequest(input *UpdateAutomaticTapeCreationPolicyInput) (req *request.Request, output *UpdateAutomaticTapeCreationPolicyOutput) { op := &request.Operation{ Name: opUpdateAutomaticTapeCreationPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateAutomaticTapeCreationPolicyInput{} } output = &UpdateAutomaticTapeCreationPolicyOutput{} req = c.newRequest(op, input, output) return } // UpdateAutomaticTapeCreationPolicy API operation for AWS Storage Gateway. // // Updates the automatic tape creation policy of a gateway. Use this to update // the policy with a new set of automatic tape creation rules. This is only // supported for tape gateways. // // By default, there is no automatic tape creation policy. // // A gateway can have only one automatic tape creation policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateAutomaticTapeCreationPolicy for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateAutomaticTapeCreationPolicy func (c *StorageGateway) UpdateAutomaticTapeCreationPolicy(input *UpdateAutomaticTapeCreationPolicyInput) (*UpdateAutomaticTapeCreationPolicyOutput, error) { req, out := c.UpdateAutomaticTapeCreationPolicyRequest(input) return out, req.Send() } // UpdateAutomaticTapeCreationPolicyWithContext is the same as UpdateAutomaticTapeCreationPolicy with the addition of // the ability to pass a context and additional request options. // // See UpdateAutomaticTapeCreationPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateAutomaticTapeCreationPolicyWithContext(ctx aws.Context, input *UpdateAutomaticTapeCreationPolicyInput, opts ...request.Option) (*UpdateAutomaticTapeCreationPolicyOutput, error) { req, out := c.UpdateAutomaticTapeCreationPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateBandwidthRateLimit = "UpdateBandwidthRateLimit" // UpdateBandwidthRateLimitRequest generates a "aws/request.Request" representing the // client's request for the UpdateBandwidthRateLimit operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // // See UpdateBandwidthRateLimit for more information on using the UpdateBandwidthRateLimit // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateBandwidthRateLimitRequest method. // req, resp := client.UpdateBandwidthRateLimitRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateBandwidthRateLimit func (c *StorageGateway) UpdateBandwidthRateLimitRequest(input *UpdateBandwidthRateLimitInput) (req *request.Request, output *UpdateBandwidthRateLimitOutput) { op := &request.Operation{ Name: opUpdateBandwidthRateLimit, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateBandwidthRateLimitInput{} } output = &UpdateBandwidthRateLimitOutput{} req = c.newRequest(op, input, output) return } // UpdateBandwidthRateLimit API operation for AWS Storage Gateway. // // Updates the bandwidth rate limits of a gateway. You can update both the upload // and download bandwidth rate limit or specify only one of the two. If you // don't set a bandwidth rate limit, the existing rate limit remains. This operation // is supported for the stored volume, cached volume, and tape gateway types. // // By default, a gateway's bandwidth rate limits are not set. If you don't set // any limit, the gateway does not have any limitations on its bandwidth usage // and could potentially use the maximum available bandwidth. // // To specify which gateway to update, use the Amazon Resource Name (ARN) of // the gateway in your request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateBandwidthRateLimit for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateBandwidthRateLimit func (c *StorageGateway) UpdateBandwidthRateLimit(input *UpdateBandwidthRateLimitInput) (*UpdateBandwidthRateLimitOutput, error) { req, out := c.UpdateBandwidthRateLimitRequest(input) return out, req.Send() } // UpdateBandwidthRateLimitWithContext is the same as UpdateBandwidthRateLimit with the addition of // the ability to pass a context and additional request options. // // See UpdateBandwidthRateLimit for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
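//
// For example (a minimal sketch; svc and input are assumed to exist elsewhere,
// and the 30-second timeout is arbitrary), a deadline can be applied to the
// call through the context:
//
//    ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 30*time.Second)
//    defer cancel()
//    out, err := svc.UpdateBandwidthRateLimitWithContext(ctx, input)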
func (c *StorageGateway) UpdateBandwidthRateLimitWithContext(ctx aws.Context, input *UpdateBandwidthRateLimitInput, opts ...request.Option) (*UpdateBandwidthRateLimitOutput, error) { req, out := c.UpdateBandwidthRateLimitRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateChapCredentials = "UpdateChapCredentials" // UpdateChapCredentialsRequest generates a "aws/request.Request" representing the // client's request for the UpdateChapCredentials operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateChapCredentials for more information on using the UpdateChapCredentials // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateChapCredentialsRequest method. // req, resp := client.UpdateChapCredentialsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateChapCredentials func (c *StorageGateway) UpdateChapCredentialsRequest(input *UpdateChapCredentialsInput) (req *request.Request, output *UpdateChapCredentialsOutput) { op := &request.Operation{ Name: opUpdateChapCredentials, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateChapCredentialsInput{} } output = &UpdateChapCredentialsOutput{} req = c.newRequest(op, input, output) return } // UpdateChapCredentials API operation for AWS Storage Gateway. // // Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials // for a specified iSCSI target. By default, a gateway does not have CHAP enabled; // however, for added security, you might use it. This operation is supported // in the volume and tape gateway types. // // When you update CHAP credentials, all existing connections on the target // are closed and initiators must reconnect with the new credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateChapCredentials for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateChapCredentials func (c *StorageGateway) UpdateChapCredentials(input *UpdateChapCredentialsInput) (*UpdateChapCredentialsOutput, error) { req, out := c.UpdateChapCredentialsRequest(input) return out, req.Send() } // UpdateChapCredentialsWithContext is the same as UpdateChapCredentials with the addition of // the ability to pass a context and additional request options. // // See UpdateChapCredentials for details on how to use this API operation. 
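//
// A minimal sketch of the input (all values below are placeholders; svc and
// ctx are assumed to exist elsewhere):
//
//    out, err := svc.UpdateChapCredentialsWithContext(ctx, &storagegateway.UpdateChapCredentialsInput{
//        TargetARN:                     aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume"),
//        InitiatorName:                 aws.String("iqn.1991-05.com.microsoft:host.example.com"),
//        SecretToAuthenticateInitiator: aws.String("sup3rs3cr3tch4ps3cr3t"),
//    })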
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateChapCredentialsWithContext(ctx aws.Context, input *UpdateChapCredentialsInput, opts ...request.Option) (*UpdateChapCredentialsOutput, error) { req, out := c.UpdateChapCredentialsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateGatewayInformation = "UpdateGatewayInformation" // UpdateGatewayInformationRequest generates a "aws/request.Request" representing the // client's request for the UpdateGatewayInformation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateGatewayInformation for more information on using the UpdateGatewayInformation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateGatewayInformationRequest method. // req, resp := client.UpdateGatewayInformationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewayInformation func (c *StorageGateway) UpdateGatewayInformationRequest(input *UpdateGatewayInformationInput) (req *request.Request, output *UpdateGatewayInformationOutput) { op := &request.Operation{ Name: opUpdateGatewayInformation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateGatewayInformationInput{} } output = &UpdateGatewayInformationOutput{} req = c.newRequest(op, input, output) return } // UpdateGatewayInformation API operation for AWS Storage Gateway. // // Updates a gateway's metadata, which includes the gateway's name and time // zone. To specify which gateway to update, use the Amazon Resource Name (ARN) // of the gateway in your request. // // For Gateways activated after September 2, 2015, the gateway's ARN contains // the gateway ID rather than the gateway name. However, changing the name of // the gateway has no effect on the gateway's ARN. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateGatewayInformation for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewayInformation func (c *StorageGateway) UpdateGatewayInformation(input *UpdateGatewayInformationInput) (*UpdateGatewayInformationOutput, error) { req, out := c.UpdateGatewayInformationRequest(input) return out, req.Send() } // UpdateGatewayInformationWithContext is the same as UpdateGatewayInformation with the addition of // the ability to pass a context and additional request options. // // See UpdateGatewayInformation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateGatewayInformationWithContext(ctx aws.Context, input *UpdateGatewayInformationInput, opts ...request.Option) (*UpdateGatewayInformationOutput, error) { req, out := c.UpdateGatewayInformationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateGatewaySoftwareNow = "UpdateGatewaySoftwareNow" // UpdateGatewaySoftwareNowRequest generates a "aws/request.Request" representing the // client's request for the UpdateGatewaySoftwareNow operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateGatewaySoftwareNow for more information on using the UpdateGatewaySoftwareNow // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateGatewaySoftwareNowRequest method. // req, resp := client.UpdateGatewaySoftwareNowRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewaySoftwareNow func (c *StorageGateway) UpdateGatewaySoftwareNowRequest(input *UpdateGatewaySoftwareNowInput) (req *request.Request, output *UpdateGatewaySoftwareNowOutput) { op := &request.Operation{ Name: opUpdateGatewaySoftwareNow, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateGatewaySoftwareNowInput{} } output = &UpdateGatewaySoftwareNowOutput{} req = c.newRequest(op, input, output) return } // UpdateGatewaySoftwareNow API operation for AWS Storage Gateway. // // Updates the gateway virtual machine (VM) software. The request immediately // triggers the software update. // // When you make this request, you get a 200 OK success response immediately. // However, it might take some time for the update to complete. You can call // DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING // state. // // A software update forces a system restart of your gateway. You can minimize // the chance of any disruption to your applications by increasing your iSCSI // Initiators' timeouts. 
For more information about increasing iSCSI Initiator // timeouts for Windows and Linux, see Customizing your Windows iSCSI settings // (https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorWindowsClient.html#CustomizeWindowsiSCSISettings) // and Customizing your Linux iSCSI settings (https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings), // respectively. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateGatewaySoftwareNow for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateGatewaySoftwareNow func (c *StorageGateway) UpdateGatewaySoftwareNow(input *UpdateGatewaySoftwareNowInput) (*UpdateGatewaySoftwareNowOutput, error) { req, out := c.UpdateGatewaySoftwareNowRequest(input) return out, req.Send() } // UpdateGatewaySoftwareNowWithContext is the same as UpdateGatewaySoftwareNow with the addition of // the ability to pass a context and additional request options. // // See UpdateGatewaySoftwareNow for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateGatewaySoftwareNowWithContext(ctx aws.Context, input *UpdateGatewaySoftwareNowInput, opts ...request.Option) (*UpdateGatewaySoftwareNowOutput, error) { req, out := c.UpdateGatewaySoftwareNowRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateMaintenanceStartTime = "UpdateMaintenanceStartTime" // UpdateMaintenanceStartTimeRequest generates a "aws/request.Request" representing the // client's request for the UpdateMaintenanceStartTime operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateMaintenanceStartTime for more information on using the UpdateMaintenanceStartTime // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateMaintenanceStartTimeRequest method. 
// req, resp := client.UpdateMaintenanceStartTimeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateMaintenanceStartTime func (c *StorageGateway) UpdateMaintenanceStartTimeRequest(input *UpdateMaintenanceStartTimeInput) (req *request.Request, output *UpdateMaintenanceStartTimeOutput) { op := &request.Operation{ Name: opUpdateMaintenanceStartTime, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateMaintenanceStartTimeInput{} } output = &UpdateMaintenanceStartTimeOutput{} req = c.newRequest(op, input, output) return } // UpdateMaintenanceStartTime API operation for AWS Storage Gateway. // // Updates a gateway's weekly maintenance start time information, including // day and time of the week. The maintenance time is the time in your gateway's // time zone. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateMaintenanceStartTime for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateMaintenanceStartTime func (c *StorageGateway) UpdateMaintenanceStartTime(input *UpdateMaintenanceStartTimeInput) (*UpdateMaintenanceStartTimeOutput, error) { req, out := c.UpdateMaintenanceStartTimeRequest(input) return out, req.Send() } // UpdateMaintenanceStartTimeWithContext is the same as UpdateMaintenanceStartTime with the addition of // the ability to pass a context and additional request options. // // See UpdateMaintenanceStartTime for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateMaintenanceStartTimeWithContext(ctx aws.Context, input *UpdateMaintenanceStartTimeInput, opts ...request.Option) (*UpdateMaintenanceStartTimeOutput, error) { req, out := c.UpdateMaintenanceStartTimeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateNFSFileShare = "UpdateNFSFileShare" // UpdateNFSFileShareRequest generates a "aws/request.Request" representing the // client's request for the UpdateNFSFileShare operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateNFSFileShare for more information on using the UpdateNFSFileShare // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
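//
// As a sketch of that kind of customization (the header name below is made up
// for illustration), a field can be set on the underlying HTTP request before
// it is sent:
//
//    req, resp := client.UpdateNFSFileShareRequest(params)
//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
//    err := req.Send()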
//
//    // Example sending a request using the UpdateNFSFileShareRequest method.
//    req, resp := client.UpdateNFSFileShareRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateNFSFileShare
func (c *StorageGateway) UpdateNFSFileShareRequest(input *UpdateNFSFileShareInput) (req *request.Request, output *UpdateNFSFileShareOutput) {
	op := &request.Operation{
		Name:       opUpdateNFSFileShare,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateNFSFileShareInput{}
	}

	output = &UpdateNFSFileShareOutput{}
	req = c.newRequest(op, input, output)
	return
}

// UpdateNFSFileShare API operation for AWS Storage Gateway.
//
// Updates a Network File System (NFS) file share. This operation is only supported
// in the file gateway type.
//
// To leave a file share field unchanged, set the corresponding input field
// to null.
//
// Updates the following file share settings:
//
//    * Default storage class for your S3 bucket
//
//    * Metadata defaults for your S3 bucket
//
//    * Allowed NFS clients for your file share
//
//    * Squash settings
//
//    * Write status of your file share
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Storage Gateway's
// API operation UpdateNFSFileShare for usage and error information.
//
// Returned Error Types:
//   * InvalidGatewayRequestException
//   An exception occurred because an invalid gateway request was issued to the
//   service. For more information, see the error and message fields.
//
//   * InternalServerError
//   An internal server error has occurred during the request. For more information,
//   see the error and message fields.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateNFSFileShare
func (c *StorageGateway) UpdateNFSFileShare(input *UpdateNFSFileShareInput) (*UpdateNFSFileShareOutput, error) {
	req, out := c.UpdateNFSFileShareRequest(input)
	return out, req.Send()
}

// UpdateNFSFileShareWithContext is the same as UpdateNFSFileShare with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateNFSFileShare for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *StorageGateway) UpdateNFSFileShareWithContext(ctx aws.Context, input *UpdateNFSFileShareInput, opts ...request.Option) (*UpdateNFSFileShareOutput, error) {
	req, out := c.UpdateNFSFileShareRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opUpdateSMBFileShare = "UpdateSMBFileShare"

// UpdateSMBFileShareRequest generates a "aws/request.Request" representing the
// client's request for the UpdateSMBFileShare operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error. // // See UpdateSMBFileShare for more information on using the UpdateSMBFileShare // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateSMBFileShareRequest method. // req, resp := client.UpdateSMBFileShareRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBFileShare func (c *StorageGateway) UpdateSMBFileShareRequest(input *UpdateSMBFileShareInput) (req *request.Request, output *UpdateSMBFileShareOutput) { op := &request.Operation{ Name: opUpdateSMBFileShare, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateSMBFileShareInput{} } output = &UpdateSMBFileShareOutput{} req = c.newRequest(op, input, output) return } // UpdateSMBFileShare API operation for AWS Storage Gateway. // // Updates a Server Message Block (SMB) file share. // // To leave a file share field unchanged, set the corresponding input field // to null. This operation is only supported for file gateways. // // File gateways require AWS Security Token Service (AWS STS) to be activated // to enable you to create a file share. Make sure that AWS STS is activated // in the AWS Region you are creating your file gateway in. If AWS STS is not // activated in this AWS Region, activate it. For information about how to activate // AWS STS, see Activating and deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the AWS Identity and Access Management User Guide. // // File gateways don't support creating hard or symbolic links on a file share. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateSMBFileShare for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBFileShare func (c *StorageGateway) UpdateSMBFileShare(input *UpdateSMBFileShareInput) (*UpdateSMBFileShareOutput, error) { req, out := c.UpdateSMBFileShareRequest(input) return out, req.Send() } // UpdateSMBFileShareWithContext is the same as UpdateSMBFileShare with the addition of // the ability to pass a context and additional request options. // // See UpdateSMBFileShare for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
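//
// For example (a sketch; svc, ctx, and input are assumed to exist elsewhere),
// per-call request options can be supplied, such as verbose wire logging:
//
//    out, err := svc.UpdateSMBFileShareWithContext(ctx, input,
//        request.WithLogLevel(aws.LogDebugWithHTTPBody))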
func (c *StorageGateway) UpdateSMBFileShareWithContext(ctx aws.Context, input *UpdateSMBFileShareInput, opts ...request.Option) (*UpdateSMBFileShareOutput, error) { req, out := c.UpdateSMBFileShareRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateSMBSecurityStrategy = "UpdateSMBSecurityStrategy" // UpdateSMBSecurityStrategyRequest generates a "aws/request.Request" representing the // client's request for the UpdateSMBSecurityStrategy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateSMBSecurityStrategy for more information on using the UpdateSMBSecurityStrategy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateSMBSecurityStrategyRequest method. // req, resp := client.UpdateSMBSecurityStrategyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBSecurityStrategy func (c *StorageGateway) UpdateSMBSecurityStrategyRequest(input *UpdateSMBSecurityStrategyInput) (req *request.Request, output *UpdateSMBSecurityStrategyOutput) { op := &request.Operation{ Name: opUpdateSMBSecurityStrategy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateSMBSecurityStrategyInput{} } output = &UpdateSMBSecurityStrategyOutput{} req = c.newRequest(op, input, output) return } // UpdateSMBSecurityStrategy API operation for AWS Storage Gateway. // // Updates the SMB security strategy on a file gateway. This action is only // supported in file gateways. // // This API is called Security level in the User Guide. // // A higher security level can affect performance of the gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateSMBSecurityStrategy for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSMBSecurityStrategy func (c *StorageGateway) UpdateSMBSecurityStrategy(input *UpdateSMBSecurityStrategyInput) (*UpdateSMBSecurityStrategyOutput, error) { req, out := c.UpdateSMBSecurityStrategyRequest(input) return out, req.Send() } // UpdateSMBSecurityStrategyWithContext is the same as UpdateSMBSecurityStrategy with the addition of // the ability to pass a context and additional request options. // // See UpdateSMBSecurityStrategy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateSMBSecurityStrategyWithContext(ctx aws.Context, input *UpdateSMBSecurityStrategyInput, opts ...request.Option) (*UpdateSMBSecurityStrategyOutput, error) { req, out := c.UpdateSMBSecurityStrategyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateSnapshotSchedule = "UpdateSnapshotSchedule" // UpdateSnapshotScheduleRequest generates a "aws/request.Request" representing the // client's request for the UpdateSnapshotSchedule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateSnapshotSchedule for more information on using the UpdateSnapshotSchedule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateSnapshotScheduleRequest method. // req, resp := client.UpdateSnapshotScheduleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSnapshotSchedule func (c *StorageGateway) UpdateSnapshotScheduleRequest(input *UpdateSnapshotScheduleInput) (req *request.Request, output *UpdateSnapshotScheduleOutput) { op := &request.Operation{ Name: opUpdateSnapshotSchedule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateSnapshotScheduleInput{} } output = &UpdateSnapshotScheduleOutput{} req = c.newRequest(op, input, output) return } // UpdateSnapshotSchedule API operation for AWS Storage Gateway. // // Updates a snapshot schedule configured for a gateway volume. This operation // is only supported in the cached volume and stored volume gateway types. // // The default snapshot schedule for volume is once every 24 hours, starting // at the creation time of the volume. You can use this API to change the snapshot // schedule configured for the volume. // // In the request you must identify the gateway volume whose snapshot schedule // you want to update, and the schedule information, including when you want // the snapshot to begin on a day and the frequency (in hours) of snapshots. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateSnapshotSchedule for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. // // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateSnapshotSchedule func (c *StorageGateway) UpdateSnapshotSchedule(input *UpdateSnapshotScheduleInput) (*UpdateSnapshotScheduleOutput, error) { req, out := c.UpdateSnapshotScheduleRequest(input) return out, req.Send() } // UpdateSnapshotScheduleWithContext is the same as UpdateSnapshotSchedule with the addition of // the ability to pass a context and additional request options. // // See UpdateSnapshotSchedule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateSnapshotScheduleWithContext(ctx aws.Context, input *UpdateSnapshotScheduleInput, opts ...request.Option) (*UpdateSnapshotScheduleOutput, error) { req, out := c.UpdateSnapshotScheduleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opUpdateVTLDeviceType = "UpdateVTLDeviceType" // UpdateVTLDeviceTypeRequest generates a "aws/request.Request" representing the // client's request for the UpdateVTLDeviceType operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See UpdateVTLDeviceType for more information on using the UpdateVTLDeviceType // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the UpdateVTLDeviceTypeRequest method. // req, resp := client.UpdateVTLDeviceTypeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateVTLDeviceType func (c *StorageGateway) UpdateVTLDeviceTypeRequest(input *UpdateVTLDeviceTypeInput) (req *request.Request, output *UpdateVTLDeviceTypeOutput) { op := &request.Operation{ Name: opUpdateVTLDeviceType, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &UpdateVTLDeviceTypeInput{} } output = &UpdateVTLDeviceTypeOutput{} req = c.newRequest(op, input, output) return } // UpdateVTLDeviceType API operation for AWS Storage Gateway. // // Updates the type of medium changer in a tape gateway. When you activate a // tape gateway, you select a medium changer type for the tape gateway. This // operation enables you to select a different type of medium changer after // a tape gateway is activated. This operation is only supported in the tape // gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's // API operation UpdateVTLDeviceType for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. 
// // * InternalServerError // An internal server error has occurred during the request. For more information, // see the error and message fields. // // See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateVTLDeviceType func (c *StorageGateway) UpdateVTLDeviceType(input *UpdateVTLDeviceTypeInput) (*UpdateVTLDeviceTypeOutput, error) { req, out := c.UpdateVTLDeviceTypeRequest(input) return out, req.Send() } // UpdateVTLDeviceTypeWithContext is the same as UpdateVTLDeviceType with the addition of // the ability to pass a context and additional request options. // // See UpdateVTLDeviceType for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *StorageGateway) UpdateVTLDeviceTypeWithContext(ctx aws.Context, input *UpdateVTLDeviceTypeInput, opts ...request.Option) (*UpdateVTLDeviceTypeOutput, error) { req, out := c.UpdateVTLDeviceTypeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // A JSON object containing one or more of the following fields: // // * ActivateGatewayInput$ActivationKey // // * ActivateGatewayInput$GatewayName // // * ActivateGatewayInput$GatewayRegion // // * ActivateGatewayInput$GatewayTimezone // // * ActivateGatewayInput$GatewayType // // * ActivateGatewayInput$MediumChangerType // // * ActivateGatewayInput$TapeDriveType type ActivateGatewayInput struct { _ struct{} `type:"structure"` // Your gateway activation key. You can obtain the activation key by sending // an HTTP GET request with redirects enabled to the gateway IP address (port // 80). The redirect URL returned in the response provides you the activation // key for your gateway in the query string parameter activationKey. It may // also include other activation-related parameters, however, these are merely // defaults -- the arguments you pass to the ActivateGateway API call determine // the actual configuration of your gateway. // // For more information, see Getting activation key (https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html) // in the AWS Storage Gateway User Guide. // // ActivationKey is a required field ActivationKey *string `min:"1" type:"string" required:"true"` // The name you configured for your gateway. // // GatewayName is a required field GatewayName *string `min:"2" type:"string" required:"true"` // A value that indicates the AWS Region where you want to store your data. // The gateway AWS Region specified must be the same AWS Region as the AWS Region // in your Host header in the request. For more information about available // AWS Regions and endpoints for AWS Storage Gateway, see AWS Storage Gateway // endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/sg.html) // in the AWS General Reference. // // Valid Values: See AWS Storage Gateway endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/sg.html) // in the AWS General Reference. // // GatewayRegion is a required field GatewayRegion *string `min:"1" type:"string" required:"true"` // A value that indicates the time zone you want to set for the gateway. The // time zone is of the format "GMT-hr:mm" or "GMT+hr:mm". For example, GMT-4:00 // indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is // 2 hours ahead of GMT. 
The time zone is used, for example, for scheduling // snapshots and your gateway's maintenance schedule. // // GatewayTimezone is a required field GatewayTimezone *string `min:"3" type:"string" required:"true"` // A value that defines the type of gateway to activate. The type specified // is critical to all later functions of the gateway and cannot be changed after // activation. The default value is CACHED. // // Valid Values: STORED | CACHED | VTL | FILE_S3 GatewayType *string `min:"2" type:"string"` // The value that indicates the type of medium changer to use for tape gateway. // This field is optional. // // Valid Values: STK-L700 | AWS-Gateway-VTL MediumChangerType *string `min:"2" type:"string"` // A list of up to 50 tags that you can assign to the gateway. Each tag is a // key-value pair. // // Valid characters for key and value are letters, spaces, and numbers that // can be represented in UTF-8 format, and the following special characters: // + - = . _ : / @. The maximum length of a tag's key is 128 characters, and // the maximum length for a tag's value is 256 characters. Tags []*Tag `type:"list"` // The value that indicates the type of tape drive to use for tape gateway. // This field is optional. // // Valid Values: IBM-ULT3580-TD5 TapeDriveType *string `min:"2" type:"string"` } // String returns the string representation func (s ActivateGatewayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ActivateGatewayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ActivateGatewayInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ActivateGatewayInput"} if s.ActivationKey == nil { invalidParams.Add(request.NewErrParamRequired("ActivationKey")) } if s.ActivationKey != nil && len(*s.ActivationKey) < 1 { invalidParams.Add(request.NewErrParamMinLen("ActivationKey", 1)) } if s.GatewayName == nil { invalidParams.Add(request.NewErrParamRequired("GatewayName")) } if s.GatewayName != nil && len(*s.GatewayName) < 2 { invalidParams.Add(request.NewErrParamMinLen("GatewayName", 2)) } if s.GatewayRegion == nil { invalidParams.Add(request.NewErrParamRequired("GatewayRegion")) } if s.GatewayRegion != nil && len(*s.GatewayRegion) < 1 { invalidParams.Add(request.NewErrParamMinLen("GatewayRegion", 1)) } if s.GatewayTimezone == nil { invalidParams.Add(request.NewErrParamRequired("GatewayTimezone")) } if s.GatewayTimezone != nil && len(*s.GatewayTimezone) < 3 { invalidParams.Add(request.NewErrParamMinLen("GatewayTimezone", 3)) } if s.GatewayType != nil && len(*s.GatewayType) < 2 { invalidParams.Add(request.NewErrParamMinLen("GatewayType", 2)) } if s.MediumChangerType != nil && len(*s.MediumChangerType) < 2 { invalidParams.Add(request.NewErrParamMinLen("MediumChangerType", 2)) } if s.TapeDriveType != nil && len(*s.TapeDriveType) < 2 { invalidParams.Add(request.NewErrParamMinLen("TapeDriveType", 2)) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetActivationKey sets the ActivationKey field's value. func (s *ActivateGatewayInput) SetActivationKey(v string) *ActivateGatewayInput { s.ActivationKey = &v return s } // SetGatewayName sets the GatewayName field's value. 
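//
// Setters return the input, so they can be chained when building a value.
// For example (a sketch; all values below are placeholders):
//
//    input := (&ActivateGatewayInput{}).
//        SetActivationKey("29AV1-3OFV9-VVIUB-NKT0I-LRO6V").
//        SetGatewayName("example-gateway").
//        SetGatewayRegion("us-east-1").
//        SetGatewayTimezone("GMT-4:00")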
func (s *ActivateGatewayInput) SetGatewayName(v string) *ActivateGatewayInput { s.GatewayName = &v return s } // SetGatewayRegion sets the GatewayRegion field's value. func (s *ActivateGatewayInput) SetGatewayRegion(v string) *ActivateGatewayInput { s.GatewayRegion = &v return s } // SetGatewayTimezone sets the GatewayTimezone field's value. func (s *ActivateGatewayInput) SetGatewayTimezone(v string) *ActivateGatewayInput { s.GatewayTimezone = &v return s } // SetGatewayType sets the GatewayType field's value. func (s *ActivateGatewayInput) SetGatewayType(v string) *ActivateGatewayInput { s.GatewayType = &v return s } // SetMediumChangerType sets the MediumChangerType field's value. func (s *ActivateGatewayInput) SetMediumChangerType(v string) *ActivateGatewayInput { s.MediumChangerType = &v return s } // SetTags sets the Tags field's value. func (s *ActivateGatewayInput) SetTags(v []*Tag) *ActivateGatewayInput { s.Tags = v return s } // SetTapeDriveType sets the TapeDriveType field's value. func (s *ActivateGatewayInput) SetTapeDriveType(v string) *ActivateGatewayInput { s.TapeDriveType = &v return s } // AWS Storage Gateway returns the Amazon Resource Name (ARN) of the activated // gateway. It is a string made of information such as your account, gateway // name, and AWS Region. This ARN is used to reference the gateway in other // API operations as well as resource-based authorization. // // For gateways activated prior to September 02, 2015, the gateway ARN contains // the gateway name rather than the gateway ID. Changing the name of the gateway // has no effect on the gateway ARN. type ActivateGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s ActivateGatewayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ActivateGatewayOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *ActivateGatewayOutput) SetGatewayARN(v string) *ActivateGatewayOutput { s.GatewayARN = &v return s } type AddCacheInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field DiskIds []*string `type:"list" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s AddCacheInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AddCacheInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
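//
// Validate is normally run by the SDK before a request is sent, but it can
// also be called directly. For example (a sketch; the disk ID and ARN below
// are placeholders):
//
//    input := &AddCacheInput{
//        DiskIds:    []*string{aws.String("pci-0000:03:00.0-scsi-0:0:0:0")},
//        GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
//    }
//    if err := input.Validate(); err != nil {
//        // Fix the reported parameters before calling AddCache.
//    }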
func (s *AddCacheInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AddCacheInput"}
	if s.DiskIds == nil {
		invalidParams.Add(request.NewErrParamRequired("DiskIds"))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDiskIds sets the DiskIds field's value.
func (s *AddCacheInput) SetDiskIds(v []*string) *AddCacheInput {
	s.DiskIds = v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *AddCacheInput) SetGatewayARN(v string) *AddCacheInput {
	s.GatewayARN = &v
	return s
}

type AddCacheOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s AddCacheOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AddCacheOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *AddCacheOutput) SetGatewayARN(v string) *AddCacheOutput {
	s.GatewayARN = &v
	return s
}

// AddTagsToResourceInput
type AddTagsToResourceInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the resource you want to add tags to.
	//
	// ResourceARN is a required field
	ResourceARN *string `min:"50" type:"string" required:"true"`

	// The key-value pair that represents the tag you want to add to the resource.
	// The value can be an empty string.
	//
	// Valid characters for key and value are letters, spaces, and numbers representable
	// in UTF-8 format, and the following special characters: + - = . _ : / @. The
	// maximum length of a tag's key is 128 characters, and the maximum length for
	// a tag's value is 256 characters.
	//
	// Tags is a required field
	Tags []*Tag `type:"list" required:"true"`
}

// String returns the string representation
func (s AddTagsToResourceInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AddTagsToResourceInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AddTagsToResourceInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"}
	if s.ResourceARN == nil {
		invalidParams.Add(request.NewErrParamRequired("ResourceARN"))
	}
	if s.ResourceARN != nil && len(*s.ResourceARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 50))
	}
	if s.Tags == nil {
		invalidParams.Add(request.NewErrParamRequired("Tags"))
	}
	if s.Tags != nil {
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetResourceARN sets the ResourceARN field's value.
func (s *AddTagsToResourceInput) SetResourceARN(v string) *AddTagsToResourceInput {
	s.ResourceARN = &v
	return s
}

// SetTags sets the Tags field's value.
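//
// For example (a sketch; the key and value below are arbitrary):
//
//    input := &AddTagsToResourceInput{
//        ResourceARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
//    }
//    input.SetTags([]*Tag{
//        {Key: aws.String("Environment"), Value: aws.String("production")},
//    })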
func (s *AddTagsToResourceInput) SetTags(v []*Tag) *AddTagsToResourceInput { s.Tags = v return s } // AddTagsToResourceOutput type AddTagsToResourceOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the resource you want to add tags to. ResourceARN *string `min:"50" type:"string"` } // String returns the string representation func (s AddTagsToResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AddTagsToResourceOutput) GoString() string { return s.String() } // SetResourceARN sets the ResourceARN field's value. func (s *AddTagsToResourceOutput) SetResourceARN(v string) *AddTagsToResourceOutput { s.ResourceARN = &v return s } type AddUploadBufferInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field DiskIds []*string `type:"list" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s AddUploadBufferInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AddUploadBufferInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *AddUploadBufferInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AddUploadBufferInput"} if s.DiskIds == nil { invalidParams.Add(request.NewErrParamRequired("DiskIds")) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDiskIds sets the DiskIds field's value. func (s *AddUploadBufferInput) SetDiskIds(v []*string) *AddUploadBufferInput { s.DiskIds = v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *AddUploadBufferInput) SetGatewayARN(v string) *AddUploadBufferInput { s.GatewayARN = &v return s } type AddUploadBufferOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s AddUploadBufferOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AddUploadBufferOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *AddUploadBufferOutput) SetGatewayARN(v string) *AddUploadBufferOutput { s.GatewayARN = &v return s } // A JSON object containing one or more of the following fields: // // * AddWorkingStorageInput$DiskIds type AddWorkingStorageInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. 
// A JSON object containing one or more of the following fields:
//
//    * AddWorkingStorageInput$DiskIds
type AddWorkingStorageInput struct {
	_ struct{} `type:"structure"`

	// An array of strings that identify disks that are to be configured as working
	// storage. Each string has a minimum length of 1 and maximum length of 300.
	// You can get the disk IDs from the ListLocalDisks API.
	//
	// DiskIds is a required field
	DiskIds []*string `type:"list" required:"true"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s AddWorkingStorageInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AddWorkingStorageInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AddWorkingStorageInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AddWorkingStorageInput"}
	if s.DiskIds == nil {
		invalidParams.Add(request.NewErrParamRequired("DiskIds"))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDiskIds sets the DiskIds field's value.
func (s *AddWorkingStorageInput) SetDiskIds(v []*string) *AddWorkingStorageInput {
	s.DiskIds = v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *AddWorkingStorageInput) SetGatewayARN(v string) *AddWorkingStorageInput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing the Amazon Resource Name (ARN) of the gateway for
// which working storage was configured.
type AddWorkingStorageOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s AddWorkingStorageOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AddWorkingStorageOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *AddWorkingStorageOutput) SetGatewayARN(v string) *AddWorkingStorageOutput {
	s.GatewayARN = &v
	return s
}

type AssignTapePoolInput struct {
	_ struct{} `type:"structure"`

	// The ID of the pool that you want to add your tape to for archiving. The tape
	// in this pool is archived in the S3 storage class that is associated with
	// the pool. When you use your backup application to eject the tape, the tape
	// is archived directly into the storage class (S3 Glacier or S3 Glacier Deep
	// Archive) that corresponds to the pool.
	//
	// Valid Values: GLACIER | DEEP_ARCHIVE
	//
	// PoolId is a required field
	PoolId *string `min:"1" type:"string" required:"true"`

	// The unique Amazon Resource Name (ARN) of the virtual tape that you want to
	// add to the tape pool.
	//
	// TapeARN is a required field
	TapeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s AssignTapePoolInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssignTapePoolInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AssignTapePoolInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AssignTapePoolInput"}
	if s.PoolId == nil {
		invalidParams.Add(request.NewErrParamRequired("PoolId"))
	}
	if s.PoolId != nil && len(*s.PoolId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("PoolId", 1))
	}
	if s.TapeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("TapeARN"))
	}
	if s.TapeARN != nil && len(*s.TapeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPoolId sets the PoolId field's value.
func (s *AssignTapePoolInput) SetPoolId(v string) *AssignTapePoolInput {
	s.PoolId = &v
	return s
}

// SetTapeARN sets the TapeARN field's value.
func (s *AssignTapePoolInput) SetTapeARN(v string) *AssignTapePoolInput {
	s.TapeARN = &v
	return s
}

type AssignTapePoolOutput struct {
	_ struct{} `type:"structure"`

	// The unique Amazon Resource Name (ARN) of the virtual tape that was added
	// to the tape pool.
	TapeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s AssignTapePoolOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssignTapePoolOutput) GoString() string {
	return s.String()
}

// SetTapeARN sets the TapeARN field's value.
func (s *AssignTapePoolOutput) SetTapeARN(v string) *AssignTapePoolOutput {
	s.TapeARN = &v
	return s
}

// AttachVolumeInput
type AttachVolumeInput struct {
	_ struct{} `type:"structure"`

	// The unique device ID or other distinguishing data that identifies the local
	// disk used to create the volume. This value is only required when you are
	// attaching a stored volume.
	DiskId *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the gateway that you want to attach the
	// volume to.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// The network interface of the gateway on which to expose the iSCSI target.
	// Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a
	// list of the network interfaces available on a gateway.
	//
	// Valid Values: A valid IP address.
	//
	// NetworkInterfaceId is a required field
	NetworkInterfaceId *string `type:"string" required:"true"`

	// The name of the iSCSI target used by an initiator to connect to a volume
	// and used as a suffix for the target ARN. For example, specifying TargetName
	// as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.
	// The target name must be unique across all volumes on a gateway.
	//
	// If you don't specify a value, Storage Gateway uses the value that was previously
	// used for this volume as the new target name.
	TargetName *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the volume to attach to the specified gateway.
	//
	// VolumeARN is a required field
	VolumeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s AttachVolumeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AttachVolumeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AttachVolumeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AttachVolumeInput"}
	if s.DiskId != nil && len(*s.DiskId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("DiskId", 1))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.NetworkInterfaceId == nil {
		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
	}
	if s.TargetName != nil && len(*s.TargetName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("TargetName", 1))
	}
	if s.VolumeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
	}
	if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDiskId sets the DiskId field's value.
func (s *AttachVolumeInput) SetDiskId(v string) *AttachVolumeInput {
	s.DiskId = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *AttachVolumeInput) SetGatewayARN(v string) *AttachVolumeInput {
	s.GatewayARN = &v
	return s
}

// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *AttachVolumeInput) SetNetworkInterfaceId(v string) *AttachVolumeInput {
	s.NetworkInterfaceId = &v
	return s
}

// SetTargetName sets the TargetName field's value.
func (s *AttachVolumeInput) SetTargetName(v string) *AttachVolumeInput {
	s.TargetName = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *AttachVolumeInput) SetVolumeARN(v string) *AttachVolumeInput {
	s.VolumeARN = &v
	return s
}

// AttachVolumeOutput
type AttachVolumeOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI
	// name for the initiator that was used to connect to the target.
	TargetARN *string `min:"50" type:"string"`

	// The Amazon Resource Name (ARN) of the volume that was attached to the gateway.
	VolumeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s AttachVolumeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AttachVolumeOutput) GoString() string {
	return s.String()
}

// SetTargetARN sets the TargetARN field's value.
func (s *AttachVolumeOutput) SetTargetARN(v string) *AttachVolumeOutput {
	s.TargetARN = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *AttachVolumeOutput) SetVolumeARN(v string) *AttachVolumeOutput {
	s.VolumeARN = &v
	return s
}
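// Illustrative sketch (not generated code): a minimal AttachVolumeInput.
// TargetName is omitted, so the service reuses the volume's previous target
// name as described in the field docs above. The ARNs and IP address are
// placeholders.
func exampleAttachVolumeInput() (*AttachVolumeInput, error) {
	input := new(AttachVolumeInput).
		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
		SetNetworkInterfaceId("10.1.1.1").
		SetVolumeARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-AE4B946D")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}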
// Information about the gateway's automatic tape creation policies, including
// the automatic tape creation rules and the gateway that is using the policies.
type AutomaticTapeCreationPolicyInfo struct {
	_ struct{} `type:"structure"`

	// An automatic tape creation policy consists of a list of automatic tape creation
	// rules. This returns the rules that determine when and how to automatically
	// create new tapes.
	AutomaticTapeCreationRules []*AutomaticTapeCreationRule `min:"1" type:"list"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s AutomaticTapeCreationPolicyInfo) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AutomaticTapeCreationPolicyInfo) GoString() string {
	return s.String()
}

// SetAutomaticTapeCreationRules sets the AutomaticTapeCreationRules field's value.
func (s *AutomaticTapeCreationPolicyInfo) SetAutomaticTapeCreationRules(v []*AutomaticTapeCreationRule) *AutomaticTapeCreationPolicyInfo {
	s.AutomaticTapeCreationRules = v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *AutomaticTapeCreationPolicyInfo) SetGatewayARN(v string) *AutomaticTapeCreationPolicyInfo {
	s.GatewayARN = &v
	return s
}

// An automatic tape creation policy consists of automatic tape creation rules
// where each rule defines when and how to create new tapes.
type AutomaticTapeCreationRule struct {
	_ struct{} `type:"structure"`

	// The minimum number of available virtual tapes that the gateway maintains
	// at all times. If the number of tapes on the gateway goes below this value,
	// the gateway creates as many new tapes as are needed to have MinimumNumTapes
	// on the gateway.
	//
	// MinimumNumTapes is a required field
	MinimumNumTapes *int64 `min:"1" type:"integer" required:"true"`

	// The ID of the pool that you want to add your tape to for archiving. The tape
	// in this pool is archived in the Amazon S3 storage class that is associated
	// with the pool. When you use your backup application to eject the tape, the
	// tape is archived directly into the storage class (S3 Glacier or S3 Glacier
	// Deep Archive) that corresponds to the pool.
	//
	// Valid Values: GLACIER | DEEP_ARCHIVE
	//
	// PoolId is a required field
	PoolId *string `min:"1" type:"string" required:"true"`

	// A prefix that you append to the barcode of the virtual tape that you are
	// creating. This prefix makes the barcode unique.
	//
	// The prefix must be 1-4 characters in length and must contain only the uppercase
	// letters A to Z.
	//
	// TapeBarcodePrefix is a required field
	TapeBarcodePrefix *string `min:"1" type:"string" required:"true"`

	// The size, in bytes, of the virtual tape capacity.
	//
	// TapeSizeInBytes is a required field
	TapeSizeInBytes *int64 `type:"long" required:"true"`
}

// String returns the string representation
func (s AutomaticTapeCreationRule) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AutomaticTapeCreationRule) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AutomaticTapeCreationRule) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AutomaticTapeCreationRule"}
	if s.MinimumNumTapes == nil {
		invalidParams.Add(request.NewErrParamRequired("MinimumNumTapes"))
	}
	if s.MinimumNumTapes != nil && *s.MinimumNumTapes < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MinimumNumTapes", 1))
	}
	if s.PoolId == nil {
		invalidParams.Add(request.NewErrParamRequired("PoolId"))
	}
	if s.PoolId != nil && len(*s.PoolId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("PoolId", 1))
	}
	if s.TapeBarcodePrefix == nil {
		invalidParams.Add(request.NewErrParamRequired("TapeBarcodePrefix"))
	}
	if s.TapeBarcodePrefix != nil && len(*s.TapeBarcodePrefix) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("TapeBarcodePrefix", 1))
	}
	if s.TapeSizeInBytes == nil {
		invalidParams.Add(request.NewErrParamRequired("TapeSizeInBytes"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetMinimumNumTapes sets the MinimumNumTapes field's value.
func (s *AutomaticTapeCreationRule) SetMinimumNumTapes(v int64) *AutomaticTapeCreationRule {
	s.MinimumNumTapes = &v
	return s
}

// SetPoolId sets the PoolId field's value.
func (s *AutomaticTapeCreationRule) SetPoolId(v string) *AutomaticTapeCreationRule {
	s.PoolId = &v
	return s
}

// SetTapeBarcodePrefix sets the TapeBarcodePrefix field's value.
func (s *AutomaticTapeCreationRule) SetTapeBarcodePrefix(v string) *AutomaticTapeCreationRule {
	s.TapeBarcodePrefix = &v
	return s
}

// SetTapeSizeInBytes sets the TapeSizeInBytes field's value.
func (s *AutomaticTapeCreationRule) SetTapeSizeInBytes(v int64) *AutomaticTapeCreationRule {
	s.TapeSizeInBytes = &v
	return s
}
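// Illustrative sketch (not generated code): a rule that keeps at least five
// 100 GiB tapes with barcode prefix "AB" available, archiving ejected tapes to
// the GLACIER pool. All values are made up for the example; Validate enforces
// the minimums documented above.
func exampleAutomaticTapeCreationRule() (*AutomaticTapeCreationRule, error) {
	rule := new(AutomaticTapeCreationRule).
		SetMinimumNumTapes(5).
		SetPoolId("GLACIER").
		SetTapeBarcodePrefix("AB").
		SetTapeSizeInBytes(100 * 1024 * 1024 * 1024)
	if err := rule.Validate(); err != nil {
		return nil, err
	}
	return rule, nil
}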
// Describes an iSCSI cached volume.
type CachediSCSIVolume struct {
	_ struct{} `type:"structure"`

	// The date the volume was created. Volumes created prior to March 28, 2017
	// don’t have this time stamp.
	CreatedDate *time.Time `type:"timestamp"`

	// The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used
	// for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric
	// CMKs. This value can only be set when KMSEncrypted is true. Optional.
	KMSKey *string `min:"7" type:"string"`

	// If the cached volume was created from a snapshot, this field contains the
	// snapshot ID used, e.g. snap-78e22663. Otherwise, this field is not included.
	SourceSnapshotId *string `type:"string"`

	// The name of the iSCSI target used by an initiator to connect to a volume
	// and used as a suffix for the target ARN. For example, specifying TargetName
	// as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.
	// The target name must be unique across all volumes on a gateway.
	//
	// If you don't specify a value, Storage Gateway uses the value that was previously
	// used for this volume as the new target name.
	TargetName *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the storage volume.
	VolumeARN *string `min:"50" type:"string"`

	// A value that indicates whether a storage volume is attached to or detached
	// from a gateway. For more information, see Moving your volumes to a different
	// gateway (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume).
	VolumeAttachmentStatus *string `min:"3" type:"string"`

	// The unique identifier of the volume, e.g. vol-AE4B946D.
	VolumeId *string `min:"12" type:"string"`

	// Represents the percentage complete if the volume is restoring or bootstrapping;
	// the value represents the percent of data transferred. This field does not
	// appear in the response if the cached volume is not restoring or bootstrapping.
	VolumeProgress *float64 `type:"double"`

	// The size, in bytes, of the volume capacity.
	VolumeSizeInBytes *int64 `type:"long"`

	// One of the VolumeStatus values that indicates the state of the storage volume.
	VolumeStatus *string `min:"3" type:"string"`

	// One of the VolumeType enumeration values that describes the type of the volume.
	VolumeType *string `min:"3" type:"string"`

	// The size of the data stored on the volume in bytes. This value is calculated
	// based on the number of blocks that are touched, instead of the actual amount
	// of data written. This value can be useful for sequential write patterns but
	// less accurate for random write patterns. VolumeUsedInBytes is different from
	// the compressed size of the volume, which is the value that is used to calculate
	// your bill.
	//
	// This value is not available for volumes created prior to May 13, 2015, until
	// you store data on the volume.
	VolumeUsedInBytes *int64 `type:"long"`

	// A VolumeiSCSIAttributes object that represents a collection of iSCSI attributes
	// for one stored volume.
	VolumeiSCSIAttributes *VolumeiSCSIAttributes `type:"structure"`
}

// String returns the string representation
func (s CachediSCSIVolume) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CachediSCSIVolume) GoString() string {
	return s.String()
}

// SetCreatedDate sets the CreatedDate field's value.
func (s *CachediSCSIVolume) SetCreatedDate(v time.Time) *CachediSCSIVolume {
	s.CreatedDate = &v
	return s
}

// SetKMSKey sets the KMSKey field's value.
func (s *CachediSCSIVolume) SetKMSKey(v string) *CachediSCSIVolume {
	s.KMSKey = &v
	return s
}

// SetSourceSnapshotId sets the SourceSnapshotId field's value.
func (s *CachediSCSIVolume) SetSourceSnapshotId(v string) *CachediSCSIVolume {
	s.SourceSnapshotId = &v
	return s
}

// SetTargetName sets the TargetName field's value.
func (s *CachediSCSIVolume) SetTargetName(v string) *CachediSCSIVolume {
	s.TargetName = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *CachediSCSIVolume) SetVolumeARN(v string) *CachediSCSIVolume {
	s.VolumeARN = &v
	return s
}

// SetVolumeAttachmentStatus sets the VolumeAttachmentStatus field's value.
func (s *CachediSCSIVolume) SetVolumeAttachmentStatus(v string) *CachediSCSIVolume {
	s.VolumeAttachmentStatus = &v
	return s
}

// SetVolumeId sets the VolumeId field's value.
func (s *CachediSCSIVolume) SetVolumeId(v string) *CachediSCSIVolume {
	s.VolumeId = &v
	return s
}

// SetVolumeProgress sets the VolumeProgress field's value.
func (s *CachediSCSIVolume) SetVolumeProgress(v float64) *CachediSCSIVolume {
	s.VolumeProgress = &v
	return s
}

// SetVolumeSizeInBytes sets the VolumeSizeInBytes field's value.
func (s *CachediSCSIVolume) SetVolumeSizeInBytes(v int64) *CachediSCSIVolume {
	s.VolumeSizeInBytes = &v
	return s
}

// SetVolumeStatus sets the VolumeStatus field's value.
func (s *CachediSCSIVolume) SetVolumeStatus(v string) *CachediSCSIVolume {
	s.VolumeStatus = &v
	return s
}

// SetVolumeType sets the VolumeType field's value.
func (s *CachediSCSIVolume) SetVolumeType(v string) *CachediSCSIVolume {
	s.VolumeType = &v
	return s
}

// SetVolumeUsedInBytes sets the VolumeUsedInBytes field's value.
func (s *CachediSCSIVolume) SetVolumeUsedInBytes(v int64) *CachediSCSIVolume {
	s.VolumeUsedInBytes = &v
	return s
}

// SetVolumeiSCSIAttributes sets the VolumeiSCSIAttributes field's value.
func (s *CachediSCSIVolume) SetVolumeiSCSIAttributes(v *VolumeiSCSIAttributes) *CachediSCSIVolume {
	s.VolumeiSCSIAttributes = v
	return s
}

// CancelArchivalInput
type CancelArchivalInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// The Amazon Resource Name (ARN) of the virtual tape you want to cancel archiving
	// for.
	//
	// TapeARN is a required field
	TapeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s CancelArchivalInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CancelArchivalInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CancelArchivalInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CancelArchivalInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.TapeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("TapeARN"))
	}
	if s.TapeARN != nil && len(*s.TapeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *CancelArchivalInput) SetGatewayARN(v string) *CancelArchivalInput {
	s.GatewayARN = &v
	return s
}

// SetTapeARN sets the TapeARN field's value.
func (s *CancelArchivalInput) SetTapeARN(v string) *CancelArchivalInput {
	s.TapeARN = &v
	return s
}

// CancelArchivalOutput
type CancelArchivalOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the virtual tape for which archiving was
	// canceled.
	TapeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s CancelArchivalOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CancelArchivalOutput) GoString() string {
	return s.String()
}

// SetTapeARN sets the TapeARN field's value.
func (s *CancelArchivalOutput) SetTapeARN(v string) *CancelArchivalOutput {
	s.TapeARN = &v
	return s
}
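// Illustrative sketch (not generated code): canceling archival for a tape.
// Both ARNs are assumed to come from earlier calls; Validate enforces the
// 50-character minimums documented above.
func exampleCancelArchivalInput(gatewayARN, tapeARN string) (*CancelArchivalInput, error) {
	input := new(CancelArchivalInput).
		SetGatewayARN(gatewayARN).
		SetTapeARN(tapeARN)
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}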
// CancelRetrievalInput
type CancelRetrievalInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// The Amazon Resource Name (ARN) of the virtual tape you want to cancel retrieval
	// for.
	//
	// TapeARN is a required field
	TapeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s CancelRetrievalInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CancelRetrievalInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CancelRetrievalInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CancelRetrievalInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.TapeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("TapeARN"))
	}
	if s.TapeARN != nil && len(*s.TapeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *CancelRetrievalInput) SetGatewayARN(v string) *CancelRetrievalInput {
	s.GatewayARN = &v
	return s
}

// SetTapeARN sets the TapeARN field's value.
func (s *CancelRetrievalInput) SetTapeARN(v string) *CancelRetrievalInput {
	s.TapeARN = &v
	return s
}

// CancelRetrievalOutput
type CancelRetrievalOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the virtual tape for which retrieval was
	// canceled.
	TapeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s CancelRetrievalOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CancelRetrievalOutput) GoString() string {
	return s.String()
}

// SetTapeARN sets the TapeARN field's value.
func (s *CancelRetrievalOutput) SetTapeARN(v string) *CancelRetrievalOutput {
	s.TapeARN = &v
	return s
}

// Describes Challenge-Handshake Authentication Protocol (CHAP) information
// that supports authentication between your gateway and iSCSI initiators.
type ChapInfo struct {
	_ struct{} `type:"structure"`

	// The iSCSI initiator that connects to the target.
	InitiatorName *string `min:"1" type:"string"`

	// The secret key that the initiator (for example, the Windows client) must
	// provide to participate in mutual CHAP with the target.
	SecretToAuthenticateInitiator *string `min:"1" type:"string" sensitive:"true"`

	// The secret key that the target must provide to participate in mutual CHAP
	// with the initiator (e.g. Windows client).
	SecretToAuthenticateTarget *string `min:"1" type:"string" sensitive:"true"`

	// The Amazon Resource Name (ARN) of the volume.
	//
	// Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens
	// (-).
	TargetARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s ChapInfo) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ChapInfo) GoString() string {
	return s.String()
}

// SetInitiatorName sets the InitiatorName field's value.
func (s *ChapInfo) SetInitiatorName(v string) *ChapInfo {
	s.InitiatorName = &v
	return s
}

// SetSecretToAuthenticateInitiator sets the SecretToAuthenticateInitiator field's value.
func (s *ChapInfo) SetSecretToAuthenticateInitiator(v string) *ChapInfo {
	s.SecretToAuthenticateInitiator = &v
	return s
}

// SetSecretToAuthenticateTarget sets the SecretToAuthenticateTarget field's value.
func (s *ChapInfo) SetSecretToAuthenticateTarget(v string) *ChapInfo {
	s.SecretToAuthenticateTarget = &v
	return s
}

// SetTargetARN sets the TargetARN field's value.
func (s *ChapInfo) SetTargetARN(v string) *ChapInfo {
	s.TargetARN = &v
	return s
}
type CreateCachediSCSIVolumeInput struct {
	_ struct{} `type:"structure"`

	// A unique identifier that you use to retry a request. If you retry a request,
	// use the same ClientToken you specified in the initial request.
	//
	// ClientToken is a required field
	ClientToken *string `min:"5" type:"string" required:"true"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// Set to true to use Amazon S3 server-side encryption with your own AWS KMS
	// key, or false to use a key managed by Amazon S3. Optional.
	//
	// Valid Values: true | false
	KMSEncrypted *bool `type:"boolean"`

	// The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used
	// for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric
	// CMKs. This value can only be set when KMSEncrypted is true. Optional.
	KMSKey *string `min:"7" type:"string"`

	// The network interface of the gateway on which to expose the iSCSI target.
	// Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a
	// list of the network interfaces available on a gateway.
	//
	// Valid Values: A valid IP address.
	//
	// NetworkInterfaceId is a required field
	NetworkInterfaceId *string `type:"string" required:"true"`

	// The snapshot ID (e.g. "snap-1122aabb") of the snapshot to restore as the
	// new cached volume. Specify this field if you want to create the iSCSI storage
	// volume from a snapshot; otherwise, do not include this field. To list snapshots
	// for your account use DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html)
	// in the Amazon Elastic Compute Cloud API Reference.
	SnapshotId *string `type:"string"`

	// The ARN for an existing volume. Specifying this ARN makes the new volume
	// into an exact copy of the specified existing volume's latest recovery point.
	// The VolumeSizeInBytes value for this new volume must be equal to or larger
	// than the size of the existing volume, in bytes.
	SourceVolumeARN *string `min:"50" type:"string"`

	// A list of up to 50 tags that you can assign to a cached volume. Each tag
	// is a key-value pair.
	//
	// Valid characters for key and value are letters, spaces, and numbers that
	// you can represent in UTF-8 format, and the following special characters:
	// + - = . _ : / @. The maximum length of a tag's key is 128 characters, and
	// the maximum length for a tag's value is 256 characters.
	Tags []*Tag `type:"list"`

	// The name of the iSCSI target used by an initiator to connect to a volume
	// and used as a suffix for the target ARN. For example, specifying TargetName
	// as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.
	// The target name must be unique across all volumes on a gateway.
	//
	// If you don't specify a value, Storage Gateway uses the value that was previously
	// used for this volume as the new target name.
	//
	// TargetName is a required field
	TargetName *string `min:"1" type:"string" required:"true"`

	// The size of the volume in bytes.
	//
	// VolumeSizeInBytes is a required field
	VolumeSizeInBytes *int64 `type:"long" required:"true"`
}

// String returns the string representation
func (s CreateCachediSCSIVolumeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateCachediSCSIVolumeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateCachediSCSIVolumeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateCachediSCSIVolumeInput"}
	if s.ClientToken == nil {
		invalidParams.Add(request.NewErrParamRequired("ClientToken"))
	}
	if s.ClientToken != nil && len(*s.ClientToken) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("ClientToken", 5))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.KMSKey != nil && len(*s.KMSKey) < 7 {
		invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7))
	}
	if s.NetworkInterfaceId == nil {
		invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId"))
	}
	if s.SourceVolumeARN != nil && len(*s.SourceVolumeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("SourceVolumeARN", 50))
	}
	if s.TargetName == nil {
		invalidParams.Add(request.NewErrParamRequired("TargetName"))
	}
	if s.TargetName != nil && len(*s.TargetName) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("TargetName", 1))
	}
	if s.VolumeSizeInBytes == nil {
		invalidParams.Add(request.NewErrParamRequired("VolumeSizeInBytes"))
	}
	if s.Tags != nil {
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientToken sets the ClientToken field's value.
func (s *CreateCachediSCSIVolumeInput) SetClientToken(v string) *CreateCachediSCSIVolumeInput {
	s.ClientToken = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *CreateCachediSCSIVolumeInput) SetGatewayARN(v string) *CreateCachediSCSIVolumeInput {
	s.GatewayARN = &v
	return s
}

// SetKMSEncrypted sets the KMSEncrypted field's value.
func (s *CreateCachediSCSIVolumeInput) SetKMSEncrypted(v bool) *CreateCachediSCSIVolumeInput {
	s.KMSEncrypted = &v
	return s
}

// SetKMSKey sets the KMSKey field's value.
func (s *CreateCachediSCSIVolumeInput) SetKMSKey(v string) *CreateCachediSCSIVolumeInput {
	s.KMSKey = &v
	return s
}

// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *CreateCachediSCSIVolumeInput) SetNetworkInterfaceId(v string) *CreateCachediSCSIVolumeInput {
	s.NetworkInterfaceId = &v
	return s
}

// SetSnapshotId sets the SnapshotId field's value.
func (s *CreateCachediSCSIVolumeInput) SetSnapshotId(v string) *CreateCachediSCSIVolumeInput {
	s.SnapshotId = &v
	return s
}

// SetSourceVolumeARN sets the SourceVolumeARN field's value.
func (s *CreateCachediSCSIVolumeInput) SetSourceVolumeARN(v string) *CreateCachediSCSIVolumeInput {
	s.SourceVolumeARN = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreateCachediSCSIVolumeInput) SetTags(v []*Tag) *CreateCachediSCSIVolumeInput {
	s.Tags = v
	return s
}

// SetTargetName sets the TargetName field's value.
func (s *CreateCachediSCSIVolumeInput) SetTargetName(v string) *CreateCachediSCSIVolumeInput {
	s.TargetName = &v
	return s
}

// SetVolumeSizeInBytes sets the VolumeSizeInBytes field's value.
func (s *CreateCachediSCSIVolumeInput) SetVolumeSizeInBytes(v int64) *CreateCachediSCSIVolumeInput {
	s.VolumeSizeInBytes = &v
	return s
}
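// Illustrative sketch (not generated code): creating a 150 GiB cached volume.
// Reusing the same ClientToken on retry makes the request idempotent, per the
// field docs above. The ARN, address, and token are placeholders.
func exampleCreateCachediSCSIVolumeInput() (*CreateCachediSCSIVolumeInput, error) {
	input := new(CreateCachediSCSIVolumeInput).
		SetClientToken("token-1234567890").
		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
		SetNetworkInterfaceId("10.1.1.1").
		SetTargetName("myvolume").
		SetVolumeSizeInBytes(150 * 1024 * 1024 * 1024)
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}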
type CreateCachediSCSIVolumeOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI
	// name that initiators can use to connect to the target.
	TargetARN *string `min:"50" type:"string"`

	// The Amazon Resource Name (ARN) of the configured volume.
	VolumeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s CreateCachediSCSIVolumeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateCachediSCSIVolumeOutput) GoString() string {
	return s.String()
}

// SetTargetARN sets the TargetARN field's value.
func (s *CreateCachediSCSIVolumeOutput) SetTargetARN(v string) *CreateCachediSCSIVolumeOutput {
	s.TargetARN = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *CreateCachediSCSIVolumeOutput) SetVolumeARN(v string) *CreateCachediSCSIVolumeOutput {
	s.VolumeARN = &v
	return s
}

// CreateNFSFileShareInput
type CreateNFSFileShareInput struct {
	_ struct{} `type:"structure"`

	// The list of clients that are allowed to access the file gateway. The list
	// must contain either valid IP addresses or valid CIDR blocks.
	ClientList []*string `min:"1" type:"list"`

	// A unique string value that you supply that is used by file gateway to ensure
	// idempotent file share creation.
	//
	// ClientToken is a required field
	ClientToken *string `min:"5" type:"string" required:"true"`

	// The default storage class for objects put into an Amazon S3 bucket by the
	// file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.
	//
	// Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA
	DefaultStorageClass *string `min:"5" type:"string"`

	// The Amazon Resource Name (ARN) of the file gateway on which you want to create
	// a file share.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// A value that enables guessing of the MIME type for uploaded objects based
	// on file extensions. Set this value to true to enable MIME type guessing,
	// otherwise set to false. The default value is true.
	//
	// Valid Values: true | false
	GuessMIMETypeEnabled *bool `type:"boolean"`

	// Set to true to use Amazon S3 server-side encryption with your own AWS KMS
	// key, or false to use a key managed by Amazon S3. Optional.
	//
	// Valid Values: true | false
	KMSEncrypted *bool `type:"boolean"`

	// The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used
	// for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric
	// CMKs. This value can only be set when KMSEncrypted is true. Optional.
	KMSKey *string `min:"7" type:"string"`

	// The ARN of the backed storage used for storing file data.
	//
	// LocationARN is a required field
	LocationARN *string `min:"16" type:"string" required:"true"`

	// File share default values. Optional.
	NFSFileShareDefaults *NFSFileShareDefaults `type:"structure"`

	// A value that sets the access control list (ACL) permission for objects in
	// the S3 bucket that a file gateway puts objects into. The default value is
	// private.
	ObjectACL *string `type:"string" enum:"ObjectACL"`

	// A value that sets the write status of a file share. Set this value to true
	// to set the write status to read-only, otherwise set to false.
	//
	// Valid Values: true | false
	ReadOnly *bool `type:"boolean"`

	// A value that sets who pays the cost of the request and the cost associated
	// with data download from the S3 bucket. If this value is set to true, the
	// requester pays the costs; otherwise, the S3 bucket owner pays. However, the
	// S3 bucket owner always pays the cost of storing data.
	//
	// RequesterPays is a configuration for the S3 bucket that backs the file share,
	// so make sure that the configuration on the file share is the same as the
	// S3 bucket configuration.
	//
	// Valid Values: true | false
	RequesterPays *bool `type:"boolean"`

	// The ARN of the AWS Identity and Access Management (IAM) role that a file
	// gateway assumes when it accesses the underlying storage.
	//
	// Role is a required field
	Role *string `min:"20" type:"string" required:"true"`

	// A value that maps a user to the anonymous user.
	//
	// Valid values are the following:
	//
	//    * RootSquash: Only root is mapped to the anonymous user.
	//
	//    * NoSquash: No one is mapped to the anonymous user.
	//
	//    * AllSquash: Everyone is mapped to the anonymous user.
	Squash *string `min:"5" type:"string"`

	// A list of up to 50 tags that can be assigned to the NFS file share. Each
	// tag is a key-value pair.
	//
	// Valid characters for key and value are letters, spaces, and numbers representable
	// in UTF-8 format, and the following special characters: + - = . _ : / @. The
	// maximum length of a tag's key is 128 characters, and the maximum length for
	// a tag's value is 256 characters.
	Tags []*Tag `type:"list"`
}

// String returns the string representation
func (s CreateNFSFileShareInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateNFSFileShareInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateNFSFileShareInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateNFSFileShareInput"}
	if s.ClientList != nil && len(s.ClientList) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientList", 1))
	}
	if s.ClientToken == nil {
		invalidParams.Add(request.NewErrParamRequired("ClientToken"))
	}
	if s.ClientToken != nil && len(*s.ClientToken) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("ClientToken", 5))
	}
	if s.DefaultStorageClass != nil && len(*s.DefaultStorageClass) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("DefaultStorageClass", 5))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.KMSKey != nil && len(*s.KMSKey) < 7 {
		invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7))
	}
	if s.LocationARN == nil {
		invalidParams.Add(request.NewErrParamRequired("LocationARN"))
	}
	if s.LocationARN != nil && len(*s.LocationARN) < 16 {
		invalidParams.Add(request.NewErrParamMinLen("LocationARN", 16))
	}
	if s.Role == nil {
		invalidParams.Add(request.NewErrParamRequired("Role"))
	}
	if s.Role != nil && len(*s.Role) < 20 {
		invalidParams.Add(request.NewErrParamMinLen("Role", 20))
	}
	if s.Squash != nil && len(*s.Squash) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("Squash", 5))
	}
	if s.NFSFileShareDefaults != nil {
		if err := s.NFSFileShareDefaults.Validate(); err != nil {
			invalidParams.AddNested("NFSFileShareDefaults", err.(request.ErrInvalidParams))
		}
	}
	if s.Tags != nil {
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientList sets the ClientList field's value.
func (s *CreateNFSFileShareInput) SetClientList(v []*string) *CreateNFSFileShareInput {
	s.ClientList = v
	return s
}

// SetClientToken sets the ClientToken field's value.
func (s *CreateNFSFileShareInput) SetClientToken(v string) *CreateNFSFileShareInput {
	s.ClientToken = &v
	return s
}

// SetDefaultStorageClass sets the DefaultStorageClass field's value.
func (s *CreateNFSFileShareInput) SetDefaultStorageClass(v string) *CreateNFSFileShareInput {
	s.DefaultStorageClass = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *CreateNFSFileShareInput) SetGatewayARN(v string) *CreateNFSFileShareInput {
	s.GatewayARN = &v
	return s
}

// SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value.
func (s *CreateNFSFileShareInput) SetGuessMIMETypeEnabled(v bool) *CreateNFSFileShareInput {
	s.GuessMIMETypeEnabled = &v
	return s
}

// SetKMSEncrypted sets the KMSEncrypted field's value.
func (s *CreateNFSFileShareInput) SetKMSEncrypted(v bool) *CreateNFSFileShareInput {
	s.KMSEncrypted = &v
	return s
}

// SetKMSKey sets the KMSKey field's value.
func (s *CreateNFSFileShareInput) SetKMSKey(v string) *CreateNFSFileShareInput {
	s.KMSKey = &v
	return s
}

// SetLocationARN sets the LocationARN field's value.
func (s *CreateNFSFileShareInput) SetLocationARN(v string) *CreateNFSFileShareInput {
	s.LocationARN = &v
	return s
}

// SetNFSFileShareDefaults sets the NFSFileShareDefaults field's value.
func (s *CreateNFSFileShareInput) SetNFSFileShareDefaults(v *NFSFileShareDefaults) *CreateNFSFileShareInput {
	s.NFSFileShareDefaults = v
	return s
}

// SetObjectACL sets the ObjectACL field's value.
func (s *CreateNFSFileShareInput) SetObjectACL(v string) *CreateNFSFileShareInput {
	s.ObjectACL = &v
	return s
}

// SetReadOnly sets the ReadOnly field's value.
func (s *CreateNFSFileShareInput) SetReadOnly(v bool) *CreateNFSFileShareInput {
	s.ReadOnly = &v
	return s
}

// SetRequesterPays sets the RequesterPays field's value.
func (s *CreateNFSFileShareInput) SetRequesterPays(v bool) *CreateNFSFileShareInput {
	s.RequesterPays = &v
	return s
}

// SetRole sets the Role field's value.
func (s *CreateNFSFileShareInput) SetRole(v string) *CreateNFSFileShareInput {
	s.Role = &v
	return s
}

// SetSquash sets the Squash field's value.
func (s *CreateNFSFileShareInput) SetSquash(v string) *CreateNFSFileShareInput {
	s.Squash = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreateNFSFileShareInput) SetTags(v []*Tag) *CreateNFSFileShareInput {
	s.Tags = v
	return s
}

// CreateNFSFileShareOutput
type CreateNFSFileShareOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the newly created file share.
	FileShareARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s CreateNFSFileShareOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateNFSFileShareOutput) GoString() string {
	return s.String()
}

// SetFileShareARN sets the FileShareARN field's value.
func (s *CreateNFSFileShareOutput) SetFileShareARN(v string) *CreateNFSFileShareOutput {
	s.FileShareARN = &v
	return s
}
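// Illustrative sketch (not generated code): an NFS file share restricted to a
// single CIDR block with root squash. All ARNs are placeholders; the IAM role
// is assumed to grant the gateway access to the bucket named in LocationARN.
func exampleCreateNFSFileShareInput() (*CreateNFSFileShareInput, error) {
	cidr := "10.0.0.0/24"
	input := new(CreateNFSFileShareInput).
		SetClientToken("token-1234567890").
		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
		SetLocationARN("arn:aws:s3:::example-bucket").
		SetRole("arn:aws:iam::111122223333:role/StorageGatewayS3Access").
		SetSquash("RootSquash").
		SetClientList([]*string{&cidr})
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}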
// CreateSMBFileShareInput
type CreateSMBFileShareInput struct {
	_ struct{} `type:"structure"`

	// A list of users in the Active Directory that will be granted administrator
	// privileges on the file share. These users can do all file operations as the
	// super-user.
	//
	// Use this option very carefully, because any user in this list can do anything
	// they like on the file share, regardless of file permissions.
	AdminUserList []*string `type:"list"`

	// The Amazon Resource Name (ARN) of the storage used for the audit logs.
	AuditDestinationARN *string `type:"string"`

	// The authentication method that users use to access the file share. The default
	// is ActiveDirectory.
	//
	// Valid Values: ActiveDirectory | GuestAccess
	Authentication *string `min:"5" type:"string"`

	// A unique string value that you supply that is used by file gateway to ensure
	// idempotent file share creation.
	//
	// ClientToken is a required field
	ClientToken *string `min:"5" type:"string" required:"true"`

	// The default storage class for objects put into an Amazon S3 bucket by the
	// file gateway. The default value is S3_INTELLIGENT_TIERING. Optional.
	//
	// Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA
	DefaultStorageClass *string `min:"5" type:"string"`

	// The ARN of the file gateway on which you want to create a file share.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// A value that enables guessing of the MIME type for uploaded objects based
	// on file extensions. Set this value to true to enable MIME type guessing,
	// otherwise set to false. The default value is true.
	//
	// Valid Values: true | false
	GuessMIMETypeEnabled *bool `type:"boolean"`

	// A list of users or groups in the Active Directory that are not allowed to
	// access the file share. A group must be prefixed with the @ character. For
	// example, @group1. Can only be set if Authentication is set to ActiveDirectory.
	InvalidUserList []*string `type:"list"`

	// Set to true to use Amazon S3 server-side encryption with your own AWS KMS
	// key, or false to use a key managed by Amazon S3. Optional.
	//
	// Valid Values: true | false
	KMSEncrypted *bool `type:"boolean"`

	// The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used
	// for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric
	// CMKs. This value can only be set when KMSEncrypted is true. Optional.
	KMSKey *string `min:"7" type:"string"`

	// The ARN of the backed storage used for storing file data.
	//
	// LocationARN is a required field
	LocationARN *string `min:"16" type:"string" required:"true"`

	// A value that sets the access control list (ACL) permission for objects in
	// the S3 bucket that a file gateway puts objects into. The default value is
	// private.
	ObjectACL *string `type:"string" enum:"ObjectACL"`

	// A value that sets the write status of a file share. Set this value to true
	// to set the write status to read-only, otherwise set to false.
	//
	// Valid Values: true | false
	ReadOnly *bool `type:"boolean"`

	// A value that sets who pays the cost of the request and the cost associated
	// with data download from the S3 bucket. If this value is set to true, the
	// requester pays the costs; otherwise, the S3 bucket owner pays. However, the
	// S3 bucket owner always pays the cost of storing data.
	//
	// RequesterPays is a configuration for the S3 bucket that backs the file share,
	// so make sure that the configuration on the file share is the same as the
	// S3 bucket configuration.
	//
	// Valid Values: true | false
	RequesterPays *bool `type:"boolean"`

	// The ARN of the AWS Identity and Access Management (IAM) role that a file
	// gateway assumes when it accesses the underlying storage.
	//
	// Role is a required field
	Role *string `min:"20" type:"string" required:"true"`

	// Set this value to true to enable access control list (ACL) on the SMB file
	// share. Set it to false to map file and directory permissions to the POSIX
	// permissions.
	//
	// For more information, see Using Microsoft Windows ACLs to control access
	// to an SMB file share (https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html)
	// in the AWS Storage Gateway User Guide.
	//
	// Valid Values: true | false
	SMBACLEnabled *bool `type:"boolean"`

	// A list of up to 50 tags that can be assigned to the SMB file share. Each
	// tag is a key-value pair.
	//
	// Valid characters for key and value are letters, spaces, and numbers representable
	// in UTF-8 format, and the following special characters: + - = . _ : / @. The
	// maximum length of a tag's key is 128 characters, and the maximum length for
	// a tag's value is 256 characters.
	Tags []*Tag `type:"list"`

	// A list of users or groups in the Active Directory that are allowed to access
	// the file share. A group must be prefixed with the @ character. For example,
	// @group1. Can only be set if Authentication is set to ActiveDirectory.
	ValidUserList []*string `type:"list"`
}

// String returns the string representation
func (s CreateSMBFileShareInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateSMBFileShareInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateSMBFileShareInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateSMBFileShareInput"}
	if s.Authentication != nil && len(*s.Authentication) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("Authentication", 5))
	}
	if s.ClientToken == nil {
		invalidParams.Add(request.NewErrParamRequired("ClientToken"))
	}
	if s.ClientToken != nil && len(*s.ClientToken) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("ClientToken", 5))
	}
	if s.DefaultStorageClass != nil && len(*s.DefaultStorageClass) < 5 {
		invalidParams.Add(request.NewErrParamMinLen("DefaultStorageClass", 5))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.KMSKey != nil && len(*s.KMSKey) < 7 {
		invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7))
	}
	if s.LocationARN == nil {
		invalidParams.Add(request.NewErrParamRequired("LocationARN"))
	}
	if s.LocationARN != nil && len(*s.LocationARN) < 16 {
		invalidParams.Add(request.NewErrParamMinLen("LocationARN", 16))
	}
	if s.Role == nil {
		invalidParams.Add(request.NewErrParamRequired("Role"))
	}
	if s.Role != nil && len(*s.Role) < 20 {
		invalidParams.Add(request.NewErrParamMinLen("Role", 20))
	}
	if s.Tags != nil {
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAdminUserList sets the AdminUserList field's value.
func (s *CreateSMBFileShareInput) SetAdminUserList(v []*string) *CreateSMBFileShareInput {
	s.AdminUserList = v
	return s
}

// SetAuditDestinationARN sets the AuditDestinationARN field's value.
func (s *CreateSMBFileShareInput) SetAuditDestinationARN(v string) *CreateSMBFileShareInput {
	s.AuditDestinationARN = &v
	return s
}

// SetAuthentication sets the Authentication field's value.
func (s *CreateSMBFileShareInput) SetAuthentication(v string) *CreateSMBFileShareInput {
	s.Authentication = &v
	return s
}

// SetClientToken sets the ClientToken field's value.
func (s *CreateSMBFileShareInput) SetClientToken(v string) *CreateSMBFileShareInput {
	s.ClientToken = &v
	return s
}

// SetDefaultStorageClass sets the DefaultStorageClass field's value.
func (s *CreateSMBFileShareInput) SetDefaultStorageClass(v string) *CreateSMBFileShareInput {
	s.DefaultStorageClass = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *CreateSMBFileShareInput) SetGatewayARN(v string) *CreateSMBFileShareInput {
	s.GatewayARN = &v
	return s
}

// SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value.
func (s *CreateSMBFileShareInput) SetGuessMIMETypeEnabled(v bool) *CreateSMBFileShareInput {
	s.GuessMIMETypeEnabled = &v
	return s
}

// SetInvalidUserList sets the InvalidUserList field's value.
func (s *CreateSMBFileShareInput) SetInvalidUserList(v []*string) *CreateSMBFileShareInput {
	s.InvalidUserList = v
	return s
}

// SetKMSEncrypted sets the KMSEncrypted field's value.
func (s *CreateSMBFileShareInput) SetKMSEncrypted(v bool) *CreateSMBFileShareInput {
	s.KMSEncrypted = &v
	return s
}

// SetKMSKey sets the KMSKey field's value.
func (s *CreateSMBFileShareInput) SetKMSKey(v string) *CreateSMBFileShareInput {
	s.KMSKey = &v
	return s
}

// SetLocationARN sets the LocationARN field's value.
func (s *CreateSMBFileShareInput) SetLocationARN(v string) *CreateSMBFileShareInput {
	s.LocationARN = &v
	return s
}

// SetObjectACL sets the ObjectACL field's value.
func (s *CreateSMBFileShareInput) SetObjectACL(v string) *CreateSMBFileShareInput {
	s.ObjectACL = &v
	return s
}

// SetReadOnly sets the ReadOnly field's value.
func (s *CreateSMBFileShareInput) SetReadOnly(v bool) *CreateSMBFileShareInput {
	s.ReadOnly = &v
	return s
}

// SetRequesterPays sets the RequesterPays field's value.
func (s *CreateSMBFileShareInput) SetRequesterPays(v bool) *CreateSMBFileShareInput {
	s.RequesterPays = &v
	return s
}

// SetRole sets the Role field's value.
func (s *CreateSMBFileShareInput) SetRole(v string) *CreateSMBFileShareInput {
	s.Role = &v
	return s
}

// SetSMBACLEnabled sets the SMBACLEnabled field's value.
func (s *CreateSMBFileShareInput) SetSMBACLEnabled(v bool) *CreateSMBFileShareInput {
	s.SMBACLEnabled = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreateSMBFileShareInput) SetTags(v []*Tag) *CreateSMBFileShareInput {
	s.Tags = v
	return s
}

// SetValidUserList sets the ValidUserList field's value.
func (s *CreateSMBFileShareInput) SetValidUserList(v []*string) *CreateSMBFileShareInput {
	s.ValidUserList = v
	return s
}

// CreateSMBFileShareOutput
type CreateSMBFileShareOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the newly created file share.
	FileShareARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s CreateSMBFileShareOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateSMBFileShareOutput) GoString() string {
	return s.String()
}

// SetFileShareARN sets the FileShareARN field's value.
func (s *CreateSMBFileShareOutput) SetFileShareARN(v string) *CreateSMBFileShareOutput {
	s.FileShareARN = &v
	return s
}
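// Illustrative sketch (not generated code): an SMB file share that uses guest
// access. Per the Authentication docs above, InvalidUserList and ValidUserList
// apply only with ActiveDirectory, so they are omitted here. ARNs and the
// token are placeholders.
func exampleCreateSMBFileShareInput() (*CreateSMBFileShareInput, error) {
	input := new(CreateSMBFileShareInput).
		SetAuthentication("GuestAccess").
		SetClientToken("token-1234567890").
		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
		SetLocationARN("arn:aws:s3:::example-bucket").
		SetRole("arn:aws:iam::111122223333:role/StorageGatewayS3Access")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}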
type CreateSnapshotFromVolumeRecoveryPointInput struct {
	_ struct{} `type:"structure"`

	// Textual description of the snapshot that appears in the Amazon EC2 console,
	// Elastic Block Store snapshots panel in the Description field, and in the
	// AWS Storage Gateway snapshot Details pane, Description field.
	//
	// SnapshotDescription is a required field
	SnapshotDescription *string `min:"1" type:"string" required:"true"`

	// A list of up to 50 tags that can be assigned to a snapshot. Each tag is a
	// key-value pair.
	//
	// Valid characters for key and value are letters, spaces, and numbers representable
	// in UTF-8 format, and the following special characters: + - = . _ : / @. The
	// maximum length of a tag's key is 128 characters, and the maximum length for
	// a tag's value is 256 characters.
	Tags []*Tag `type:"list"`

	// The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes
	// operation to retrieve the TargetARN for the specified VolumeARN.
	//
	// VolumeARN is a required field
	VolumeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s CreateSnapshotFromVolumeRecoveryPointInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateSnapshotFromVolumeRecoveryPointInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateSnapshotFromVolumeRecoveryPointInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotFromVolumeRecoveryPointInput"}
	if s.SnapshotDescription == nil {
		invalidParams.Add(request.NewErrParamRequired("SnapshotDescription"))
	}
	if s.SnapshotDescription != nil && len(*s.SnapshotDescription) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("SnapshotDescription", 1))
	}
	if s.VolumeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
	}
	if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
	}
	if s.Tags != nil {
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSnapshotDescription sets the SnapshotDescription field's value.
func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetSnapshotDescription(v string) *CreateSnapshotFromVolumeRecoveryPointInput {
	s.SnapshotDescription = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetTags(v []*Tag) *CreateSnapshotFromVolumeRecoveryPointInput {
	s.Tags = v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetVolumeARN(v string) *CreateSnapshotFromVolumeRecoveryPointInput {
	s.VolumeARN = &v
	return s
}

type CreateSnapshotFromVolumeRecoveryPointOutput struct {
	_ struct{} `type:"structure"`

	// The ID of the snapshot.
	SnapshotId *string `type:"string"`

	// The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes
	// operation to retrieve the TargetARN for the specified VolumeARN.
	VolumeARN *string `min:"50" type:"string"`

	// The time the volume was created from the recovery point.
	VolumeRecoveryPointTime *string `type:"string"`
}

// String returns the string representation
func (s CreateSnapshotFromVolumeRecoveryPointOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s CreateSnapshotFromVolumeRecoveryPointOutput) GoString() string {
	return s.String()
}

// SetSnapshotId sets the SnapshotId field's value.
func (s *CreateSnapshotFromVolumeRecoveryPointOutput) SetSnapshotId(v string) *CreateSnapshotFromVolumeRecoveryPointOutput { s.SnapshotId = &v return s } // SetVolumeARN sets the VolumeARN field's value. func (s *CreateSnapshotFromVolumeRecoveryPointOutput) SetVolumeARN(v string) *CreateSnapshotFromVolumeRecoveryPointOutput { s.VolumeARN = &v return s } // SetVolumeRecoveryPointTime sets the VolumeRecoveryPointTime field's value. func (s *CreateSnapshotFromVolumeRecoveryPointOutput) SetVolumeRecoveryPointTime(v string) *CreateSnapshotFromVolumeRecoveryPointOutput { s.VolumeRecoveryPointTime = &v return s } // A JSON object containing one or more of the following fields: // // * CreateSnapshotInput$SnapshotDescription // // * CreateSnapshotInput$VolumeARN type CreateSnapshotInput struct { _ struct{} `type:"structure"` // Textual description of the snapshot that appears in the Amazon EC2 console, // Elastic Block Store snapshots panel in the Description field, and in the // AWS Storage Gateway snapshot Details pane, Description field. // // SnapshotDescription is a required field SnapshotDescription *string `min:"1" type:"string" required:"true"` // A list of up to 50 tags that can be assigned to a snapshot. Each tag is a // key-value pair. // // Valid characters for key and value are letters, spaces, and numbers representable // in UTF-8 format, and the following special characters: + - = . _ : / @. The // maximum length of a tag's key is 128 characters, and the maximum length for // a tag's value is 256. Tags []*Tag `type:"list"` // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation // to return a list of gateway volumes. // // VolumeARN is a required field VolumeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s CreateSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CreateSnapshotInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"} if s.SnapshotDescription == nil { invalidParams.Add(request.NewErrParamRequired("SnapshotDescription")) } if s.SnapshotDescription != nil && len(*s.SnapshotDescription) < 1 { invalidParams.Add(request.NewErrParamMinLen("SnapshotDescription", 1)) } if s.VolumeARN == nil { invalidParams.Add(request.NewErrParamRequired("VolumeARN")) } if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetSnapshotDescription sets the SnapshotDescription field's value. func (s *CreateSnapshotInput) SetSnapshotDescription(v string) *CreateSnapshotInput { s.SnapshotDescription = &v return s } // SetTags sets the Tags field's value. func (s *CreateSnapshotInput) SetTags(v []*Tag) *CreateSnapshotInput { s.Tags = v return s } // SetVolumeARN sets the VolumeARN field's value. 
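// Illustrative sketch (not part of the generated API): building a
// CreateSnapshotInput with the fluent setters defined above and checking it
// client-side with Validate before sending. The ARN and description are
// placeholders.
//
//	input := (&CreateSnapshotInput{}).
//		SetVolumeARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB").
//		SetSnapshotDescription("nightly backup")
//	if err := input.Validate(); err != nil {
//		// err is a request.ErrInvalidParams listing every violated constraint.
//		fmt.Println(err)
//	}
//
// Validate only enforces the local constraints encoded in the struct tags
// (required fields and minimum lengths); the service can still reject a
// request for other reasons.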
func (s *CreateSnapshotInput) SetVolumeARN(v string) *CreateSnapshotInput { s.VolumeARN = &v return s } // A JSON object containing the following fields: type CreateSnapshotOutput struct { _ struct{} `type:"structure"` // The snapshot ID that is used to refer to the snapshot in future operations // such as describing snapshots (Amazon Elastic Compute Cloud API DescribeSnapshots) // or creating a volume from a snapshot (CreateStorediSCSIVolume). SnapshotId *string `type:"string"` // The Amazon Resource Name (ARN) of the volume of which the snapshot was taken. VolumeARN *string `min:"50" type:"string"` } // String returns the string representation func (s CreateSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateSnapshotOutput) GoString() string { return s.String() } // SetSnapshotId sets the SnapshotId field's value. func (s *CreateSnapshotOutput) SetSnapshotId(v string) *CreateSnapshotOutput { s.SnapshotId = &v return s } // SetVolumeARN sets the VolumeARN field's value. func (s *CreateSnapshotOutput) SetVolumeARN(v string) *CreateSnapshotOutput { s.VolumeARN = &v return s } // A JSON object containing one or more of the following fields: // // * CreateStorediSCSIVolumeInput$DiskId // // * CreateStorediSCSIVolumeInput$NetworkInterfaceId // // * CreateStorediSCSIVolumeInput$PreserveExistingData // // * CreateStorediSCSIVolumeInput$SnapshotId // // * CreateStorediSCSIVolumeInput$TargetName type CreateStorediSCSIVolumeInput struct { _ struct{} `type:"structure"` // The unique identifier for the gateway local disk that is configured as a // stored volume. Use ListLocalDisks (https://docs.aws.amazon.com/storagegateway/latest/userguide/API_ListLocalDisks.html) // to list disk IDs for a gateway. // // DiskId is a required field DiskId *string `min:"1" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The network interface of the gateway on which to expose the iSCSI target. // Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a // list of the network interfaces available on a gateway. // // Valid Values: A valid IP address. // // NetworkInterfaceId is a required field NetworkInterfaceId *string `type:"string" required:"true"` // Set to true if you want to preserve the data on the local disk. Otherwise, // set to false to create an empty volume. // // Valid Values: true | false // // PreserveExistingData is a required field PreserveExistingData *bool `type:"boolean" required:"true"` // The snapshot ID (e.g. "snap-1122aabb") of the snapshot to restore as the // new stored volume. Specify this field if you want to create the iSCSI storage // volume from a snapshot; otherwise, do not include this field.
To list snapshots // for your account use DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) // in the Amazon Elastic Compute Cloud API Reference. SnapshotId *string `type:"string"` // A list of up to 50 tags that can be assigned to a stored volume. Each tag // is a key-value pair. // // Valid characters for key and value are letters, spaces, and numbers representable // in UTF-8 format, and the following special characters: + - = . _ : / @. The // maximum length of a tag's key is 128 characters, and the maximum length for // a tag's value is 256. Tags []*Tag `type:"list"` // The name of the iSCSI target used by an initiator to connect to a volume // and used as a suffix for the target ARN. For example, specifying TargetName // as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. // The target name must be unique across all volumes on a gateway. // // If you don't specify a value, Storage Gateway uses the value that was previously // used for this volume as the new target name. // // TargetName is a required field TargetName *string `min:"1" type:"string" required:"true"` } // String returns the string representation func (s CreateStorediSCSIVolumeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateStorediSCSIVolumeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CreateStorediSCSIVolumeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateStorediSCSIVolumeInput"} if s.DiskId == nil { invalidParams.Add(request.NewErrParamRequired("DiskId")) } if s.DiskId != nil && len(*s.DiskId) < 1 { invalidParams.Add(request.NewErrParamMinLen("DiskId", 1)) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } if s.NetworkInterfaceId == nil { invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) } if s.PreserveExistingData == nil { invalidParams.Add(request.NewErrParamRequired("PreserveExistingData")) } if s.TargetName == nil { invalidParams.Add(request.NewErrParamRequired("TargetName")) } if s.TargetName != nil && len(*s.TargetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("TargetName", 1)) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDiskId sets the DiskId field's value. func (s *CreateStorediSCSIVolumeInput) SetDiskId(v string) *CreateStorediSCSIVolumeInput { s.DiskId = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *CreateStorediSCSIVolumeInput) SetGatewayARN(v string) *CreateStorediSCSIVolumeInput { s.GatewayARN = &v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *CreateStorediSCSIVolumeInput) SetKMSEncrypted(v bool) *CreateStorediSCSIVolumeInput { s.KMSEncrypted = &v return s } // SetKMSKey sets the KMSKey field's value. 
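// Illustrative sketch: assembling a CreateStorediSCSIVolumeInput with its
// required fields (DiskId, GatewayARN, NetworkInterfaceId,
// PreserveExistingData, and TargetName, per Validate above). All values are
// placeholders.
//
//	input := (&CreateStorediSCSIVolumeInput{}).
//		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
//		SetDiskId("pci-0000:03:00.0-scsi-0:0:0:0").
//		SetNetworkInterfaceId("10.1.2.3").
//		SetPreserveExistingData(false). // start from an empty volume
//		SetTargetName("myvolume")
//	if err := input.Validate(); err != nil {
//		fmt.Println(err)
//	}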
func (s *CreateStorediSCSIVolumeInput) SetKMSKey(v string) *CreateStorediSCSIVolumeInput { s.KMSKey = &v return s } // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *CreateStorediSCSIVolumeInput) SetNetworkInterfaceId(v string) *CreateStorediSCSIVolumeInput { s.NetworkInterfaceId = &v return s } // SetPreserveExistingData sets the PreserveExistingData field's value. func (s *CreateStorediSCSIVolumeInput) SetPreserveExistingData(v bool) *CreateStorediSCSIVolumeInput { s.PreserveExistingData = &v return s } // SetSnapshotId sets the SnapshotId field's value. func (s *CreateStorediSCSIVolumeInput) SetSnapshotId(v string) *CreateStorediSCSIVolumeInput { s.SnapshotId = &v return s } // SetTags sets the Tags field's value. func (s *CreateStorediSCSIVolumeInput) SetTags(v []*Tag) *CreateStorediSCSIVolumeInput { s.Tags = v return s } // SetTargetName sets the TargetName field's value. func (s *CreateStorediSCSIVolumeInput) SetTargetName(v string) *CreateStorediSCSIVolumeInput { s.TargetName = &v return s } // A JSON object containing the following fields: type CreateStorediSCSIVolumeOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI // name that initiators can use to connect to the target. TargetARN *string `min:"50" type:"string"` // The Amazon Resource Name (ARN) of the configured volume. VolumeARN *string `min:"50" type:"string"` // The size of the volume in bytes. VolumeSizeInBytes *int64 `type:"long"` } // String returns the string representation func (s CreateStorediSCSIVolumeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateStorediSCSIVolumeOutput) GoString() string { return s.String() } // SetTargetARN sets the TargetARN field's value. func (s *CreateStorediSCSIVolumeOutput) SetTargetARN(v string) *CreateStorediSCSIVolumeOutput { s.TargetARN = &v return s } // SetVolumeARN sets the VolumeARN field's value. func (s *CreateStorediSCSIVolumeOutput) SetVolumeARN(v string) *CreateStorediSCSIVolumeOutput { s.VolumeARN = &v return s } // SetVolumeSizeInBytes sets the VolumeSizeInBytes field's value. func (s *CreateStorediSCSIVolumeOutput) SetVolumeSizeInBytes(v int64) *CreateStorediSCSIVolumeOutput { s.VolumeSizeInBytes = &v return s } // CreateTapeWithBarcodeInput type CreateTapeWithBarcodeInput struct { _ struct{} `type:"structure"` // The unique Amazon Resource Name (ARN) that represents the gateway to associate // the virtual tape with. Use the ListGateways operation to return a list of // gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. 
When you use your backup application to eject the tape, the tape // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) // that corresponds to the pool. // // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // A list of up to 50 tags that can be assigned to a virtual tape that has a // barcode. Each tag is a key-value pair. // // Valid characters for key and value are letters, spaces, and numbers representable // in UTF-8 format, and the following special characters: + - = . _ : / @. The // maximum length of a tag's key is 128 characters, and the maximum length for // a tag's value is 256. Tags []*Tag `type:"list"` // The barcode that you want to assign to the tape. // // Barcodes cannot be reused. This includes barcodes used for tapes that have // been deleted. // // TapeBarcode is a required field TapeBarcode *string `min:"7" type:"string" required:"true"` // The size, in bytes, of the virtual tape that you want to create. // // The size must be aligned by gigabyte (1024*1024*1024 bytes). // // TapeSizeInBytes is a required field TapeSizeInBytes *int64 `type:"long" required:"true"` } // String returns the string representation func (s CreateTapeWithBarcodeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateTapeWithBarcodeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CreateTapeWithBarcodeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateTapeWithBarcodeInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } if s.PoolId != nil && len(*s.PoolId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PoolId", 1)) } if s.TapeBarcode == nil { invalidParams.Add(request.NewErrParamRequired("TapeBarcode")) } if s.TapeBarcode != nil && len(*s.TapeBarcode) < 7 { invalidParams.Add(request.NewErrParamMinLen("TapeBarcode", 7)) } if s.TapeSizeInBytes == nil { invalidParams.Add(request.NewErrParamRequired("TapeSizeInBytes")) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *CreateTapeWithBarcodeInput) SetGatewayARN(v string) *CreateTapeWithBarcodeInput { s.GatewayARN = &v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *CreateTapeWithBarcodeInput) SetKMSEncrypted(v bool) *CreateTapeWithBarcodeInput { s.KMSEncrypted = &v return s } // SetKMSKey sets the KMSKey field's value. func (s *CreateTapeWithBarcodeInput) SetKMSKey(v string) *CreateTapeWithBarcodeInput { s.KMSKey = &v return s } // SetPoolId sets the PoolId field's value. func (s *CreateTapeWithBarcodeInput) SetPoolId(v string) *CreateTapeWithBarcodeInput { s.PoolId = &v return s } // SetTags sets the Tags field's value. func (s *CreateTapeWithBarcodeInput) SetTags(v []*Tag) *CreateTapeWithBarcodeInput { s.Tags = v return s } // SetTapeBarcode sets the TapeBarcode field's value.
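// Illustrative sketch: TapeSizeInBytes must be a whole number of gigabytes
// (1024*1024*1024 bytes), so it is least error-prone to compute the size as a
// multiple of that constant. The barcode and ARN are placeholders.
//
//	const gib = int64(1024 * 1024 * 1024)
//	input := (&CreateTapeWithBarcodeInput{}).
//		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
//		SetTapeBarcode("TEST01A2").
//		SetTapeSizeInBytes(100 * gib) // 100 GiB, gigabyte-aligned by construction
//	if err := input.Validate(); err != nil {
//		fmt.Println(err)
//	}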
func (s *CreateTapeWithBarcodeInput) SetTapeBarcode(v string) *CreateTapeWithBarcodeInput { s.TapeBarcode = &v return s } // SetTapeSizeInBytes sets the TapeSizeInBytes field's value. func (s *CreateTapeWithBarcodeInput) SetTapeSizeInBytes(v int64) *CreateTapeWithBarcodeInput { s.TapeSizeInBytes = &v return s } // CreateTapeWithBarcodeOutput type CreateTapeWithBarcodeOutput struct { _ struct{} `type:"structure"` // A unique Amazon Resource Name (ARN) that represents the virtual tape that // was created. TapeARN *string `min:"50" type:"string"` } // String returns the string representation func (s CreateTapeWithBarcodeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateTapeWithBarcodeOutput) GoString() string { return s.String() } // SetTapeARN sets the TapeARN field's value. func (s *CreateTapeWithBarcodeOutput) SetTapeARN(v string) *CreateTapeWithBarcodeOutput { s.TapeARN = &v return s } // CreateTapesInput type CreateTapesInput struct { _ struct{} `type:"structure"` // A unique identifier that you use to retry a request. If you retry a request, // use the same ClientToken you specified in the initial request. // // Using the same ClientToken prevents creating the tape multiple times. // // ClientToken is a required field ClientToken *string `min:"5" type:"string" required:"true"` // The unique Amazon Resource Name (ARN) that represents the gateway to associate // the virtual tapes with. Use the ListGateways operation to return a list of // gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The number of virtual tapes that you want to create. // // NumTapesToCreate is a required field NumTapesToCreate *int64 `min:"1" type:"integer" required:"true"` // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep // Archive) that corresponds to the pool. // // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // A list of up to 50 tags that can be assigned to a virtual tape. Each tag // is a key-value pair. // // Valid characters for key and value are letters, spaces, and numbers representable // in UTF-8 format, and the following special characters: + - = . _ : / @. The // maximum length of a tag's key is 128 characters, and the maximum length for // a tag's value is 256. Tags []*Tag `type:"list"` // A prefix that you prepend to the barcode of the virtual tape you are creating. // This prefix makes the barcode unique. // // The prefix must be 1 to 4 characters in length and must be one of the uppercase // letters from A to Z. // // TapeBarcodePrefix is a required field TapeBarcodePrefix *string `min:"1" type:"string" required:"true"` // The size, in bytes, of the virtual tapes that you want to create.
// // The size must be aligned by gigabyte (1024*1024*1024 bytes). // // TapeSizeInBytes is a required field TapeSizeInBytes *int64 `type:"long" required:"true"` } // String returns the string representation func (s CreateTapesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateTapesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CreateTapesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateTapesInput"} if s.ClientToken == nil { invalidParams.Add(request.NewErrParamRequired("ClientToken")) } if s.ClientToken != nil && len(*s.ClientToken) < 5 { invalidParams.Add(request.NewErrParamMinLen("ClientToken", 5)) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } if s.NumTapesToCreate == nil { invalidParams.Add(request.NewErrParamRequired("NumTapesToCreate")) } if s.NumTapesToCreate != nil && *s.NumTapesToCreate < 1 { invalidParams.Add(request.NewErrParamMinValue("NumTapesToCreate", 1)) } if s.PoolId != nil && len(*s.PoolId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PoolId", 1)) } if s.TapeBarcodePrefix == nil { invalidParams.Add(request.NewErrParamRequired("TapeBarcodePrefix")) } if s.TapeBarcodePrefix != nil && len(*s.TapeBarcodePrefix) < 1 { invalidParams.Add(request.NewErrParamMinLen("TapeBarcodePrefix", 1)) } if s.TapeSizeInBytes == nil { invalidParams.Add(request.NewErrParamRequired("TapeSizeInBytes")) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetClientToken sets the ClientToken field's value. func (s *CreateTapesInput) SetClientToken(v string) *CreateTapesInput { s.ClientToken = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *CreateTapesInput) SetGatewayARN(v string) *CreateTapesInput { s.GatewayARN = &v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *CreateTapesInput) SetKMSEncrypted(v bool) *CreateTapesInput { s.KMSEncrypted = &v return s } // SetKMSKey sets the KMSKey field's value. func (s *CreateTapesInput) SetKMSKey(v string) *CreateTapesInput { s.KMSKey = &v return s } // SetNumTapesToCreate sets the NumTapesToCreate field's value. func (s *CreateTapesInput) SetNumTapesToCreate(v int64) *CreateTapesInput { s.NumTapesToCreate = &v return s } // SetPoolId sets the PoolId field's value. func (s *CreateTapesInput) SetPoolId(v string) *CreateTapesInput { s.PoolId = &v return s } // SetTags sets the Tags field's value. func (s *CreateTapesInput) SetTags(v []*Tag) *CreateTapesInput { s.Tags = v return s } // SetTapeBarcodePrefix sets the TapeBarcodePrefix field's value. func (s *CreateTapesInput) SetTapeBarcodePrefix(v string) *CreateTapesInput { s.TapeBarcodePrefix = &v return s } // SetTapeSizeInBytes sets the TapeSizeInBytes field's value. 
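// Illustrative sketch: CreateTapes is made idempotent by ClientToken, so a
// retry of a failed call should reuse the exact token from the first attempt
// to avoid creating the tapes twice. All values are placeholders.
//
//	token := "b3f2c1d0-5a6e-4c7d-9e8f-0a1b2c3d4e5f" // stable per logical request, at least 5 characters
//	input := (&CreateTapesInput{}).
//		SetClientToken(token).
//		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
//		SetNumTapesToCreate(5).
//		SetTapeBarcodePrefix("TST").
//		SetTapeSizeInBytes(100 * 1024 * 1024 * 1024)
//	if err := input.Validate(); err != nil {
//		fmt.Println(err)
//	}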
func (s *CreateTapesInput) SetTapeSizeInBytes(v int64) *CreateTapesInput { s.TapeSizeInBytes = &v return s } // CreateTapesOutput type CreateTapesOutput struct { _ struct{} `type:"structure"` // A list of unique Amazon Resource Names (ARNs) that represents the virtual // tapes that were created. TapeARNs []*string `type:"list"` } // String returns the string representation func (s CreateTapesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateTapesOutput) GoString() string { return s.String() } // SetTapeARNs sets the TapeARNs field's value. func (s *CreateTapesOutput) SetTapeARNs(v []*string) *CreateTapesOutput { s.TapeARNs = v return s } type DeleteAutomaticTapeCreationPolicyInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteAutomaticTapeCreationPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteAutomaticTapeCreationPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteAutomaticTapeCreationPolicyInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteAutomaticTapeCreationPolicyInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteAutomaticTapeCreationPolicyInput) SetGatewayARN(v string) *DeleteAutomaticTapeCreationPolicyInput { s.GatewayARN = &v return s } type DeleteAutomaticTapeCreationPolicyOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteAutomaticTapeCreationPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteAutomaticTapeCreationPolicyOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteAutomaticTapeCreationPolicyOutput) SetGatewayARN(v string) *DeleteAutomaticTapeCreationPolicyOutput { s.GatewayARN = &v return s } // A JSON object containing the following fields: // // * DeleteBandwidthRateLimitInput$BandwidthType type DeleteBandwidthRateLimitInput struct { _ struct{} `type:"structure"` // One of the BandwidthType values that indicates the gateway bandwidth rate // limit to delete. // // Valid Values: Upload | Download | All // // BandwidthType is a required field BandwidthType *string `min:"3" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region.
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteBandwidthRateLimitInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteBandwidthRateLimitInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteBandwidthRateLimitInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteBandwidthRateLimitInput"} if s.BandwidthType == nil { invalidParams.Add(request.NewErrParamRequired("BandwidthType")) } if s.BandwidthType != nil && len(*s.BandwidthType) < 3 { invalidParams.Add(request.NewErrParamMinLen("BandwidthType", 3)) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBandwidthType sets the BandwidthType field's value. func (s *DeleteBandwidthRateLimitInput) SetBandwidthType(v string) *DeleteBandwidthRateLimitInput { s.BandwidthType = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteBandwidthRateLimitInput) SetGatewayARN(v string) *DeleteBandwidthRateLimitInput { s.GatewayARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway whose // bandwidth rate information was deleted. type DeleteBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteBandwidthRateLimitOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteBandwidthRateLimitOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteBandwidthRateLimitOutput) SetGatewayARN(v string) *DeleteBandwidthRateLimitOutput { s.GatewayARN = &v return s } // A JSON object containing one or more of the following fields: // // * DeleteChapCredentialsInput$InitiatorName // // * DeleteChapCredentialsInput$TargetARN type DeleteChapCredentialsInput struct { _ struct{} `type:"structure"` // The iSCSI initiator that connects to the target. // // InitiatorName is a required field InitiatorName *string `min:"1" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes // operation to retrieve the TargetARN for the specified VolumeARN. // // TargetARN is a required field TargetARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteChapCredentialsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteChapCredentialsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
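// Illustrative sketch: removing the CHAP credentials for one
// initiator/target pair. Note that TargetARN is the iSCSI target ARN, not the
// volume ARN; both values here are placeholders.
//
//	input := (&DeleteChapCredentialsInput{}).
//		SetInitiatorName("iqn.1991-05.com.microsoft:win-client").
//		SetTargetARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume")
//	if err := input.Validate(); err != nil {
//		fmt.Println(err)
//	}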
func (s *DeleteChapCredentialsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteChapCredentialsInput"} if s.InitiatorName == nil { invalidParams.Add(request.NewErrParamRequired("InitiatorName")) } if s.InitiatorName != nil && len(*s.InitiatorName) < 1 { invalidParams.Add(request.NewErrParamMinLen("InitiatorName", 1)) } if s.TargetARN == nil { invalidParams.Add(request.NewErrParamRequired("TargetARN")) } if s.TargetARN != nil && len(*s.TargetARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TargetARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetInitiatorName sets the InitiatorName field's value. func (s *DeleteChapCredentialsInput) SetInitiatorName(v string) *DeleteChapCredentialsInput { s.InitiatorName = &v return s } // SetTargetARN sets the TargetARN field's value. func (s *DeleteChapCredentialsInput) SetTargetARN(v string) *DeleteChapCredentialsInput { s.TargetARN = &v return s } // A JSON object containing the following fields: type DeleteChapCredentialsOutput struct { _ struct{} `type:"structure"` // The iSCSI initiator that connects to the target. InitiatorName *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the target. TargetARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteChapCredentialsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteChapCredentialsOutput) GoString() string { return s.String() } // SetInitiatorName sets the InitiatorName field's value. func (s *DeleteChapCredentialsOutput) SetInitiatorName(v string) *DeleteChapCredentialsOutput { s.InitiatorName = &v return s } // SetTargetARN sets the TargetARN field's value. func (s *DeleteChapCredentialsOutput) SetTargetARN(v string) *DeleteChapCredentialsOutput { s.TargetARN = &v return s } // DeleteFileShareInput type DeleteFileShareInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file share to be deleted. // // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` // If this value is set to true, the operation deletes a file share immediately, // aborts all data uploads to AWS, and moves the file share into the FORCE_DELETING // status. Otherwise, the file share is not deleted until all data is uploaded // to AWS. // // Valid Values: true | false ForceDelete *bool `type:"boolean"` } // String returns the string representation func (s DeleteFileShareInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteFileShareInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteFileShareInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteFileShareInput"} if s.FileShareARN == nil { invalidParams.Add(request.NewErrParamRequired("FileShareARN")) } if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetFileShareARN sets the FileShareARN field's value. func (s *DeleteFileShareInput) SetFileShareARN(v string) *DeleteFileShareInput { s.FileShareARN = &v return s } // SetForceDelete sets the ForceDelete field's value.
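// Illustrative sketch: a force delete abandons any data not yet uploaded to
// AWS and moves the share to FORCE_DELETING, so it should be reserved for
// shares whose pending data can be discarded. The ARN is a placeholder.
//
//	input := (&DeleteFileShareInput{}).
//		SetFileShareARN("arn:aws:storagegateway:us-east-2:111122223333:share/share-12345678").
//		SetForceDelete(true)
//	if err := input.Validate(); err != nil {
//		fmt.Println(err)
//	}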
func (s *DeleteFileShareInput) SetForceDelete(v bool) *DeleteFileShareInput { s.ForceDelete = &v return s } // DeleteFileShareOutput type DeleteFileShareOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the deleted file share. FileShareARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteFileShareOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteFileShareOutput) GoString() string { return s.String() } // SetFileShareARN sets the FileShareARN field's value. func (s *DeleteFileShareOutput) SetFileShareARN(v string) *DeleteFileShareOutput { s.FileShareARN = &v return s } // A JSON object containing the ID of the gateway to delete. type DeleteGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteGatewayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteGatewayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteGatewayInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteGatewayInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteGatewayInput) SetGatewayARN(v string) *DeleteGatewayInput { s.GatewayARN = &v return s } // A JSON object containing the ID of the deleted gateway. type DeleteGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteGatewayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteGatewayOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteGatewayOutput) SetGatewayARN(v string) *DeleteGatewayOutput { s.GatewayARN = &v return s } type DeleteSnapshotScheduleInput struct { _ struct{} `type:"structure"` // The volume whose snapshot schedule you want to delete. // // VolumeARN is a required field VolumeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteSnapshotScheduleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteSnapshotScheduleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteSnapshotScheduleInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotScheduleInput"} if s.VolumeARN == nil { invalidParams.Add(request.NewErrParamRequired("VolumeARN")) } if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetVolumeARN sets the VolumeARN field's value. func (s *DeleteSnapshotScheduleInput) SetVolumeARN(v string) *DeleteSnapshotScheduleInput { s.VolumeARN = &v return s } type DeleteSnapshotScheduleOutput struct { _ struct{} `type:"structure"` // The volume whose snapshot schedule was deleted. VolumeARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteSnapshotScheduleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteSnapshotScheduleOutput) GoString() string { return s.String() } // SetVolumeARN sets the VolumeARN field's value. func (s *DeleteSnapshotScheduleOutput) SetVolumeARN(v string) *DeleteSnapshotScheduleOutput { s.VolumeARN = &v return s } // DeleteTapeArchiveInput type DeleteTapeArchiveInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the virtual tape to delete from the virtual // tape shelf (VTS). // // TapeARN is a required field TapeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteTapeArchiveInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteTapeArchiveInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteTapeArchiveInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteTapeArchiveInput"} if s.TapeARN == nil { invalidParams.Add(request.NewErrParamRequired("TapeARN")) } if s.TapeARN != nil && len(*s.TapeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetTapeARN sets the TapeARN field's value. func (s *DeleteTapeArchiveInput) SetTapeARN(v string) *DeleteTapeArchiveInput { s.TapeARN = &v return s } // DeleteTapeArchiveOutput type DeleteTapeArchiveOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the virtual tape that was deleted from // the virtual tape shelf (VTS). TapeARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteTapeArchiveOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteTapeArchiveOutput) GoString() string { return s.String() } // SetTapeARN sets the TapeARN field's value. func (s *DeleteTapeArchiveOutput) SetTapeARN(v string) *DeleteTapeArchiveOutput { s.TapeARN = &v return s } // DeleteTapeInput type DeleteTapeInput struct { _ struct{} `type:"structure"` // The unique Amazon Resource Name (ARN) of the gateway that the virtual tape // to delete is associated with. Use the ListGateways operation to return a // list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the virtual tape to delete.
// // TapeARN is a required field TapeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteTapeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteTapeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteTapeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteTapeInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.TapeARN == nil { invalidParams.Add(request.NewErrParamRequired("TapeARN")) } if s.TapeARN != nil && len(*s.TapeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteTapeInput) SetGatewayARN(v string) *DeleteTapeInput { s.GatewayARN = &v return s } // SetTapeARN sets the TapeARN field's value. func (s *DeleteTapeInput) SetTapeARN(v string) *DeleteTapeInput { s.TapeARN = &v return s } // DeleteTapeOutput type DeleteTapeOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the deleted virtual tape. TapeARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteTapeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteTapeOutput) GoString() string { return s.String() } // SetTapeARN sets the TapeARN field's value. func (s *DeleteTapeOutput) SetTapeARN(v string) *DeleteTapeOutput { s.TapeARN = &v return s } // A JSON object containing the DeleteVolumeInput$VolumeARN to delete. type DeleteVolumeInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation // to return a list of gateway volumes. // // VolumeARN is a required field VolumeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DeleteVolumeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteVolumeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteVolumeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteVolumeInput"} if s.VolumeARN == nil { invalidParams.Add(request.NewErrParamRequired("VolumeARN")) } if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetVolumeARN sets the VolumeARN field's value. func (s *DeleteVolumeInput) SetVolumeARN(v string) *DeleteVolumeInput { s.VolumeARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the storage volume // that was deleted. type DeleteVolumeOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the storage volume that was deleted. It // is the same ARN you provided in the request. 
VolumeARN *string `min:"50" type:"string"` } // String returns the string representation func (s DeleteVolumeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteVolumeOutput) GoString() string { return s.String() } // SetVolumeARN sets the VolumeARN field's value. func (s *DeleteVolumeOutput) SetVolumeARN(v string) *DeleteVolumeOutput { s.VolumeARN = &v return s } type DescribeAvailabilityMonitorTestInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DescribeAvailabilityMonitorTestInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeAvailabilityMonitorTestInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeAvailabilityMonitorTestInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeAvailabilityMonitorTestInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *DescribeAvailabilityMonitorTestInput) SetGatewayARN(v string) *DescribeAvailabilityMonitorTestInput { s.GatewayARN = &v return s } type DescribeAvailabilityMonitorTestOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The time the High Availability monitoring test was started. If a test hasn't // been performed, the value of this field is null. StartTime *time.Time `type:"timestamp"` // The status of the High Availability monitoring test. If a test hasn't been // performed, the value of this field is null. Status *string `type:"string" enum:"AvailabilityMonitorTestStatus"` } // String returns the string representation func (s DescribeAvailabilityMonitorTestOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeAvailabilityMonitorTestOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *DescribeAvailabilityMonitorTestOutput) SetGatewayARN(v string) *DescribeAvailabilityMonitorTestOutput { s.GatewayARN = &v return s } // SetStartTime sets the StartTime field's value. func (s *DescribeAvailabilityMonitorTestOutput) SetStartTime(v time.Time) *DescribeAvailabilityMonitorTestOutput { s.StartTime = &v return s } // SetStatus sets the Status field's value. func (s *DescribeAvailabilityMonitorTestOutput) SetStatus(v string) *DescribeAvailabilityMonitorTestOutput { s.Status = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeBandwidthRateLimitInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. 
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DescribeBandwidthRateLimitInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeBandwidthRateLimitInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeBandwidthRateLimitInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeBandwidthRateLimitInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *DescribeBandwidthRateLimitInput) SetGatewayARN(v string) *DescribeBandwidthRateLimitInput { s.GatewayARN = &v return s } // A JSON object containing the following fields: type DescribeBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` // The average download bandwidth rate limit in bits per second. This field // does not appear in the response if the download rate limit is not set. AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"` // The average upload bandwidth rate limit in bits per second. This field does // not appear in the response if the upload rate limit is not set. AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s DescribeBandwidthRateLimitOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeBandwidthRateLimitOutput) GoString() string { return s.String() } // SetAverageDownloadRateLimitInBitsPerSec sets the AverageDownloadRateLimitInBitsPerSec field's value. func (s *DescribeBandwidthRateLimitOutput) SetAverageDownloadRateLimitInBitsPerSec(v int64) *DescribeBandwidthRateLimitOutput { s.AverageDownloadRateLimitInBitsPerSec = &v return s } // SetAverageUploadRateLimitInBitsPerSec sets the AverageUploadRateLimitInBitsPerSec field's value. func (s *DescribeBandwidthRateLimitOutput) SetAverageUploadRateLimitInBitsPerSec(v int64) *DescribeBandwidthRateLimitOutput { s.AverageUploadRateLimitInBitsPerSec = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *DescribeBandwidthRateLimitOutput) SetGatewayARN(v string) *DescribeBandwidthRateLimitOutput { s.GatewayARN = &v return s } type DescribeCacheInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DescribeCacheInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeCacheInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
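// Illustrative sketch: the rate-limit fields of
// DescribeBandwidthRateLimitOutput (defined above) are omitted from the
// response when no limit is configured, so the pointers must be nil-checked
// before dereferencing. Here out stands for a response value.
//
//	if out.AverageUploadRateLimitInBitsPerSec != nil {
//		fmt.Printf("upload limit: %d bits/s\n", *out.AverageUploadRateLimitInBitsPerSec)
//	} else {
//		fmt.Println("no upload rate limit configured")
//	}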
func (s *DescribeCacheInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeCacheInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *DescribeCacheInput) SetGatewayARN(v string) *DescribeCacheInput { s.GatewayARN = &v return s } type DescribeCacheOutput struct { _ struct{} `type:"structure"` // The amount of cache in bytes allocated to a gateway. CacheAllocatedInBytes *int64 `type:"long"` // The file share's contribution to the overall percentage of the gateway's // cache that has not been persisted to AWS. The sample is taken at the end // of the reporting period. CacheDirtyPercentage *float64 `type:"double"` // Percent of application read operations from the file shares that are served // from cache. The sample is taken at the end of the reporting period. CacheHitPercentage *float64 `type:"double"` // Percent of application read operations from the file shares that are not // served from cache. The sample is taken at the end of the reporting period. CacheMissPercentage *float64 `type:"double"` // Percent use of the gateway's cache storage. This metric applies only to the // gateway-cached volume setup. The sample is taken at the end of the reporting // period. CacheUsedPercentage *float64 `type:"double"` // An array of strings that identify disks that are to be configured as working // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. DiskIds []*string `type:"list"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s DescribeCacheOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeCacheOutput) GoString() string { return s.String() } // SetCacheAllocatedInBytes sets the CacheAllocatedInBytes field's value. func (s *DescribeCacheOutput) SetCacheAllocatedInBytes(v int64) *DescribeCacheOutput { s.CacheAllocatedInBytes = &v return s } // SetCacheDirtyPercentage sets the CacheDirtyPercentage field's value. func (s *DescribeCacheOutput) SetCacheDirtyPercentage(v float64) *DescribeCacheOutput { s.CacheDirtyPercentage = &v return s } // SetCacheHitPercentage sets the CacheHitPercentage field's value. func (s *DescribeCacheOutput) SetCacheHitPercentage(v float64) *DescribeCacheOutput { s.CacheHitPercentage = &v return s } // SetCacheMissPercentage sets the CacheMissPercentage field's value. func (s *DescribeCacheOutput) SetCacheMissPercentage(v float64) *DescribeCacheOutput { s.CacheMissPercentage = &v return s } // SetCacheUsedPercentage sets the CacheUsedPercentage field's value. func (s *DescribeCacheOutput) SetCacheUsedPercentage(v float64) *DescribeCacheOutput { s.CacheUsedPercentage = &v return s } // SetDiskIds sets the DiskIds field's value. func (s *DescribeCacheOutput) SetDiskIds(v []*string) *DescribeCacheOutput { s.DiskIds = v return s } // SetGatewayARN sets the GatewayARN field's value. 
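// Illustrative sketch: estimating free cache space from a DescribeCacheOutput
// (defined above). CacheUsedPercentage is a percentage of
// CacheAllocatedInBytes; out is a placeholder for a response value, and both
// fields are nil-checked because they are pointers.
//
//	if out.CacheAllocatedInBytes != nil && out.CacheUsedPercentage != nil {
//		freeBytes := float64(*out.CacheAllocatedInBytes) * (100 - *out.CacheUsedPercentage) / 100
//		fmt.Printf("roughly %.0f bytes of cache free\n", freeBytes)
//	}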
func (s *DescribeCacheOutput) SetGatewayARN(v string) *DescribeCacheOutput { s.GatewayARN = &v return s } type DescribeCachediSCSIVolumesInput struct { _ struct{} `type:"structure"` // An array of strings where each string represents the Amazon Resource Name // (ARN) of a cached volume. All of the specified cached volumes must be from // the same gateway. Use ListVolumes to get volume ARNs for a gateway. // // VolumeARNs is a required field VolumeARNs []*string `type:"list" required:"true"` } // String returns the string representation func (s DescribeCachediSCSIVolumesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeCachediSCSIVolumesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeCachediSCSIVolumesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeCachediSCSIVolumesInput"} if s.VolumeARNs == nil { invalidParams.Add(request.NewErrParamRequired("VolumeARNs")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetVolumeARNs sets the VolumeARNs field's value. func (s *DescribeCachediSCSIVolumesInput) SetVolumeARNs(v []*string) *DescribeCachediSCSIVolumesInput { s.VolumeARNs = v return s } // A JSON object containing the following fields: type DescribeCachediSCSIVolumesOutput struct { _ struct{} `type:"structure"` // An array of objects where each object contains metadata about one cached // volume. CachediSCSIVolumes []*CachediSCSIVolume `type:"list"` } // String returns the string representation func (s DescribeCachediSCSIVolumesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeCachediSCSIVolumesOutput) GoString() string { return s.String() } // SetCachediSCSIVolumes sets the CachediSCSIVolumes field's value. func (s *DescribeCachediSCSIVolumesOutput) SetCachediSCSIVolumes(v []*CachediSCSIVolume) *DescribeCachediSCSIVolumesOutput { s.CachediSCSIVolumes = v return s } // A JSON object containing the Amazon Resource Name (ARN) of the iSCSI volume // target. type DescribeChapCredentialsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes // operation to retrieve the TargetARN for the specified VolumeARN. // // TargetARN is a required field TargetARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s DescribeChapCredentialsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DescribeChapCredentialsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeChapCredentialsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeChapCredentialsInput"} if s.TargetARN == nil { invalidParams.Add(request.NewErrParamRequired("TargetARN")) } if s.TargetARN != nil && len(*s.TargetARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TargetARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetTargetARN sets the TargetARN field's value.
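// Illustrative sketch: walking the ChapCredentials slice of a
// DescribeChapCredentialsOutput (defined below). The ChapInfo field name used
// here follows the doc comment below and is an assumption, since the ChapInfo
// type itself is declared elsewhere; an empty slice simply means no CHAP
// credentials are set for the target.
//
//	for _, cred := range out.ChapCredentials {
//		if cred == nil || cred.InitiatorName == nil {
//			continue
//		}
//		fmt.Println("initiator:", *cred.InitiatorName)
//	}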
func (s *DescribeChapCredentialsInput) SetTargetARN(v string) *DescribeChapCredentialsInput {
	s.TargetARN = &v
	return s
}

// A JSON object containing the following fields:
type DescribeChapCredentialsOutput struct {
	_ struct{} `type:"structure"`

	// An array of ChapInfo objects that represent CHAP credentials. Each object
	// in the array contains CHAP credential information for one target-initiator
	// pair. If no CHAP credentials are set, an empty array is returned. CHAP
	// credential information is provided in a JSON object with the following
	// fields:
	//
	//    * InitiatorName: The iSCSI initiator that connects to the target.
	//
	//    * SecretToAuthenticateInitiator: The secret key that the initiator (for
	//    example, the Windows client) must provide to participate in mutual CHAP
	//    with the target.
	//
	//    * SecretToAuthenticateTarget: The secret key that the target must provide
	//    to participate in mutual CHAP with the initiator (for example, the Windows
	//    client).
	//
	//    * TargetARN: The Amazon Resource Name (ARN) of the storage volume.
	ChapCredentials []*ChapInfo `type:"list"`
}

// String returns the string representation
func (s DescribeChapCredentialsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeChapCredentialsOutput) GoString() string {
	return s.String()
}

// SetChapCredentials sets the ChapCredentials field's value.
func (s *DescribeChapCredentialsOutput) SetChapCredentials(v []*ChapInfo) *DescribeChapCredentialsOutput {
	s.ChapCredentials = v
	return s
}
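// Illustrative sketch (not generated code): iterating the CHAP credentials
// returned for a target. The out variable and the aws.StringValue accessor
// are assumptions for the example.
//
//	for _, info := range out.ChapCredentials {
//		fmt.Printf("initiator %s -> target %s\n",
//			aws.StringValue(info.InitiatorName),
//			aws.StringValue(info.TargetARN))
//	}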
// A JSON object containing the ID of the gateway.
type DescribeGatewayInformationInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeGatewayInformationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeGatewayInformationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeGatewayInformationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeGatewayInformationInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeGatewayInformationInput) SetGatewayARN(v string) *DescribeGatewayInformationInput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing the following fields:
type DescribeGatewayInformationOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the Amazon CloudWatch Log Group that is
	// used to monitor events in the gateway.
	CloudWatchLogGroupARN *string `type:"string"`

	// The ID of the Amazon EC2 instance that was used to launch the gateway.
	Ec2InstanceId *string `type:"string"`

	// The AWS Region where the Amazon EC2 instance is located.
	Ec2InstanceRegion *string `type:"string"`

	// The type of endpoint for your gateway.
	//
	// Valid Values: STANDARD | FIPS
	EndpointType *string `min:"4" type:"string"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// The unique identifier assigned to your gateway during activation. This ID
	// becomes part of the gateway Amazon Resource Name (ARN), which you use as
	// input for other operations.
	GatewayId *string `min:"12" type:"string"`

	// The name you configured for your gateway.
	GatewayName *string `type:"string"`

	// A NetworkInterface array that contains descriptions of the gateway network
	// interfaces.
	GatewayNetworkInterfaces []*NetworkInterface `type:"list"`

	// A value that indicates the operating state of the gateway.
	GatewayState *string `min:"2" type:"string"`

	// A value that indicates the time zone configured for the gateway.
	GatewayTimezone *string `min:"3" type:"string"`

	// The type of the gateway.
	GatewayType *string `min:"2" type:"string"`

	// The type of hypervisor environment used by the host.
	HostEnvironment *string `type:"string" enum:"HostEnvironment"`

	// The date on which the last software update was applied to the gateway. If
	// the gateway has never been updated, this field does not return a value in
	// the response.
	LastSoftwareUpdate *string `min:"1" type:"string"`

	// The date on which an update to the gateway is available. This date is in
	// the time zone of the gateway. If the gateway is not available for an update,
	// this field is not returned in the response.
	NextUpdateAvailabilityDate *string `min:"1" type:"string"`

	// A list of up to 50 tags assigned to the gateway, sorted alphabetically by
	// key name. Each tag is a key-value pair. For a gateway with more than 10
	// tags assigned, you can view all tags using the ListTagsForResource API
	// operation.
	Tags []*Tag `type:"list"`

	// The configuration settings for the virtual private cloud (VPC) endpoint
	// for your gateway.
	VPCEndpoint *string `type:"string"`
}

// String returns the string representation
func (s DescribeGatewayInformationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeGatewayInformationOutput) GoString() string {
	return s.String()
}

// SetCloudWatchLogGroupARN sets the CloudWatchLogGroupARN field's value.
func (s *DescribeGatewayInformationOutput) SetCloudWatchLogGroupARN(v string) *DescribeGatewayInformationOutput {
	s.CloudWatchLogGroupARN = &v
	return s
}

// SetEc2InstanceId sets the Ec2InstanceId field's value.
func (s *DescribeGatewayInformationOutput) SetEc2InstanceId(v string) *DescribeGatewayInformationOutput {
	s.Ec2InstanceId = &v
	return s
}

// SetEc2InstanceRegion sets the Ec2InstanceRegion field's value.
func (s *DescribeGatewayInformationOutput) SetEc2InstanceRegion(v string) *DescribeGatewayInformationOutput {
	s.Ec2InstanceRegion = &v
	return s
}

// SetEndpointType sets the EndpointType field's value.
func (s *DescribeGatewayInformationOutput) SetEndpointType(v string) *DescribeGatewayInformationOutput {
	s.EndpointType = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayARN(v string) *DescribeGatewayInformationOutput {
	s.GatewayARN = &v
	return s
}

// SetGatewayId sets the GatewayId field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayId(v string) *DescribeGatewayInformationOutput {
	s.GatewayId = &v
	return s
}

// SetGatewayName sets the GatewayName field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayName(v string) *DescribeGatewayInformationOutput {
	s.GatewayName = &v
	return s
}

// SetGatewayNetworkInterfaces sets the GatewayNetworkInterfaces field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayNetworkInterfaces(v []*NetworkInterface) *DescribeGatewayInformationOutput {
	s.GatewayNetworkInterfaces = v
	return s
}

// SetGatewayState sets the GatewayState field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayState(v string) *DescribeGatewayInformationOutput {
	s.GatewayState = &v
	return s
}

// SetGatewayTimezone sets the GatewayTimezone field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayTimezone(v string) *DescribeGatewayInformationOutput {
	s.GatewayTimezone = &v
	return s
}

// SetGatewayType sets the GatewayType field's value.
func (s *DescribeGatewayInformationOutput) SetGatewayType(v string) *DescribeGatewayInformationOutput {
	s.GatewayType = &v
	return s
}

// SetHostEnvironment sets the HostEnvironment field's value.
func (s *DescribeGatewayInformationOutput) SetHostEnvironment(v string) *DescribeGatewayInformationOutput {
	s.HostEnvironment = &v
	return s
}

// SetLastSoftwareUpdate sets the LastSoftwareUpdate field's value.
func (s *DescribeGatewayInformationOutput) SetLastSoftwareUpdate(v string) *DescribeGatewayInformationOutput {
	s.LastSoftwareUpdate = &v
	return s
}

// SetNextUpdateAvailabilityDate sets the NextUpdateAvailabilityDate field's value.
func (s *DescribeGatewayInformationOutput) SetNextUpdateAvailabilityDate(v string) *DescribeGatewayInformationOutput {
	s.NextUpdateAvailabilityDate = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *DescribeGatewayInformationOutput) SetTags(v []*Tag) *DescribeGatewayInformationOutput {
	s.Tags = v
	return s
}

// SetVPCEndpoint sets the VPCEndpoint field's value.
func (s *DescribeGatewayInformationOutput) SetVPCEndpoint(v string) *DescribeGatewayInformationOutput {
	s.VPCEndpoint = &v
	return s
}

// A JSON object containing the Amazon Resource Name (ARN) of the gateway.
type DescribeMaintenanceStartTimeInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeMaintenanceStartTimeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeMaintenanceStartTimeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeMaintenanceStartTimeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeMaintenanceStartTimeInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeMaintenanceStartTimeInput) SetGatewayARN(v string) *DescribeMaintenanceStartTimeInput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing the following fields:
//
//    * DescribeMaintenanceStartTimeOutput$DayOfMonth
//
//    * DescribeMaintenanceStartTimeOutput$DayOfWeek
//
//    * DescribeMaintenanceStartTimeOutput$HourOfDay
//
//    * DescribeMaintenanceStartTimeOutput$MinuteOfHour
//
//    * DescribeMaintenanceStartTimeOutput$Timezone
type DescribeMaintenanceStartTimeOutput struct {
	_ struct{} `type:"structure"`

	// The day of the month component of the maintenance start time represented
	// as an ordinal number from 1 to 28, where 1 represents the first day of the
	// month and 28 represents the last day of the month.
	DayOfMonth *int64 `min:"1" type:"integer"`

	// An ordinal number between 0 and 6 that represents the day of the week, where
	// 0 represents Sunday and 6 represents Saturday. The day of week is in the
	// time zone of the gateway.
	DayOfWeek *int64 `type:"integer"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// The hour component of the maintenance start time represented as hh, where
	// hh is the hour (0 to 23). The hour of the day is in the time zone of the
	// gateway.
	HourOfDay *int64 `type:"integer"`

	// The minute component of the maintenance start time represented as mm, where
	// mm is the minute (0 to 59). The minute of the hour is in the time zone of
	// the gateway.
	MinuteOfHour *int64 `type:"integer"`

	// A value that indicates the time zone that is set for the gateway. The start
	// time and day of week specified should be in the time zone of the gateway.
	Timezone *string `min:"3" type:"string"`
}

// String returns the string representation
func (s DescribeMaintenanceStartTimeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeMaintenanceStartTimeOutput) GoString() string {
	return s.String()
}

// SetDayOfMonth sets the DayOfMonth field's value.
func (s *DescribeMaintenanceStartTimeOutput) SetDayOfMonth(v int64) *DescribeMaintenanceStartTimeOutput {
	s.DayOfMonth = &v
	return s
}

// SetDayOfWeek sets the DayOfWeek field's value.
func (s *DescribeMaintenanceStartTimeOutput) SetDayOfWeek(v int64) *DescribeMaintenanceStartTimeOutput {
	s.DayOfWeek = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeMaintenanceStartTimeOutput) SetGatewayARN(v string) *DescribeMaintenanceStartTimeOutput {
	s.GatewayARN = &v
	return s
}

// SetHourOfDay sets the HourOfDay field's value.
func (s *DescribeMaintenanceStartTimeOutput) SetHourOfDay(v int64) *DescribeMaintenanceStartTimeOutput {
	s.HourOfDay = &v
	return s
}

// SetMinuteOfHour sets the MinuteOfHour field's value.
func (s *DescribeMaintenanceStartTimeOutput) SetMinuteOfHour(v int64) *DescribeMaintenanceStartTimeOutput {
	s.MinuteOfHour = &v
	return s
}

// SetTimezone sets the Timezone field's value.
func (s *DescribeMaintenanceStartTimeOutput) SetTimezone(v string) *DescribeMaintenanceStartTimeOutput {
	s.Timezone = &v
	return s
}
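// Illustrative sketch (not generated code): rendering a
// DescribeMaintenanceStartTimeOutput as a readable weekly window. The helper
// name and the aws.Int64Value/aws.StringValue accessors are assumptions for
// the example; DayOfWeek runs from 0 (Sunday) to 6 (Saturday) in the
// gateway's time zone, as documented above.
//
//	func maintenanceWindow(out *DescribeMaintenanceStartTimeOutput) string {
//		days := []string{"Sunday", "Monday", "Tuesday", "Wednesday",
//			"Thursday", "Friday", "Saturday"}
//		return fmt.Sprintf("%s %02d:%02d (%s)",
//			days[aws.Int64Value(out.DayOfWeek)],
//			aws.Int64Value(out.HourOfDay),
//			aws.Int64Value(out.MinuteOfHour),
//			aws.StringValue(out.Timezone))
//	}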
// DescribeNFSFileSharesInput
type DescribeNFSFileSharesInput struct {
	_ struct{} `type:"structure"`

	// An array containing the Amazon Resource Name (ARN) of each file share to
	// be described.
	//
	// FileShareARNList is a required field
	FileShareARNList []*string `min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s DescribeNFSFileSharesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeNFSFileSharesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeNFSFileSharesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeNFSFileSharesInput"}
	if s.FileShareARNList == nil {
		invalidParams.Add(request.NewErrParamRequired("FileShareARNList"))
	}
	if s.FileShareARNList != nil && len(s.FileShareARNList) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("FileShareARNList", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFileShareARNList sets the FileShareARNList field's value.
func (s *DescribeNFSFileSharesInput) SetFileShareARNList(v []*string) *DescribeNFSFileSharesInput {
	s.FileShareARNList = v
	return s
}

// DescribeNFSFileSharesOutput
type DescribeNFSFileSharesOutput struct {
	_ struct{} `type:"structure"`

	// An array containing a description for each requested file share.
	NFSFileShareInfoList []*NFSFileShareInfo `type:"list"`
}

// String returns the string representation
func (s DescribeNFSFileSharesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeNFSFileSharesOutput) GoString() string {
	return s.String()
}

// SetNFSFileShareInfoList sets the NFSFileShareInfoList field's value.
func (s *DescribeNFSFileSharesOutput) SetNFSFileShareInfoList(v []*NFSFileShareInfo) *DescribeNFSFileSharesOutput {
	s.NFSFileShareInfoList = v
	return s
}

// DescribeSMBFileSharesInput
type DescribeSMBFileSharesInput struct {
	_ struct{} `type:"structure"`

	// An array containing the Amazon Resource Name (ARN) of each file share to
	// be described.
	//
	// FileShareARNList is a required field
	FileShareARNList []*string `min:"1" type:"list" required:"true"`
}

// String returns the string representation
func (s DescribeSMBFileSharesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeSMBFileSharesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeSMBFileSharesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeSMBFileSharesInput"}
	if s.FileShareARNList == nil {
		invalidParams.Add(request.NewErrParamRequired("FileShareARNList"))
	}
	if s.FileShareARNList != nil && len(s.FileShareARNList) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("FileShareARNList", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFileShareARNList sets the FileShareARNList field's value.
func (s *DescribeSMBFileSharesInput) SetFileShareARNList(v []*string) *DescribeSMBFileSharesInput {
	s.FileShareARNList = v
	return s
}

// DescribeSMBFileSharesOutput
type DescribeSMBFileSharesOutput struct {
	_ struct{} `type:"structure"`

	// An array containing a description for each requested file share.
	SMBFileShareInfoList []*SMBFileShareInfo `type:"list"`
}

// String returns the string representation
func (s DescribeSMBFileSharesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeSMBFileSharesOutput) GoString() string {
	return s.String()
}

// SetSMBFileShareInfoList sets the SMBFileShareInfoList field's value.
func (s *DescribeSMBFileSharesOutput) SetSMBFileShareInfoList(v []*SMBFileShareInfo) *DescribeSMBFileSharesOutput {
	s.SMBFileShareInfoList = v
	return s
}

type DescribeSMBSettingsInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeSMBSettingsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeSMBSettingsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeSMBSettingsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeSMBSettingsInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeSMBSettingsInput) SetGatewayARN(v string) *DescribeSMBSettingsInput {
	s.GatewayARN = &v
	return s
}

type DescribeSMBSettingsOutput struct {
	_ struct{} `type:"structure"`

	// Indicates the status of a gateway that is a member of the Active Directory
	// domain.
	//
	//    * ACCESS_DENIED: Indicates that the JoinDomain operation failed due to
	//    an authentication error.
	//
	//    * DETACHED: Indicates that the gateway is not joined to a domain.
	//
	//    * JOINED: Indicates that the gateway has successfully joined a domain.
	//
	//    * JOINING: Indicates that a JoinDomain operation is in progress.
	//
	//    * NETWORK_ERROR: Indicates that the JoinDomain operation failed due to
	//    a network or connectivity error.
	//
	//    * TIMEOUT: Indicates that the JoinDomain operation failed because the
	//    operation didn't complete within the allotted time.
	//
	//    * UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to
	//    another type of error.
	ActiveDirectoryStatus *string `type:"string" enum:"ActiveDirectoryStatus"`

	// The name of the domain that the gateway is joined to.
	DomainName *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// This value is true if a password for the guest user smbguest is set, otherwise
	// false.
	//
	// Valid Values: true | false
	SMBGuestPasswordSet *bool `type:"boolean"`

	// The type of security strategy that was specified for file gateway.
	//
	//    * ClientSpecified: If you use this option, requests are established based
	//    on what is negotiated by the client. This option is recommended when you
	//    want to maximize compatibility across different clients in your environment.
	//
	//    * MandatorySigning: If you use this option, file gateway only allows
	//    connections from SMBv2 or SMBv3 clients that have signing enabled.
	//    This option works with SMB clients on Microsoft Windows Vista, Windows
	//    Server 2008 or newer.
	//
	//    * MandatoryEncryption: If you use this option, file gateway only allows
	//    connections from SMBv3 clients that have encryption enabled. This option
	//    is highly recommended for environments that handle sensitive data. This
	//    option works with SMB clients on Microsoft Windows 8, Windows Server 2012
	//    or newer.
	SMBSecurityStrategy *string `type:"string" enum:"SMBSecurityStrategy"`
}

// String returns the string representation
func (s DescribeSMBSettingsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeSMBSettingsOutput) GoString() string {
	return s.String()
}

// SetActiveDirectoryStatus sets the ActiveDirectoryStatus field's value.
func (s *DescribeSMBSettingsOutput) SetActiveDirectoryStatus(v string) *DescribeSMBSettingsOutput {
	s.ActiveDirectoryStatus = &v
	return s
}

// SetDomainName sets the DomainName field's value.
func (s *DescribeSMBSettingsOutput) SetDomainName(v string) *DescribeSMBSettingsOutput {
	s.DomainName = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeSMBSettingsOutput) SetGatewayARN(v string) *DescribeSMBSettingsOutput {
	s.GatewayARN = &v
	return s
}

// SetSMBGuestPasswordSet sets the SMBGuestPasswordSet field's value.
func (s *DescribeSMBSettingsOutput) SetSMBGuestPasswordSet(v bool) *DescribeSMBSettingsOutput {
	s.SMBGuestPasswordSet = &v
	return s
}

// SetSMBSecurityStrategy sets the SMBSecurityStrategy field's value.
func (s *DescribeSMBSettingsOutput) SetSMBSecurityStrategy(v string) *DescribeSMBSettingsOutput {
	s.SMBSecurityStrategy = &v
	return s
}
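// Illustrative sketch (not generated code): branching on the reported SMB
// security strategy. The case strings mirror the SMBSecurityStrategy values
// documented above; the out variable and aws.StringValue are assumptions for
// the example.
//
//	switch aws.StringValue(out.SMBSecurityStrategy) {
//	case "ClientSpecified":
//		// Negotiated per client; maximizes compatibility.
//	case "MandatorySigning":
//		// Only SMBv2/SMBv3 clients with signing enabled may connect.
//	case "MandatoryEncryption":
//		// Only SMBv3 clients with encryption enabled may connect.
//	}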
// A JSON object containing the DescribeSnapshotScheduleInput$VolumeARN of the
// volume.
type DescribeSnapshotScheduleInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation
	// to return a list of gateway volumes.
	//
	// VolumeARN is a required field
	VolumeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeSnapshotScheduleInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeSnapshotScheduleInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeSnapshotScheduleInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeSnapshotScheduleInput"}
	if s.VolumeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
	}
	if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *DescribeSnapshotScheduleInput) SetVolumeARN(v string) *DescribeSnapshotScheduleInput {
	s.VolumeARN = &v
	return s
}

type DescribeSnapshotScheduleOutput struct {
	_ struct{} `type:"structure"`

	// The snapshot description.
	Description *string `min:"1" type:"string"`

	// The number of hours between snapshots.
	RecurrenceInHours *int64 `min:"1" type:"integer"`

	// The hour of the day at which the snapshot schedule begins represented as
	// hh, where hh is the hour (0 to 23). The hour of the day is in the time zone
	// of the gateway.
	StartAt *int64 `type:"integer"`

	// A list of up to 50 tags assigned to the snapshot schedule, sorted alphabetically
	// by key name. Each tag is a key-value pair. For a gateway with more than 10
	// tags assigned, you can view all tags using the ListTagsForResource API operation.
	Tags []*Tag `type:"list"`

	// A value that indicates the time zone of the gateway.
	Timezone *string `min:"3" type:"string"`

	// The Amazon Resource Name (ARN) of the volume that was specified in the request.
	VolumeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s DescribeSnapshotScheduleOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeSnapshotScheduleOutput) GoString() string {
	return s.String()
}

// SetDescription sets the Description field's value.
func (s *DescribeSnapshotScheduleOutput) SetDescription(v string) *DescribeSnapshotScheduleOutput {
	s.Description = &v
	return s
}

// SetRecurrenceInHours sets the RecurrenceInHours field's value.
func (s *DescribeSnapshotScheduleOutput) SetRecurrenceInHours(v int64) *DescribeSnapshotScheduleOutput {
	s.RecurrenceInHours = &v
	return s
}

// SetStartAt sets the StartAt field's value.
func (s *DescribeSnapshotScheduleOutput) SetStartAt(v int64) *DescribeSnapshotScheduleOutput {
	s.StartAt = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *DescribeSnapshotScheduleOutput) SetTags(v []*Tag) *DescribeSnapshotScheduleOutput {
	s.Tags = v
	return s
}

// SetTimezone sets the Timezone field's value.
func (s *DescribeSnapshotScheduleOutput) SetTimezone(v string) *DescribeSnapshotScheduleOutput {
	s.Timezone = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *DescribeSnapshotScheduleOutput) SetVolumeARN(v string) *DescribeSnapshotScheduleOutput {
	s.VolumeARN = &v
	return s
}

// A JSON object containing a list of DescribeStorediSCSIVolumesInput$VolumeARNs.
type DescribeStorediSCSIVolumesInput struct {
	_ struct{} `type:"structure"`

	// An array of strings where each string represents the Amazon Resource Name
	// (ARN) of a stored volume. All of the specified stored volumes must be from
	// the same gateway. Use ListVolumes to get volume ARNs for a gateway.
	//
	// VolumeARNs is a required field
	VolumeARNs []*string `type:"list" required:"true"`
}

// String returns the string representation
func (s DescribeStorediSCSIVolumesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStorediSCSIVolumesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeStorediSCSIVolumesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeStorediSCSIVolumesInput"}
	if s.VolumeARNs == nil {
		invalidParams.Add(request.NewErrParamRequired("VolumeARNs"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetVolumeARNs sets the VolumeARNs field's value.
func (s *DescribeStorediSCSIVolumesInput) SetVolumeARNs(v []*string) *DescribeStorediSCSIVolumesInput {
	s.VolumeARNs = v
	return s
}

type DescribeStorediSCSIVolumesOutput struct {
	_ struct{} `type:"structure"`

	// Describes a single unit of output from DescribeStorediSCSIVolumes. The following
	// fields are returned:
	//
	//    * ChapEnabled: Indicates whether mutual CHAP is enabled for the iSCSI
	//    target.
	//
	//    * LunNumber: The logical disk number.
	//
	//    * NetworkInterfaceId: The network interface ID of the stored volume that
	//    the initiator uses to map the stored volume as an iSCSI target.
	//
	//    * NetworkInterfacePort: The port used to communicate with iSCSI targets.
	//
	//    * PreservedExistingData: Indicates whether existing data on the underlying
	//    local disk was preserved when the stored volume was created.
	//
	//    * SourceSnapshotId: If the stored volume was created from a snapshot,
	//    this field contains the snapshot ID used, e.g. snap-1122aabb. Otherwise,
	//    this field is not included.
	//
	//    * StorediSCSIVolumes: An array of StorediSCSIVolume objects where each
	//    object contains metadata about one stored volume.
	//
	//    * TargetARN: The Amazon Resource Name (ARN) of the volume target.
	//
	//    * VolumeARN: The Amazon Resource Name (ARN) of the stored volume.
	//
	//    * VolumeDiskId: The disk ID of the local disk that was specified in the
	//    CreateStorediSCSIVolume operation.
	//
	//    * VolumeId: The unique identifier of the storage volume, e.g. vol-1122AABB.
	//
	//    * VolumeiSCSIAttributes: A VolumeiSCSIAttributes object that represents
	//    a collection of iSCSI attributes for one stored volume.
	//
	//    * VolumeProgress: The percentage complete if the volume is restoring or
	//    bootstrapping, representing the percent of data transferred. This field
	//    does not appear in the response if the stored volume is not restoring
	//    or bootstrapping.
	//
	//    * VolumeSizeInBytes: The size of the volume in bytes.
	//
	//    * VolumeStatus: One of the VolumeStatus values that indicates the state
	//    of the volume.
	//
	//    * VolumeType: One of the enumeration values describing the type of the
	//    volume. Currently, only STORED volumes are supported.
	StorediSCSIVolumes []*StorediSCSIVolume `type:"list"`
}

// String returns the string representation
func (s DescribeStorediSCSIVolumesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeStorediSCSIVolumesOutput) GoString() string {
	return s.String()
}

// SetStorediSCSIVolumes sets the StorediSCSIVolumes field's value.
func (s *DescribeStorediSCSIVolumesOutput) SetStorediSCSIVolumes(v []*StorediSCSIVolume) *DescribeStorediSCSIVolumesOutput {
	s.StorediSCSIVolumes = v
	return s
}

// DescribeTapeArchivesInput
type DescribeTapeArchivesInput struct {
	_ struct{} `type:"structure"`

	// Specifies that the number of virtual tapes described be limited to the specified
	// number.
	Limit *int64 `min:"1" type:"integer"`

	// An opaque string that indicates the position at which to begin describing
	// virtual tapes.
	Marker *string `min:"1" type:"string"`

	// Specifies one or more unique Amazon Resource Names (ARNs) that represent
	// the virtual tapes you want to describe.
	TapeARNs []*string `type:"list"`
}

// String returns the string representation
func (s DescribeTapeArchivesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTapeArchivesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeTapeArchivesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeTapeArchivesInput"}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}
	if s.Marker != nil && len(*s.Marker) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetLimit sets the Limit field's value.
func (s *DescribeTapeArchivesInput) SetLimit(v int64) *DescribeTapeArchivesInput {
	s.Limit = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *DescribeTapeArchivesInput) SetMarker(v string) *DescribeTapeArchivesInput {
	s.Marker = &v
	return s
}

// SetTapeARNs sets the TapeARNs field's value.
func (s *DescribeTapeArchivesInput) SetTapeARNs(v []*string) *DescribeTapeArchivesInput {
	s.TapeARNs = v
	return s
}

// DescribeTapeArchivesOutput
type DescribeTapeArchivesOutput struct {
	_ struct{} `type:"structure"`

	// An opaque string that indicates the position at which the virtual tapes that
	// were fetched for description ended. Use this marker in your next request
	// to fetch the next set of virtual tapes in the virtual tape shelf (VTS). If
	// there are no more virtual tapes to describe, this field does not appear in
	// the response.
	Marker *string `min:"1" type:"string"`

	// An array of virtual tape objects in the virtual tape shelf (VTS). The description
	// includes the Amazon Resource Name (ARN) of the virtual tapes. The information
	// returned includes the Amazon Resource Names (ARNs) of the tapes, size of
	// the tapes, status of the tapes, progress of the description, and tape barcode.
	TapeArchives []*TapeArchive `type:"list"`
}

// String returns the string representation
func (s DescribeTapeArchivesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTapeArchivesOutput) GoString() string {
	return s.String()
}

// SetMarker sets the Marker field's value.
func (s *DescribeTapeArchivesOutput) SetMarker(v string) *DescribeTapeArchivesOutput {
	s.Marker = &v
	return s
}

// SetTapeArchives sets the TapeArchives field's value.
func (s *DescribeTapeArchivesOutput) SetTapeArchives(v []*TapeArchive) *DescribeTapeArchivesOutput {
	s.TapeArchives = v
	return s
}

// DescribeTapeRecoveryPointsInput
type DescribeTapeRecoveryPointsInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// Specifies that the number of virtual tape recovery points that are described
	// be limited to the specified number.
	Limit *int64 `min:"1" type:"integer"`

	// An opaque string that indicates the position at which to begin describing
	// the virtual tape recovery points.
	Marker *string `min:"1" type:"string"`
}

// String returns the string representation
func (s DescribeTapeRecoveryPointsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTapeRecoveryPointsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeTapeRecoveryPointsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeTapeRecoveryPointsInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}
	if s.Marker != nil && len(*s.Marker) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeTapeRecoveryPointsInput) SetGatewayARN(v string) *DescribeTapeRecoveryPointsInput {
	s.GatewayARN = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeTapeRecoveryPointsInput) SetLimit(v int64) *DescribeTapeRecoveryPointsInput {
	s.Limit = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *DescribeTapeRecoveryPointsInput) SetMarker(v string) *DescribeTapeRecoveryPointsInput {
	s.Marker = &v
	return s
}

// DescribeTapeRecoveryPointsOutput
type DescribeTapeRecoveryPointsOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// An opaque string that indicates the position at which the virtual tape recovery
	// points that were listed for description ended.
	//
	// Use this marker in your next request to list the next set of virtual tape
	// recovery points in the list. If there are no more recovery points to describe,
	// this field does not appear in the response.
	Marker *string `min:"1" type:"string"`

	// An array of TapeRecoveryPointInfos that are available for the specified gateway.
	TapeRecoveryPointInfos []*TapeRecoveryPointInfo `type:"list"`
}

// String returns the string representation
func (s DescribeTapeRecoveryPointsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTapeRecoveryPointsOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeTapeRecoveryPointsOutput) SetGatewayARN(v string) *DescribeTapeRecoveryPointsOutput {
	s.GatewayARN = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *DescribeTapeRecoveryPointsOutput) SetMarker(v string) *DescribeTapeRecoveryPointsOutput {
	s.Marker = &v
	return s
}

// SetTapeRecoveryPointInfos sets the TapeRecoveryPointInfos field's value.
func (s *DescribeTapeRecoveryPointsOutput) SetTapeRecoveryPointInfos(v []*TapeRecoveryPointInfo) *DescribeTapeRecoveryPointsOutput {
	s.TapeRecoveryPointInfos = v
	return s
}

// DescribeTapesInput
type DescribeTapesInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// Specifies that the number of virtual tapes described be limited to the specified
	// number.
	//
	// Amazon Web Services may impose its own limit if this field is not set.
	Limit *int64 `min:"1" type:"integer"`

	// A marker value, obtained in a previous call to DescribeTapes. This marker
	// indicates which page of results to retrieve.
	//
	// If not specified, the first page of results is retrieved.
	Marker *string `min:"1" type:"string"`

	// Specifies one or more unique Amazon Resource Names (ARNs) that represent
	// the virtual tapes you want to describe. If this parameter is not specified,
	// Tape gateway returns a description of all virtual tapes associated with the
	// specified gateway.
	TapeARNs []*string `type:"list"`
}

// String returns the string representation
func (s DescribeTapesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTapesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeTapesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeTapesInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}
	if s.Marker != nil && len(*s.Marker) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeTapesInput) SetGatewayARN(v string) *DescribeTapesInput {
	s.GatewayARN = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeTapesInput) SetLimit(v int64) *DescribeTapesInput {
	s.Limit = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *DescribeTapesInput) SetMarker(v string) *DescribeTapesInput {
	s.Marker = &v
	return s
}

// SetTapeARNs sets the TapeARNs field's value.
func (s *DescribeTapesInput) SetTapeARNs(v []*string) *DescribeTapesInput {
	s.TapeARNs = v
	return s
}

// DescribeTapesOutput
type DescribeTapesOutput struct {
	_ struct{} `type:"structure"`

	// An opaque string that can be used as part of a subsequent DescribeTapes
	// call to retrieve the next page of results.
	//
	// If a response does not contain a marker, then there are no more results to
	// be retrieved.
	Marker *string `min:"1" type:"string"`

	// An array of virtual tape descriptions.
	Tapes []*Tape `type:"list"`
}

// String returns the string representation
func (s DescribeTapesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeTapesOutput) GoString() string {
	return s.String()
}

// SetMarker sets the Marker field's value.
func (s *DescribeTapesOutput) SetMarker(v string) *DescribeTapesOutput {
	s.Marker = &v
	return s
}

// SetTapes sets the Tapes field's value.
func (s *DescribeTapesOutput) SetTapes(v []*Tape) *DescribeTapesOutput {
	s.Tapes = v
	return s
}
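// Illustrative sketch (not generated code): paging through DescribeTapes
// results with the Marker field, as described above. The svc variable is
// assumed to be a *StorageGateway client created elsewhere, and gatewayARN
// and tapes are placeholders; error handling is abbreviated.
//
//	input := &DescribeTapesInput{GatewayARN: aws.String(gatewayARN)}
//	for {
//		out, err := svc.DescribeTapes(input)
//		if err != nil {
//			return err
//		}
//		tapes = append(tapes, out.Tapes...)
//		if out.Marker == nil {
//			break // no marker in the response means no more results
//		}
//		input.Marker = out.Marker
//	}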
type DescribeUploadBufferInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeUploadBufferInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeUploadBufferInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeUploadBufferInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeUploadBufferInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeUploadBufferInput) SetGatewayARN(v string) *DescribeUploadBufferInput {
	s.GatewayARN = &v
	return s
}

type DescribeUploadBufferOutput struct {
	_ struct{} `type:"structure"`

	// An array of the gateway's local disk IDs that are configured as upload buffer.
	// Each local disk ID is specified as a string (minimum length of 1 and maximum
	// length of 300). If no local disks are configured as upload buffer, then the
	// DiskIds array is empty.
	DiskIds []*string `type:"list"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// The total number of bytes allocated to the gateway's upload buffer.
	UploadBufferAllocatedInBytes *int64 `type:"long"`

	// The total number of bytes being used in the gateway's upload buffer.
	UploadBufferUsedInBytes *int64 `type:"long"`
}

// String returns the string representation
func (s DescribeUploadBufferOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeUploadBufferOutput) GoString() string {
	return s.String()
}

// SetDiskIds sets the DiskIds field's value.
func (s *DescribeUploadBufferOutput) SetDiskIds(v []*string) *DescribeUploadBufferOutput {
	s.DiskIds = v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeUploadBufferOutput) SetGatewayARN(v string) *DescribeUploadBufferOutput {
	s.GatewayARN = &v
	return s
}

// SetUploadBufferAllocatedInBytes sets the UploadBufferAllocatedInBytes field's value.
func (s *DescribeUploadBufferOutput) SetUploadBufferAllocatedInBytes(v int64) *DescribeUploadBufferOutput {
	s.UploadBufferAllocatedInBytes = &v
	return s
}

// SetUploadBufferUsedInBytes sets the UploadBufferUsedInBytes field's value.
func (s *DescribeUploadBufferOutput) SetUploadBufferUsedInBytes(v int64) *DescribeUploadBufferOutput {
	s.UploadBufferUsedInBytes = &v
	return s
}

// DescribeVTLDevicesInput
type DescribeVTLDevicesInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`

	// Specifies that the number of VTL devices described be limited to the specified
	// number.
	Limit *int64 `min:"1" type:"integer"`

	// An opaque string that indicates the position at which to begin describing
	// the VTL devices.
	Marker *string `min:"1" type:"string"`

	// An array of strings, where each string represents the Amazon Resource Name
	// (ARN) of a VTL device.
	//
	// All of the specified VTL devices must be from the same gateway. If no VTL
	// devices are specified, the result will contain all devices on the specified
	// gateway.
	VTLDeviceARNs []*string `type:"list"`
}

// String returns the string representation
func (s DescribeVTLDevicesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeVTLDevicesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeVTLDevicesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeVTLDevicesInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}
	if s.Limit != nil && *s.Limit < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
	}
	if s.Marker != nil && len(*s.Marker) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeVTLDevicesInput) SetGatewayARN(v string) *DescribeVTLDevicesInput {
	s.GatewayARN = &v
	return s
}

// SetLimit sets the Limit field's value.
func (s *DescribeVTLDevicesInput) SetLimit(v int64) *DescribeVTLDevicesInput {
	s.Limit = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *DescribeVTLDevicesInput) SetMarker(v string) *DescribeVTLDevicesInput {
	s.Marker = &v
	return s
}

// SetVTLDeviceARNs sets the VTLDeviceARNs field's value.
func (s *DescribeVTLDevicesInput) SetVTLDeviceARNs(v []*string) *DescribeVTLDevicesInput {
	s.VTLDeviceARNs = v
	return s
}

// DescribeVTLDevicesOutput
type DescribeVTLDevicesOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// An opaque string that indicates the position at which the VTL devices that
	// were fetched for description ended. Use the marker in your next request to
	// fetch the next set of VTL devices in the list. If there are no more VTL devices
	// to describe, this field does not appear in the response.
	Marker *string `min:"1" type:"string"`

	// An array of VTL device objects composed of the Amazon Resource Name (ARN)
	// of the VTL devices.
	VTLDevices []*VTLDevice `type:"list"`
}

// String returns the string representation
func (s DescribeVTLDevicesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeVTLDevicesOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeVTLDevicesOutput) SetGatewayARN(v string) *DescribeVTLDevicesOutput {
	s.GatewayARN = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *DescribeVTLDevicesOutput) SetMarker(v string) *DescribeVTLDevicesOutput {
	s.Marker = &v
	return s
}

// SetVTLDevices sets the VTLDevices field's value.
func (s *DescribeVTLDevicesOutput) SetVTLDevices(v []*VTLDevice) *DescribeVTLDevicesOutput {
	s.VTLDevices = v
	return s
}

// A JSON object containing the Amazon Resource Name (ARN) of the gateway.
type DescribeWorkingStorageInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DescribeWorkingStorageInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeWorkingStorageInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeWorkingStorageInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeWorkingStorageInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeWorkingStorageInput) SetGatewayARN(v string) *DescribeWorkingStorageInput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing the following fields:
type DescribeWorkingStorageOutput struct {
	_ struct{} `type:"structure"`

	// An array of the gateway's local disk IDs that are configured as working storage.
	// Each local disk ID is specified as a string (minimum length of 1 and maximum
	// length of 300). If no local disks are configured as working storage, then
	// the DiskIds array is empty.
	DiskIds []*string `type:"list"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`

	// The total working storage in bytes allocated for the gateway. If no working
	// storage is configured for the gateway, this field returns 0.
	WorkingStorageAllocatedInBytes *int64 `type:"long"`

	// The total working storage in bytes in use by the gateway. If no working storage
	// is configured for the gateway, this field returns 0.
	WorkingStorageUsedInBytes *int64 `type:"long"`
}

// String returns the string representation
func (s DescribeWorkingStorageOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeWorkingStorageOutput) GoString() string {
	return s.String()
}

// SetDiskIds sets the DiskIds field's value.
func (s *DescribeWorkingStorageOutput) SetDiskIds(v []*string) *DescribeWorkingStorageOutput {
	s.DiskIds = v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DescribeWorkingStorageOutput) SetGatewayARN(v string) *DescribeWorkingStorageOutput {
	s.GatewayARN = &v
	return s
}

// SetWorkingStorageAllocatedInBytes sets the WorkingStorageAllocatedInBytes field's value.
func (s *DescribeWorkingStorageOutput) SetWorkingStorageAllocatedInBytes(v int64) *DescribeWorkingStorageOutput {
	s.WorkingStorageAllocatedInBytes = &v
	return s
}

// SetWorkingStorageUsedInBytes sets the WorkingStorageUsedInBytes field's value.
func (s *DescribeWorkingStorageOutput) SetWorkingStorageUsedInBytes(v int64) *DescribeWorkingStorageOutput {
	s.WorkingStorageUsedInBytes = &v
	return s
}

// DetachVolumeInput
type DetachVolumeInput struct {
	_ struct{} `type:"structure"`

	// Set to true to forcibly remove the iSCSI connection of the target volume
	// and detach the volume. The default is false. If this value is set to false,
	// you must manually disconnect the iSCSI connection from the target volume.
	//
	// Valid Values: true | false
	ForceDetach *bool `type:"boolean"`

	// The Amazon Resource Name (ARN) of the volume to detach from the gateway.
	//
	// VolumeARN is a required field
	VolumeARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DetachVolumeInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DetachVolumeInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DetachVolumeInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DetachVolumeInput"}
	if s.VolumeARN == nil {
		invalidParams.Add(request.NewErrParamRequired("VolumeARN"))
	}
	if s.VolumeARN != nil && len(*s.VolumeARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetForceDetach sets the ForceDetach field's value.
func (s *DetachVolumeInput) SetForceDetach(v bool) *DetachVolumeInput {
	s.ForceDetach = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *DetachVolumeInput) SetVolumeARN(v string) *DetachVolumeInput {
	s.VolumeARN = &v
	return s
}

// DetachVolumeOutput
type DetachVolumeOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the volume that was detached.
	VolumeARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s DetachVolumeOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DetachVolumeOutput) GoString() string {
	return s.String()
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *DetachVolumeOutput) SetVolumeARN(v string) *DetachVolumeOutput {
	s.VolumeARN = &v
	return s
}

// Lists iSCSI information about a VTL device.
type DeviceiSCSIAttributes struct {
	_ struct{} `type:"structure"`

	// Indicates whether mutual CHAP is enabled for the iSCSI target.
	ChapEnabled *bool `type:"boolean"`

	// The network interface identifier of the VTL device.
	NetworkInterfaceId *string `type:"string"`

	// The port used to communicate with iSCSI VTL device targets.
	NetworkInterfacePort *int64 `type:"integer"`

	// Specifies the unique Amazon Resource Name (ARN) that encodes the iSCSI qualified
	// name (IQN) of a tape drive or media changer target.
	TargetARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s DeviceiSCSIAttributes) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DeviceiSCSIAttributes) GoString() string {
	return s.String()
}

// SetChapEnabled sets the ChapEnabled field's value.
func (s *DeviceiSCSIAttributes) SetChapEnabled(v bool) *DeviceiSCSIAttributes {
	s.ChapEnabled = &v
	return s
}

// SetNetworkInterfaceId sets the NetworkInterfaceId field's value.
func (s *DeviceiSCSIAttributes) SetNetworkInterfaceId(v string) *DeviceiSCSIAttributes {
	s.NetworkInterfaceId = &v
	return s
}

// SetNetworkInterfacePort sets the NetworkInterfacePort field's value.
func (s *DeviceiSCSIAttributes) SetNetworkInterfacePort(v int64) *DeviceiSCSIAttributes {
	s.NetworkInterfacePort = &v
	return s
}

// SetTargetARN sets the TargetARN field's value.
func (s *DeviceiSCSIAttributes) SetTargetARN(v string) *DeviceiSCSIAttributes {
	s.TargetARN = &v
	return s
}
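// Illustrative sketch (not generated code): building and validating a
// force-detach request with the fluent setters defined above. The volumeARN
// value is a placeholder and svc is assumed to be a *StorageGateway client.
//
//	input := (&DetachVolumeInput{}).
//		SetVolumeARN(volumeARN).
//		SetForceDetach(true)
//	if err := input.Validate(); err != nil {
//		return err
//	}
//	out, err := svc.DetachVolume(input)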
// DisableGatewayInput
type DisableGatewayInput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s DisableGatewayInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DisableGatewayInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DisableGatewayInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DisableGatewayInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DisableGatewayInput) SetGatewayARN(v string) *DisableGatewayInput {
	s.GatewayARN = &v
	return s
}

// DisableGatewayOutput
type DisableGatewayOutput struct {
	_ struct{} `type:"structure"`

	// The unique Amazon Resource Name (ARN) of the disabled gateway.
	GatewayARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s DisableGatewayOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DisableGatewayOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *DisableGatewayOutput) SetGatewayARN(v string) *DisableGatewayOutput {
	s.GatewayARN = &v
	return s
}

// Represents a gateway's local disk.
type Disk struct {
	_ struct{} `type:"structure"`

	// The iSCSI qualified name (IQN) that is defined for a disk. This field is
	// not included in the response if the local disk is not defined as an iSCSI
	// target. The format of this field is targetIqn::LUNNumber::region-volumeId.
	DiskAllocationResource *string `type:"string"`

	// One of the DiskAllocationType enumeration values that identifies how a local
	// disk is used.
	//
	// Valid Values: UPLOAD_BUFFER | CACHE_STORAGE
	DiskAllocationType *string `min:"3" type:"string"`

	// A list of values that represents attributes of a local disk.
	DiskAttributeList []*string `type:"list"`

	// The unique device ID or other distinguishing data that identifies a local
	// disk.
	DiskId *string `min:"1" type:"string"`

	// The device node of a local disk as assigned by the virtualization environment.
	DiskNode *string `type:"string"`

	// The path of a local disk in the gateway virtual machine (VM).
	DiskPath *string `type:"string"`

	// The local disk size in bytes.
	DiskSizeInBytes *int64 `type:"long"`

	// A value that represents the status of a local disk.
	DiskStatus *string `type:"string"`
}

// String returns the string representation
func (s Disk) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Disk) GoString() string {
	return s.String()
}

// SetDiskAllocationResource sets the DiskAllocationResource field's value.
func (s *Disk) SetDiskAllocationResource(v string) *Disk {
	s.DiskAllocationResource = &v
	return s
}

// SetDiskAllocationType sets the DiskAllocationType field's value.
func (s *Disk) SetDiskAllocationType(v string) *Disk {
	s.DiskAllocationType = &v
	return s
}

// SetDiskAttributeList sets the DiskAttributeList field's value.
func (s *Disk) SetDiskAttributeList(v []*string) *Disk {
	s.DiskAttributeList = v
	return s
}

// SetDiskId sets the DiskId field's value.
func (s *Disk) SetDiskId(v string) *Disk {
	s.DiskId = &v
	return s
}

// SetDiskNode sets the DiskNode field's value.
func (s *Disk) SetDiskNode(v string) *Disk {
	s.DiskNode = &v
	return s
}

// SetDiskPath sets the DiskPath field's value.
func (s *Disk) SetDiskPath(v string) *Disk {
	s.DiskPath = &v
	return s
}

// SetDiskSizeInBytes sets the DiskSizeInBytes field's value.
func (s *Disk) SetDiskSizeInBytes(v int64) *Disk { s.DiskSizeInBytes = &v return s } // SetDiskStatus sets the DiskStatus field's value. func (s *Disk) SetDiskStatus(v string) *Disk { s.DiskStatus = &v return s } // Provides additional information about an error that was returned by the service. // See the errorCode and errorDetails members for more information about the // error. type Error struct { _ struct{} `type:"structure"` // Additional information about the error. ErrorCode *string `locationName:"errorCode" type:"string" enum:"ErrorCode"` // Human-readable text that provides detail about the error that occurred. ErrorDetails map[string]*string `locationName:"errorDetails" type:"map"` } // String returns the string representation func (s Error) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Error) GoString() string { return s.String() } // SetErrorCode sets the ErrorCode field's value. func (s *Error) SetErrorCode(v string) *Error { s.ErrorCode = &v return s } // SetErrorDetails sets the ErrorDetails field's value. func (s *Error) SetErrorDetails(v map[string]*string) *Error { s.ErrorDetails = v return s } // Describes a file share. type FileShareInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file share. FileShareARN *string `min:"50" type:"string"` // The ID of the file share. FileShareId *string `min:"12" type:"string"` // The status of the file share. // // Valid Values: CREATING | UPDATING | AVAILABLE | DELETING FileShareStatus *string `min:"3" type:"string"` // The type of the file share. FileShareType *string `type:"string" enum:"FileShareType"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s FileShareInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FileShareInfo) GoString() string { return s.String() } // SetFileShareARN sets the FileShareARN field's value. func (s *FileShareInfo) SetFileShareARN(v string) *FileShareInfo { s.FileShareARN = &v return s } // SetFileShareId sets the FileShareId field's value. func (s *FileShareInfo) SetFileShareId(v string) *FileShareInfo { s.FileShareId = &v return s } // SetFileShareStatus sets the FileShareStatus field's value. func (s *FileShareInfo) SetFileShareStatus(v string) *FileShareInfo { s.FileShareStatus = &v return s } // SetFileShareType sets the FileShareType field's value. func (s *FileShareInfo) SetFileShareType(v string) *FileShareInfo { s.FileShareType = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *FileShareInfo) SetGatewayARN(v string) *FileShareInfo { s.GatewayARN = &v return s } // Describes a gateway object. type GatewayInfo struct { _ struct{} `type:"structure"` // The ID of the Amazon EC2 instance that was used to launch the gateway. Ec2InstanceId *string `type:"string"` // The AWS Region where the Amazon EC2 instance is located. Ec2InstanceRegion *string `type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The unique identifier assigned to your gateway during activation. This ID // becomes part of the gateway Amazon Resource Name (ARN), which you use as // input for other operations. 
GatewayId *string `min:"12" type:"string"` // The name of the gateway. GatewayName *string `type:"string"` // The state of the gateway. // // Valid Values: DISABLED | ACTIVE GatewayOperationalState *string `min:"2" type:"string"` // The type of the gateway. GatewayType *string `min:"2" type:"string"` } // String returns the string representation func (s GatewayInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GatewayInfo) GoString() string { return s.String() } // SetEc2InstanceId sets the Ec2InstanceId field's value. func (s *GatewayInfo) SetEc2InstanceId(v string) *GatewayInfo { s.Ec2InstanceId = &v return s } // SetEc2InstanceRegion sets the Ec2InstanceRegion field's value. func (s *GatewayInfo) SetEc2InstanceRegion(v string) *GatewayInfo { s.Ec2InstanceRegion = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *GatewayInfo) SetGatewayARN(v string) *GatewayInfo { s.GatewayARN = &v return s } // SetGatewayId sets the GatewayId field's value. func (s *GatewayInfo) SetGatewayId(v string) *GatewayInfo { s.GatewayId = &v return s } // SetGatewayName sets the GatewayName field's value. func (s *GatewayInfo) SetGatewayName(v string) *GatewayInfo { s.GatewayName = &v return s } // SetGatewayOperationalState sets the GatewayOperationalState field's value. func (s *GatewayInfo) SetGatewayOperationalState(v string) *GatewayInfo { s.GatewayOperationalState = &v return s } // SetGatewayType sets the GatewayType field's value. func (s *GatewayInfo) SetGatewayType(v string) *GatewayInfo { s.GatewayType = &v return s } // An internal server error has occurred during the request. For more information, // see the error and message fields. type InternalServerError struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A StorageGatewayError that provides more information about the cause of the // error. Error_ *Error `locationName:"error" type:"structure"` // A human-readable message describing the error that occurred. Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s InternalServerError) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InternalServerError) GoString() string { return s.String() } func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ RespMetadata: v, } } // Code returns the exception type name. func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *InternalServerError) OrigErr() error { return nil } func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. func (s *InternalServerError) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. func (s *InternalServerError) RequestID() string { return s.RespMetadata.RequestID } // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. type InvalidGatewayRequestException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A StorageGatewayError that provides more detail about the cause of the error. Error_ *Error `locationName:"error" type:"structure"` // A human-readable message describing the error that occurred. Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s InvalidGatewayRequestException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InvalidGatewayRequestException) GoString() string { return s.String() } func newErrorInvalidGatewayRequestException(v protocol.ResponseMetadata) error { return &InvalidGatewayRequestException{ RespMetadata: v, } } // Code returns the exception type name. func (s *InvalidGatewayRequestException) Code() string { return "InvalidGatewayRequestException" } // Message returns the exception's message. func (s *InvalidGatewayRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *InvalidGatewayRequestException) OrigErr() error { return nil } func (s *InvalidGatewayRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. func (s *InvalidGatewayRequestException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. func (s *InvalidGatewayRequestException) RequestID() string { return s.RespMetadata.RequestID } // JoinDomainInput type JoinDomainInput struct { _ struct{} `type:"structure"` // List of IPv4 addresses, NetBIOS names, or host names of your domain server. // If you need to specify the port number, include it after the colon (“:”). // For example, mydc.mydomain.com:389. DomainControllers []*string `type:"list"` // The name of the domain that you want the gateway to join. // // DomainName is a required field DomainName *string `min:"1" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region.
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The organizational unit (OU) is a container in an Active Directory that can // hold users, groups, computers, and other OUs, and this parameter specifies // the OU that the gateway will join within the AD domain. OrganizationalUnit *string `min:"1" type:"string"` // Sets the password of the user who has permission to add the gateway to the // Active Directory domain. // // Password is a required field Password *string `min:"1" type:"string" required:"true" sensitive:"true"` // Specifies the time, in seconds, in which the JoinDomain operation must complete. // The default is 20 seconds. TimeoutInSeconds *int64 `type:"integer"` // Sets the user name of the user who has permission to add the gateway to the Active // Directory domain. The domain user account should be enabled to join computers // to the domain. For example, you can use the domain administrator account // or an account with delegated permissions to join computers to the domain. // // UserName is a required field UserName *string `min:"1" type:"string" required:"true"` } // String returns the string representation func (s JoinDomainInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s JoinDomainInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *JoinDomainInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "JoinDomainInput"} if s.DomainName == nil { invalidParams.Add(request.NewErrParamRequired("DomainName")) } if s.DomainName != nil && len(*s.DomainName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.OrganizationalUnit != nil && len(*s.OrganizationalUnit) < 1 { invalidParams.Add(request.NewErrParamMinLen("OrganizationalUnit", 1)) } if s.Password == nil { invalidParams.Add(request.NewErrParamRequired("Password")) } if s.Password != nil && len(*s.Password) < 1 { invalidParams.Add(request.NewErrParamMinLen("Password", 1)) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } if s.UserName != nil && len(*s.UserName) < 1 { invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDomainControllers sets the DomainControllers field's value. func (s *JoinDomainInput) SetDomainControllers(v []*string) *JoinDomainInput { s.DomainControllers = v return s } // SetDomainName sets the DomainName field's value. func (s *JoinDomainInput) SetDomainName(v string) *JoinDomainInput { s.DomainName = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *JoinDomainInput) SetGatewayARN(v string) *JoinDomainInput { s.GatewayARN = &v return s } // SetOrganizationalUnit sets the OrganizationalUnit field's value. func (s *JoinDomainInput) SetOrganizationalUnit(v string) *JoinDomainInput { s.OrganizationalUnit = &v return s } // SetPassword sets the Password field's value. func (s *JoinDomainInput) SetPassword(v string) *JoinDomainInput { s.Password = &v return s }
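// Example (editor's illustrative sketch): assembling a JoinDomainInput with
// the setters above. DomainName, GatewayARN, Password, and UserName are the
// required fields checked by Validate; all values below are placeholders:
//
//	input := new(JoinDomainInput).
//		SetDomainName("corp.example.com").
//		SetGatewayARN("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B").
//		SetUserName("Admin").
//		SetPassword("placeholder-password").
//		SetTimeoutInSeconds(20)
//	if err := input.Validate(); err != nil {
//		// handle the request.ErrInvalidParams
//	}
//
// SetTimeoutInSeconds sets the TimeoutInSeconds field's value.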
func (s *JoinDomainInput) SetTimeoutInSeconds(v int64) *JoinDomainInput { s.TimeoutInSeconds = &v return s } // SetUserName sets the UserName field's value. func (s *JoinDomainInput) SetUserName(v string) *JoinDomainInput { s.UserName = &v return s } // JoinDomainOutput type JoinDomainOutput struct { _ struct{} `type:"structure"` // Indicates the status of the gateway as a member of the Active Directory domain. // // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due to // an authentication error. // // * DETACHED: Indicates that the gateway is not joined to a domain. // // * JOINED: Indicates that the gateway has successfully joined a domain. // // * JOINING: Indicates that a JoinDomain operation is in progress. // // * NETWORK_ERROR: Indicates that the JoinDomain operation failed due to a network // or connectivity error. // // * TIMEOUT: Indicates that the JoinDomain operation failed because the // operation didn't complete within the allotted time. // // * UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to // another type of error. ActiveDirectoryStatus *string `type:"string" enum:"ActiveDirectoryStatus"` // The unique Amazon Resource Name (ARN) of the gateway that joined the domain. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s JoinDomainOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s JoinDomainOutput) GoString() string { return s.String() } // SetActiveDirectoryStatus sets the ActiveDirectoryStatus field's value. func (s *JoinDomainOutput) SetActiveDirectoryStatus(v string) *JoinDomainOutput { s.ActiveDirectoryStatus = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *JoinDomainOutput) SetGatewayARN(v string) *JoinDomainOutput { s.GatewayARN = &v return s } type ListAutomaticTapeCreationPoliciesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s ListAutomaticTapeCreationPoliciesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListAutomaticTapeCreationPoliciesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListAutomaticTapeCreationPoliciesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListAutomaticTapeCreationPoliciesInput"} if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *ListAutomaticTapeCreationPoliciesInput) SetGatewayARN(v string) *ListAutomaticTapeCreationPoliciesInput { s.GatewayARN = &v return s } type ListAutomaticTapeCreationPoliciesOutput struct { _ struct{} `type:"structure"` // Gets a listing of information about the gateway's automatic tape creation // policies, including the automatic tape creation rules and the gateway that // is using the policies.
AutomaticTapeCreationPolicyInfos []*AutomaticTapeCreationPolicyInfo `type:"list"` } // String returns the string representation func (s ListAutomaticTapeCreationPoliciesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListAutomaticTapeCreationPoliciesOutput) GoString() string { return s.String() } // SetAutomaticTapeCreationPolicyInfos sets the AutomaticTapeCreationPolicyInfos field's value. func (s *ListAutomaticTapeCreationPoliciesOutput) SetAutomaticTapeCreationPolicyInfos(v []*AutomaticTapeCreationPolicyInfo) *ListAutomaticTapeCreationPoliciesOutput { s.AutomaticTapeCreationPolicyInfos = v return s } // ListFileSharesInput type ListFileSharesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway whose file shares you want // to list. If this field is not present, all file shares under your account // are listed. GatewayARN *string `min:"50" type:"string"` // The maximum number of file shares to return in the response. The value must // be an integer with a value greater than zero. Optional. Limit *int64 `min:"1" type:"integer"` // Opaque pagination token returned from a previous ListFileShares operation. // If present, Marker specifies where to continue the list from after a previous // call to ListFileShares. Optional. Marker *string `min:"1" type:"string"` } // String returns the string representation func (s ListFileSharesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListFileSharesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListFileSharesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListFileSharesInput"} if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } if s.Marker != nil && len(*s.Marker) < 1 { invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *ListFileSharesInput) SetGatewayARN(v string) *ListFileSharesInput { s.GatewayARN = &v return s } // SetLimit sets the Limit field's value. func (s *ListFileSharesInput) SetLimit(v int64) *ListFileSharesInput { s.Limit = &v return s } // SetMarker sets the Marker field's value. func (s *ListFileSharesInput) SetMarker(v string) *ListFileSharesInput { s.Marker = &v return s } // ListFileSharesOutput type ListFileSharesOutput struct { _ struct{} `type:"structure"` // An array of information about the file gateway's file shares. FileShareInfoList []*FileShareInfo `type:"list"` // If the request includes Marker, the response returns that value in this field. Marker *string `min:"1" type:"string"` // If a value is present, there are more file shares to return. In a subsequent // request, use NextMarker as the value for Marker to retrieve the next set // of file shares. NextMarker *string `min:"1" type:"string"` } // String returns the string representation func (s ListFileSharesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListFileSharesOutput) GoString() string { return s.String() }
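// Example (editor's illustrative sketch): Marker and NextMarker above form the
// pagination contract for ListFileShares. Assuming a service client svc whose
// ListFileShares method accepts this input type and returns this output type
// (as the operation wrappers elsewhere in this package do), a caller pages
// until NextMarker is absent:
//
//	input := new(ListFileSharesInput).SetLimit(10)
//	for {
//		out, err := svc.ListFileShares(input)
//		if err != nil {
//			break // or handle the error
//		}
//		// consume out.FileShareInfoList here
//		if out.NextMarker == nil {
//			break
//		}
//		input.SetMarker(*out.NextMarker)
//	}
//
// SetFileShareInfoList sets the FileShareInfoList field's value.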
func (s *ListFileSharesOutput) SetFileShareInfoList(v []*FileShareInfo) *ListFileSharesOutput { s.FileShareInfoList = v return s } // SetMarker sets the Marker field's value. func (s *ListFileSharesOutput) SetMarker(v string) *ListFileSharesOutput { s.Marker = &v return s } // SetNextMarker sets the NextMarker field's value. func (s *ListFileSharesOutput) SetNextMarker(v string) *ListFileSharesOutput { s.NextMarker = &v return s } // A JSON object containing zero or more of the following fields: // // * ListGatewaysInput$Limit // // * ListGatewaysInput$Marker type ListGatewaysInput struct { _ struct{} `type:"structure"` // Specifies that the list of gateways returned be limited to the specified // number of items. Limit *int64 `min:"1" type:"integer"` // An opaque string that indicates the position at which to begin the returned // list of gateways. Marker *string `min:"1" type:"string"` } // String returns the string representation func (s ListGatewaysInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListGatewaysInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListGatewaysInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListGatewaysInput"} if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } if s.Marker != nil && len(*s.Marker) < 1 { invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetLimit sets the Limit field's value. func (s *ListGatewaysInput) SetLimit(v int64) *ListGatewaysInput { s.Limit = &v return s } // SetMarker sets the Marker field's value. func (s *ListGatewaysInput) SetMarker(v string) *ListGatewaysInput { s.Marker = &v return s } type ListGatewaysOutput struct { _ struct{} `type:"structure"` // An array of GatewayInfo objects. Gateways []*GatewayInfo `type:"list"` // Use the marker in your next request to fetch the next set of gateways in // the list. If there are no more gateways to list, this field does not appear // in the response. Marker *string `min:"1" type:"string"` } // String returns the string representation func (s ListGatewaysOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListGatewaysOutput) GoString() string { return s.String() } // SetGateways sets the Gateways field's value. func (s *ListGatewaysOutput) SetGateways(v []*GatewayInfo) *ListGatewaysOutput { s.Gateways = v return s } // SetMarker sets the Marker field's value. func (s *ListGatewaysOutput) SetMarker(v string) *ListGatewaysOutput { s.Marker = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway. type ListLocalDisksInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s ListLocalDisksInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListLocalDisksInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *ListLocalDisksInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListLocalDisksInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *ListLocalDisksInput) SetGatewayARN(v string) *ListLocalDisksInput { s.GatewayARN = &v return s } type ListLocalDisksOutput struct { _ struct{} `type:"structure"` // A JSON object containing the following fields: // // * ListLocalDisksOutput$Disks Disks []*Disk `type:"list"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s ListLocalDisksOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListLocalDisksOutput) GoString() string { return s.String() } // SetDisks sets the Disks field's value. func (s *ListLocalDisksOutput) SetDisks(v []*Disk) *ListLocalDisksOutput { s.Disks = v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *ListLocalDisksOutput) SetGatewayARN(v string) *ListLocalDisksOutput { s.GatewayARN = &v return s } // ListTagsForResourceInput type ListTagsForResourceInput struct { _ struct{} `type:"structure"` // Specifies that the list of tags returned be limited to the specified number // of items. Limit *int64 `min:"1" type:"integer"` // An opaque string that indicates the position at which to begin returning // the list of tags. Marker *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the resource for which you want to list // tags. // // ResourceARN is a required field ResourceARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListTagsForResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListTagsForResourceInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } if s.Marker != nil && len(*s.Marker) < 1 { invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) } if s.ResourceARN == nil { invalidParams.Add(request.NewErrParamRequired("ResourceARN")) } if s.ResourceARN != nil && len(*s.ResourceARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetLimit sets the Limit field's value. func (s *ListTagsForResourceInput) SetLimit(v int64) *ListTagsForResourceInput { s.Limit = &v return s } // SetMarker sets the Marker field's value. func (s *ListTagsForResourceInput) SetMarker(v string) *ListTagsForResourceInput { s.Marker = &v return s } // SetResourceARN sets the ResourceARN field's value. 
func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { s.ResourceARN = &v return s } // ListTagsForResourceOutput type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` // An opaque string that indicates the position at which to stop returning the // list of tags. Marker *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the resource for which you want to list // tags. ResourceARN *string `min:"50" type:"string"` // An array that contains the tags for the specified resource. Tags []*Tag `type:"list"` } // String returns the string representation func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListTagsForResourceOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. func (s *ListTagsForResourceOutput) SetMarker(v string) *ListTagsForResourceOutput { s.Marker = &v return s } // SetResourceARN sets the ResourceARN field's value. func (s *ListTagsForResourceOutput) SetResourceARN(v string) *ListTagsForResourceOutput { s.ResourceARN = &v return s } // SetTags sets the Tags field's value. func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { s.Tags = v return s } // A JSON object that contains one or more of the following fields: // // * ListTapesInput$Limit // // * ListTapesInput$Marker // // * ListTapesInput$TapeARNs type ListTapesInput struct { _ struct{} `type:"structure"` // An optional number limit for the tapes in the list returned by this call. Limit *int64 `min:"1" type:"integer"` // A string that indicates the position at which to begin the returned list // of tapes. Marker *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of each of the tapes you want to list. If // you don't specify a tape ARN, the response lists all tapes in both your VTL // and VTS. TapeARNs []*string `type:"list"` } // String returns the string representation func (s ListTapesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListTapesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListTapesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListTapesInput"} if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } if s.Marker != nil && len(*s.Marker) < 1 { invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetLimit sets the Limit field's value. func (s *ListTapesInput) SetLimit(v int64) *ListTapesInput { s.Limit = &v return s } // SetMarker sets the Marker field's value. func (s *ListTapesInput) SetMarker(v string) *ListTapesInput { s.Marker = &v return s } // SetTapeARNs sets the TapeARNs field's value. func (s *ListTapesInput) SetTapeARNs(v []*string) *ListTapesInput { s.TapeARNs = v return s } // A JSON object containing the following fields: // // * ListTapesOutput$Marker // // * ListTapesOutput$TapeInfos type ListTapesOutput struct { _ struct{} `type:"structure"` // A string that indicates the position at which to begin returning the next // list of tapes. Use the marker in your next request to continue pagination // of tapes. If there are no more tapes to list, this element does not appear // in the response body.
Marker *string `min:"1" type:"string"` // An array of TapeInfo objects, where each object describes a single tape. // If there are no tapes in the tape library or VTS, then TapeInfos is an // empty array. TapeInfos []*TapeInfo `type:"list"` } // String returns the string representation func (s ListTapesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListTapesOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. func (s *ListTapesOutput) SetMarker(v string) *ListTapesOutput { s.Marker = &v return s } // SetTapeInfos sets the TapeInfos field's value. func (s *ListTapesOutput) SetTapeInfos(v []*TapeInfo) *ListTapesOutput { s.TapeInfos = v return s } // ListVolumeInitiatorsInput type ListVolumeInitiatorsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation // to return a list of gateway volumes for the gateway. // // VolumeARN is a required field VolumeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s ListVolumeInitiatorsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListVolumeInitiatorsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListVolumeInitiatorsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListVolumeInitiatorsInput"} if s.VolumeARN == nil { invalidParams.Add(request.NewErrParamRequired("VolumeARN")) } if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetVolumeARN sets the VolumeARN field's value. func (s *ListVolumeInitiatorsInput) SetVolumeARN(v string) *ListVolumeInitiatorsInput { s.VolumeARN = &v return s } // ListVolumeInitiatorsOutput type ListVolumeInitiatorsOutput struct { _ struct{} `type:"structure"` // The host names and port numbers of all iSCSI initiators that are connected // to the gateway. Initiators []*string `type:"list"` } // String returns the string representation func (s ListVolumeInitiatorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListVolumeInitiatorsOutput) GoString() string { return s.String() } // SetInitiators sets the Initiators field's value. func (s *ListVolumeInitiatorsOutput) SetInitiators(v []*string) *ListVolumeInitiatorsOutput { s.Initiators = v return s } type ListVolumeRecoveryPointsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s ListVolumeRecoveryPointsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListVolumeRecoveryPointsInput) GoString() string { return s.String() }
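// Example (editor's illustrative sketch): Initiators above is a []*string, as
// list members are throughout this package, so consuming the output means
// dereferencing each non-nil entry. The IQN shown is a placeholder:
//
//	var out ListVolumeInitiatorsOutput
//	for _, initiator := range out.Initiators {
//		if initiator != nil {
//			fmt.Println(*initiator) // e.g. "iqn.1991-05.com.example:host1"
//		}
//	}
//
// Validate inspects the fields of the type to determine if they are valid.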
func (s *ListVolumeRecoveryPointsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListVolumeRecoveryPointsInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *ListVolumeRecoveryPointsInput) SetGatewayARN(v string) *ListVolumeRecoveryPointsInput { s.GatewayARN = &v return s } type ListVolumeRecoveryPointsOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // An array of VolumeRecoveryPointInfo objects. VolumeRecoveryPointInfos []*VolumeRecoveryPointInfo `type:"list"` } // String returns the string representation func (s ListVolumeRecoveryPointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListVolumeRecoveryPointsOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *ListVolumeRecoveryPointsOutput) SetGatewayARN(v string) *ListVolumeRecoveryPointsOutput { s.GatewayARN = &v return s } // SetVolumeRecoveryPointInfos sets the VolumeRecoveryPointInfos field's value. func (s *ListVolumeRecoveryPointsOutput) SetVolumeRecoveryPointInfos(v []*VolumeRecoveryPointInfo) *ListVolumeRecoveryPointsOutput { s.VolumeRecoveryPointInfos = v return s } // A JSON object that contains one or more of the following fields: // // * ListVolumesInput$Limit // // * ListVolumesInput$Marker type ListVolumesInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // Specifies that the list of volumes returned be limited to the specified number // of items. Limit *int64 `min:"1" type:"integer"` // A string that indicates the position at which to begin the returned list // of volumes. Obtain the marker from the response of a previous List iSCSI // Volumes request. Marker *string `min:"1" type:"string"` } // String returns the string representation func (s ListVolumesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListVolumesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListVolumesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListVolumesInput"} if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } if s.Marker != nil && len(*s.Marker) < 1 { invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *ListVolumesInput) SetGatewayARN(v string) *ListVolumesInput { s.GatewayARN = &v return s } // SetLimit sets the Limit field's value. func (s *ListVolumesInput) SetLimit(v int64) *ListVolumesInput { s.Limit = &v return s } // SetMarker sets the Marker field's value. 
func (s *ListVolumesInput) SetMarker(v string) *ListVolumesInput { s.Marker = &v return s } // A JSON object containing the following fields: // // * ListVolumesOutput$Marker // // * ListVolumesOutput$VolumeInfos type ListVolumesOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // Use the marker in your next request to continue pagination of iSCSI volumes. // If there are no more volumes to list, this field does not appear in the response // body. Marker *string `min:"1" type:"string"` // An array of VolumeInfo objects, where each object describes an iSCSI volume. // If no volumes are defined for the gateway, then VolumeInfos is an empty array // "[]". VolumeInfos []*VolumeInfo `type:"list"` } // String returns the string representation func (s ListVolumesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListVolumesOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *ListVolumesOutput) SetGatewayARN(v string) *ListVolumesOutput { s.GatewayARN = &v return s } // SetMarker sets the Marker field's value. func (s *ListVolumesOutput) SetMarker(v string) *ListVolumesOutput { s.Marker = &v return s } // SetVolumeInfos sets the VolumeInfos field's value. func (s *ListVolumesOutput) SetVolumeInfos(v []*VolumeInfo) *ListVolumesOutput { s.VolumeInfos = v return s } // Describes Network File System (NFS) file share default values. Files and // folders stored as Amazon S3 objects in S3 buckets don't, by default, have // Unix file permissions assigned to them. Upon discovery in an S3 bucket by // Storage Gateway, the S3 objects that represent files and folders are assigned // these default Unix permissions. This operation is only supported for file // gateways. type NFSFileShareDefaults struct { _ struct{} `type:"structure"` // The Unix directory mode in the form "nnnn". For example, 0666 represents // the default access mode for all directories inside the file share. The default // value is 0777. DirectoryMode *string `min:"1" type:"string"` // The Unix file mode in the form "nnnn". For example, 0666 represents the default // file mode inside the file share. The default value is 0666. FileMode *string `min:"1" type:"string"` // The default group ID for the file share (unless the files have another group // ID specified). The default value is nfsnobody. GroupId *int64 `type:"long"` // The default owner ID for files in the file share (unless the files have another // owner ID specified). The default value is nfsnobody. OwnerId *int64 `type:"long"` } // String returns the string representation func (s NFSFileShareDefaults) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NFSFileShareDefaults) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *NFSFileShareDefaults) Validate() error { invalidParams := request.ErrInvalidParams{Context: "NFSFileShareDefaults"} if s.DirectoryMode != nil && len(*s.DirectoryMode) < 1 { invalidParams.Add(request.NewErrParamMinLen("DirectoryMode", 1)) } if s.FileMode != nil && len(*s.FileMode) < 1 { invalidParams.Add(request.NewErrParamMinLen("FileMode", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDirectoryMode sets the DirectoryMode field's value. func (s *NFSFileShareDefaults) SetDirectoryMode(v string) *NFSFileShareDefaults { s.DirectoryMode = &v return s } // SetFileMode sets the FileMode field's value. func (s *NFSFileShareDefaults) SetFileMode(v string) *NFSFileShareDefaults { s.FileMode = &v return s } // SetGroupId sets the GroupId field's value. func (s *NFSFileShareDefaults) SetGroupId(v int64) *NFSFileShareDefaults { s.GroupId = &v return s } // SetOwnerId sets the OwnerId field's value. func (s *NFSFileShareDefaults) SetOwnerId(v int64) *NFSFileShareDefaults { s.OwnerId = &v return s } // The Unix file permissions and ownership information assigned, by default, // to native S3 objects when file gateway discovers them in S3 buckets. This // operation is only supported in file gateways. type NFSFileShareInfo struct { _ struct{} `type:"structure"` // The list of clients that are allowed to access the file gateway. The list // must contain either valid IP addresses or valid CIDR blocks. ClientList []*string `min:"1" type:"list"` // The default storage class for objects put into an Amazon S3 bucket by the // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. // // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the file share. FileShareARN *string `min:"50" type:"string"` // The ID of the file share. FileShareId *string `min:"12" type:"string"` // The status of the file share. // // Valid Values: CREATING | UPDATING | AVAILABLE | DELETING FileShareStatus *string `min:"3" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, // otherwise set to false. The default value is true. // // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ARN of the backend storage used for storing file data. LocationARN *string `min:"16" type:"string"` // Describes Network File System (NFS) file share default values. Files and // folders stored as Amazon S3 objects in S3 buckets don't, by default, have // Unix file permissions assigned to them. Upon discovery in an S3 bucket by // Storage Gateway, the S3 objects that represent files and folders are assigned // these default Unix permissions. 
// This operation is only supported for file // gateways. NFSFileShareDefaults *NFSFileShareDefaults `type:"structure"` // A value that sets the access control list (ACL) permission for objects in // the S3 bucket that a file gateway puts objects into. The default value is // private. ObjectACL *string `type:"string" enum:"ObjectACL"` // The file share path used by the NFS client to identify the mount point. Path *string `type:"string"` // A value that sets the write status of a file share. Set this value to true // to set the write status to read-only, otherwise set to false. // // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. // // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The ARN of the IAM role that file gateway assumes when it accesses the underlying // storage. Role *string `min:"20" type:"string"` // The user mapped to anonymous user. Valid options are the following: // // * RootSquash: Only root is mapped to anonymous user. // // * NoSquash: No one is mapped to anonymous user. // // * AllSquash: Everyone is mapped to anonymous user. Squash *string `min:"5" type:"string"` // A list of up to 50 tags assigned to the NFS file share, sorted alphabetically // by key name. Each tag is a key-value pair. For a gateway with more than 10 // tags assigned, you can view all tags using the ListTagsForResource API operation. Tags []*Tag `type:"list"` } // String returns the string representation func (s NFSFileShareInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NFSFileShareInfo) GoString() string { return s.String() } // SetClientList sets the ClientList field's value. func (s *NFSFileShareInfo) SetClientList(v []*string) *NFSFileShareInfo { s.ClientList = v return s } // SetDefaultStorageClass sets the DefaultStorageClass field's value. func (s *NFSFileShareInfo) SetDefaultStorageClass(v string) *NFSFileShareInfo { s.DefaultStorageClass = &v return s } // SetFileShareARN sets the FileShareARN field's value. func (s *NFSFileShareInfo) SetFileShareARN(v string) *NFSFileShareInfo { s.FileShareARN = &v return s } // SetFileShareId sets the FileShareId field's value. func (s *NFSFileShareInfo) SetFileShareId(v string) *NFSFileShareInfo { s.FileShareId = &v return s } // SetFileShareStatus sets the FileShareStatus field's value. func (s *NFSFileShareInfo) SetFileShareStatus(v string) *NFSFileShareInfo { s.FileShareStatus = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *NFSFileShareInfo) SetGatewayARN(v string) *NFSFileShareInfo { s.GatewayARN = &v return s } // SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value. func (s *NFSFileShareInfo) SetGuessMIMETypeEnabled(v bool) *NFSFileShareInfo { s.GuessMIMETypeEnabled = &v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *NFSFileShareInfo) SetKMSEncrypted(v bool) *NFSFileShareInfo { s.KMSEncrypted = &v return s }
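// Example (editor's illustrative sketch): NFSFileShareDefaults, defined above,
// carries the Unix permission defaults in "nnnn" string form, so octal modes
// are written as strings rather than numeric literals. The 65534 IDs are an
// assumption here, the UID/GID conventionally assigned to nfsnobody:
//
//	defaults := new(NFSFileShareDefaults).
//		SetFileMode("0666").
//		SetDirectoryMode("0777").
//		SetGroupId(65534).
//		SetOwnerId(65534)
//	if err := defaults.Validate(); err != nil {
//		// handle the request.ErrInvalidParams
//	}
//
// SetKMSKey sets the KMSKey field's value.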
func (s *NFSFileShareInfo) SetKMSKey(v string) *NFSFileShareInfo { s.KMSKey = &v return s } // SetLocationARN sets the LocationARN field's value. func (s *NFSFileShareInfo) SetLocationARN(v string) *NFSFileShareInfo { s.LocationARN = &v return s } // SetNFSFileShareDefaults sets the NFSFileShareDefaults field's value. func (s *NFSFileShareInfo) SetNFSFileShareDefaults(v *NFSFileShareDefaults) *NFSFileShareInfo { s.NFSFileShareDefaults = v return s } // SetObjectACL sets the ObjectACL field's value. func (s *NFSFileShareInfo) SetObjectACL(v string) *NFSFileShareInfo { s.ObjectACL = &v return s } // SetPath sets the Path field's value. func (s *NFSFileShareInfo) SetPath(v string) *NFSFileShareInfo { s.Path = &v return s } // SetReadOnly sets the ReadOnly field's value. func (s *NFSFileShareInfo) SetReadOnly(v bool) *NFSFileShareInfo { s.ReadOnly = &v return s } // SetRequesterPays sets the RequesterPays field's value. func (s *NFSFileShareInfo) SetRequesterPays(v bool) *NFSFileShareInfo { s.RequesterPays = &v return s } // SetRole sets the Role field's value. func (s *NFSFileShareInfo) SetRole(v string) *NFSFileShareInfo { s.Role = &v return s } // SetSquash sets the Squash field's value. func (s *NFSFileShareInfo) SetSquash(v string) *NFSFileShareInfo { s.Squash = &v return s } // SetTags sets the Tags field's value. func (s *NFSFileShareInfo) SetTags(v []*Tag) *NFSFileShareInfo { s.Tags = v return s } // Describes a gateway's network interface. type NetworkInterface struct { _ struct{} `type:"structure"` // The Internet Protocol version 4 (IPv4) address of the interface. Ipv4Address *string `type:"string"` // The Internet Protocol version 6 (IPv6) address of the interface. Currently // not supported. Ipv6Address *string `type:"string"` // The Media Access Control (MAC) address of the interface. // // This is currently unsupported and will not be returned in output. MacAddress *string `type:"string"` } // String returns the string representation func (s NetworkInterface) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NetworkInterface) GoString() string { return s.String() } // SetIpv4Address sets the Ipv4Address field's value. func (s *NetworkInterface) SetIpv4Address(v string) *NetworkInterface { s.Ipv4Address = &v return s } // SetIpv6Address sets the Ipv6Address field's value. func (s *NetworkInterface) SetIpv6Address(v string) *NetworkInterface { s.Ipv6Address = &v return s } // SetMacAddress sets the MacAddress field's value. func (s *NetworkInterface) SetMacAddress(v string) *NetworkInterface { s.MacAddress = &v return s } type NotifyWhenUploadedInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file share. // // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s NotifyWhenUploadedInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NotifyWhenUploadedInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *NotifyWhenUploadedInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "NotifyWhenUploadedInput"} if s.FileShareARN == nil { invalidParams.Add(request.NewErrParamRequired("FileShareARN")) } if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetFileShareARN sets the FileShareARN field's value. func (s *NotifyWhenUploadedInput) SetFileShareARN(v string) *NotifyWhenUploadedInput { s.FileShareARN = &v return s } type NotifyWhenUploadedOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file share. FileShareARN *string `min:"50" type:"string"` // The randomly generated ID of the notification that was sent. This ID is in // UUID format. NotificationId *string `min:"1" type:"string"` } // String returns the string representation func (s NotifyWhenUploadedOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NotifyWhenUploadedOutput) GoString() string { return s.String() } // SetFileShareARN sets the FileShareARN field's value. func (s *NotifyWhenUploadedOutput) SetFileShareARN(v string) *NotifyWhenUploadedOutput { s.FileShareARN = &v return s } // SetNotificationId sets the NotificationId field's value. func (s *NotifyWhenUploadedOutput) SetNotificationId(v string) *NotifyWhenUploadedOutput { s.NotificationId = &v return s } // RefreshCacheInput type RefreshCacheInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file share you want to refresh. // // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` // A comma-separated list of the paths of folders to refresh in the cache. The // default is ["/"]. The default refreshes objects and folders at the root of // the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that // the file share has access to is refreshed. FolderList []*string `min:"1" type:"list"` // A value that specifies whether to recursively refresh folders in the cache. // The refresh includes folders that were in the cache the last time the gateway // listed the folder's contents. If this value is set to true, each folder that // is listed in FolderList is recursively updated. Otherwise, subfolders listed // in FolderList are not refreshed. Only objects that are in folders listed // directly under FolderList are found and used for the update. The default // is true. // // Valid Values: true | false Recursive *bool `type:"boolean"` } // String returns the string representation func (s RefreshCacheInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RefreshCacheInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *RefreshCacheInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RefreshCacheInput"} if s.FileShareARN == nil { invalidParams.Add(request.NewErrParamRequired("FileShareARN")) } if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } if s.FolderList != nil && len(s.FolderList) < 1 { invalidParams.Add(request.NewErrParamMinLen("FolderList", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil }
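// Example (editor's illustrative sketch): per the field docs above, FolderList
// scopes a cache refresh and Recursive controls whether subfolders are walked.
// A request that recursively refreshes two cached folders (the ARN and paths
// are placeholders):
//
//	reports, logs := "/reports", "/logs"
//	input := new(RefreshCacheInput).
//		SetFileShareARN("arn:aws:storagegateway:us-east-1:123456789012:share/share-F1D2C3B4").
//		SetFolderList([]*string{&reports, &logs}).
//		SetRecursive(true)
//	if err := input.Validate(); err != nil {
//		// handle the request.ErrInvalidParams
//	}
//
// SetFileShareARN sets the FileShareARN field's value.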
func (s *RefreshCacheInput) SetFileShareARN(v string) *RefreshCacheInput { s.FileShareARN = &v return s } // SetFolderList sets the FolderList field's value. func (s *RefreshCacheInput) SetFolderList(v []*string) *RefreshCacheInput { s.FolderList = v return s } // SetRecursive sets the Recursive field's value. func (s *RefreshCacheInput) SetRecursive(v bool) *RefreshCacheInput { s.Recursive = &v return s } // RefreshCacheOutput type RefreshCacheOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file share. FileShareARN *string `min:"50" type:"string"` // The randomly generated ID of the notification that was sent. This ID is in // UUID format. NotificationId *string `min:"1" type:"string"` } // String returns the string representation func (s RefreshCacheOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RefreshCacheOutput) GoString() string { return s.String() } // SetFileShareARN sets the FileShareARN field's value. func (s *RefreshCacheOutput) SetFileShareARN(v string) *RefreshCacheOutput { s.FileShareARN = &v return s } // SetNotificationId sets the NotificationId field's value. func (s *RefreshCacheOutput) SetNotificationId(v string) *RefreshCacheOutput { s.NotificationId = &v return s } // RemoveTagsFromResourceInput type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the resource you want to remove the tags // from. // // ResourceARN is a required field ResourceARN *string `min:"50" type:"string" required:"true"` // The keys of the tags you want to remove from the specified resource. A tag // is composed of a key-value pair. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` } // String returns the string representation func (s RemoveTagsFromResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RemoveTagsFromResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *RemoveTagsFromResourceInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} if s.ResourceARN == nil { invalidParams.Add(request.NewErrParamRequired("ResourceARN")) } if s.ResourceARN != nil && len(*s.ResourceARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 50)) } if s.TagKeys == nil { invalidParams.Add(request.NewErrParamRequired("TagKeys")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetResourceARN sets the ResourceARN field's value. func (s *RemoveTagsFromResourceInput) SetResourceARN(v string) *RemoveTagsFromResourceInput { s.ResourceARN = &v return s } // SetTagKeys sets the TagKeys field's value. func (s *RemoveTagsFromResourceInput) SetTagKeys(v []*string) *RemoveTagsFromResourceInput { s.TagKeys = v return s } // RemoveTagsFromResourceOutput type RemoveTagsFromResourceOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the resource that the tags were removed // from. ResourceARN *string `min:"50" type:"string"` } // String returns the string representation func (s RemoveTagsFromResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RemoveTagsFromResourceOutput) GoString() string { return s.String() } // SetResourceARN sets the ResourceARN field's value. 
func (s *RemoveTagsFromResourceOutput) SetResourceARN(v string) *RemoveTagsFromResourceOutput { s.ResourceARN = &v return s } type ResetCacheInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s ResetCacheInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ResetCacheInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ResetCacheInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ResetCacheInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *ResetCacheInput) SetGatewayARN(v string) *ResetCacheInput { s.GatewayARN = &v return s } type ResetCacheOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s ResetCacheOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ResetCacheOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *ResetCacheOutput) SetGatewayARN(v string) *ResetCacheOutput { s.GatewayARN = &v return s } // RetrieveTapeArchiveInput type RetrieveTapeArchiveInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway you want to retrieve the virtual // tape to. Use the ListGateways operation to return a list of gateways for // your account and AWS Region. // // You retrieve archived virtual tapes to only one gateway and the gateway must // be a tape gateway. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the virtual tape you want to retrieve from // the virtual tape shelf (VTS). // // TapeARN is a required field TapeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s RetrieveTapeArchiveInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RetrieveTapeArchiveInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *RetrieveTapeArchiveInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RetrieveTapeArchiveInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.TapeARN == nil { invalidParams.Add(request.NewErrParamRequired("TapeARN")) } if s.TapeARN != nil && len(*s.TapeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *RetrieveTapeArchiveInput) SetGatewayARN(v string) *RetrieveTapeArchiveInput { s.GatewayARN = &v return s } // SetTapeARN sets the TapeARN field's value. func (s *RetrieveTapeArchiveInput) SetTapeARN(v string) *RetrieveTapeArchiveInput { s.TapeARN = &v return s } // RetrieveTapeArchiveOutput type RetrieveTapeArchiveOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the retrieved virtual tape. TapeARN *string `min:"50" type:"string"` } // String returns the string representation func (s RetrieveTapeArchiveOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RetrieveTapeArchiveOutput) GoString() string { return s.String() } // SetTapeARN sets the TapeARN field's value. func (s *RetrieveTapeArchiveOutput) SetTapeARN(v string) *RetrieveTapeArchiveOutput { s.TapeARN = &v return s } // RetrieveTapeRecoveryPointInput type RetrieveTapeRecoveryPointInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the virtual tape for which you want to // retrieve the recovery point. // // TapeARN is a required field TapeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s RetrieveTapeRecoveryPointInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RetrieveTapeRecoveryPointInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *RetrieveTapeRecoveryPointInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RetrieveTapeRecoveryPointInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.TapeARN == nil { invalidParams.Add(request.NewErrParamRequired("TapeARN")) } if s.TapeARN != nil && len(*s.TapeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TapeARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *RetrieveTapeRecoveryPointInput) SetGatewayARN(v string) *RetrieveTapeRecoveryPointInput { s.GatewayARN = &v return s } // SetTapeARN sets the TapeARN field's value. 
func (s *RetrieveTapeRecoveryPointInput) SetTapeARN(v string) *RetrieveTapeRecoveryPointInput { s.TapeARN = &v return s } // RetrieveTapeRecoveryPointOutput type RetrieveTapeRecoveryPointOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the virtual tape for which the recovery // point was retrieved. TapeARN *string `min:"50" type:"string"` } // String returns the string representation func (s RetrieveTapeRecoveryPointOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RetrieveTapeRecoveryPointOutput) GoString() string { return s.String() } // SetTapeARN sets the TapeARN field's value. func (s *RetrieveTapeRecoveryPointOutput) SetTapeARN(v string) *RetrieveTapeRecoveryPointOutput { s.TapeARN = &v return s } // The Windows file permissions and ownership information assigned, by default, // to native S3 objects when file gateway discovers them in S3 buckets. This // operation is only supported for file gateways. type SMBFileShareInfo struct { _ struct{} `type:"structure"` // A list of users or groups in the Active Directory that have administrator // rights to the file share. A group must be prefixed with the @ character. // For example @group1. Can only be set if Authentication is set to ActiveDirectory. AdminUserList []*string `type:"list"` // The Amazon Resource Name (ARN) of the storage used for the audit logs. AuditDestinationARN *string `type:"string"` // The authentication method of the file share. The default is ActiveDirectory. // // Valid Values: ActiveDirectory | GuestAccess Authentication *string `min:"5" type:"string"` // The default storage class for objects put into an Amazon S3 bucket by the // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. // // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the file share. FileShareARN *string `min:"50" type:"string"` // The ID of the file share. FileShareId *string `min:"12" type:"string"` // The status of the file share. // // Valid Values: CREATING | UPDATING | AVAILABLE | DELETING FileShareStatus *string `min:"3" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, // otherwise set to false. The default value is true. // // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are not allowed to // access the file share. A group must be prefixed with the @ character. For // example @group1. Can only be set if Authentication is set to ActiveDirectory. InvalidUserList []*string `type:"list"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ARN of the backend storage used for storing file data. 
LocationARN *string `min:"16" type:"string"` // A value that sets the access control list (ACL) permission for objects in // the S3 bucket that a file gateway puts objects into. The default value is // private. ObjectACL *string `type:"string" enum:"ObjectACL"` // The file share path used by the SMB client to identify the mount point. Path *string `type:"string"` // A value that sets the write status of a file share. Set this value to true // to set the write status to read-only, otherwise set to false. // // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. // // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The ARN of the IAM role that file gateway assumes when it accesses the underlying // storage. Role *string `min:"20" type:"string"` // If this value is set to true, it indicates that access control list (ACL) // is enabled on the SMB file share. If it is set to false, it indicates that // file and directory permissions are mapped to the POSIX permission. // // For more information, see Using Microsoft Windows ACLs to control access // to an SMB file share (https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html) // in the AWS Storage Gateway User Guide. SMBACLEnabled *bool `type:"boolean"` // A list of up to 50 tags assigned to the SMB file share, sorted alphabetically // by key name. Each tag is a key-value pair. For a gateway with more than 10 // tags assigned, you can view all tags using the ListTagsForResource API operation. Tags []*Tag `type:"list"` // A list of users or groups in the Active Directory that are allowed to access // the file share. A group must be prefixed with the @ character. For example, // @group1. Can only be set if Authentication is set to ActiveDirectory. ValidUserList []*string `type:"list"` } // String returns the string representation func (s SMBFileShareInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SMBFileShareInfo) GoString() string { return s.String() } // SetAdminUserList sets the AdminUserList field's value. func (s *SMBFileShareInfo) SetAdminUserList(v []*string) *SMBFileShareInfo { s.AdminUserList = v return s } // SetAuditDestinationARN sets the AuditDestinationARN field's value. func (s *SMBFileShareInfo) SetAuditDestinationARN(v string) *SMBFileShareInfo { s.AuditDestinationARN = &v return s } // SetAuthentication sets the Authentication field's value. func (s *SMBFileShareInfo) SetAuthentication(v string) *SMBFileShareInfo { s.Authentication = &v return s } // SetDefaultStorageClass sets the DefaultStorageClass field's value. func (s *SMBFileShareInfo) SetDefaultStorageClass(v string) *SMBFileShareInfo { s.DefaultStorageClass = &v return s } // SetFileShareARN sets the FileShareARN field's value. func (s *SMBFileShareInfo) SetFileShareARN(v string) *SMBFileShareInfo { s.FileShareARN = &v return s } // SetFileShareId sets the FileShareId field's value. 
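//
// Illustrative sketch (not part of the generated code): the flags on this
// type are *bool so that "not reported" can be told apart from false; guard
// against nil before dereferencing. The variable info is a hypothetical
// *SMBFileShareInfo from a DescribeSMBFileShares response.
//
//	if info.SMBACLEnabled != nil && *info.SMBACLEnabled {
//		// Windows ACLs are in effect; POSIX permission mapping is disabled.
//	}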
func (s *SMBFileShareInfo) SetFileShareId(v string) *SMBFileShareInfo { s.FileShareId = &v return s } // SetFileShareStatus sets the FileShareStatus field's value. func (s *SMBFileShareInfo) SetFileShareStatus(v string) *SMBFileShareInfo { s.FileShareStatus = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *SMBFileShareInfo) SetGatewayARN(v string) *SMBFileShareInfo { s.GatewayARN = &v return s } // SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value. func (s *SMBFileShareInfo) SetGuessMIMETypeEnabled(v bool) *SMBFileShareInfo { s.GuessMIMETypeEnabled = &v return s } // SetInvalidUserList sets the InvalidUserList field's value. func (s *SMBFileShareInfo) SetInvalidUserList(v []*string) *SMBFileShareInfo { s.InvalidUserList = v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *SMBFileShareInfo) SetKMSEncrypted(v bool) *SMBFileShareInfo { s.KMSEncrypted = &v return s } // SetKMSKey sets the KMSKey field's value. func (s *SMBFileShareInfo) SetKMSKey(v string) *SMBFileShareInfo { s.KMSKey = &v return s } // SetLocationARN sets the LocationARN field's value. func (s *SMBFileShareInfo) SetLocationARN(v string) *SMBFileShareInfo { s.LocationARN = &v return s } // SetObjectACL sets the ObjectACL field's value. func (s *SMBFileShareInfo) SetObjectACL(v string) *SMBFileShareInfo { s.ObjectACL = &v return s } // SetPath sets the Path field's value. func (s *SMBFileShareInfo) SetPath(v string) *SMBFileShareInfo { s.Path = &v return s } // SetReadOnly sets the ReadOnly field's value. func (s *SMBFileShareInfo) SetReadOnly(v bool) *SMBFileShareInfo { s.ReadOnly = &v return s } // SetRequesterPays sets the RequesterPays field's value. func (s *SMBFileShareInfo) SetRequesterPays(v bool) *SMBFileShareInfo { s.RequesterPays = &v return s } // SetRole sets the Role field's value. func (s *SMBFileShareInfo) SetRole(v string) *SMBFileShareInfo { s.Role = &v return s } // SetSMBACLEnabled sets the SMBACLEnabled field's value. func (s *SMBFileShareInfo) SetSMBACLEnabled(v bool) *SMBFileShareInfo { s.SMBACLEnabled = &v return s } // SetTags sets the Tags field's value. func (s *SMBFileShareInfo) SetTags(v []*Tag) *SMBFileShareInfo { s.Tags = v return s } // SetValidUserList sets the ValidUserList field's value. func (s *SMBFileShareInfo) SetValidUserList(v []*string) *SMBFileShareInfo { s.ValidUserList = v return s } // An internal server error has occurred because the service is unavailable. // For more information, see the error and message fields. type ServiceUnavailableError struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A StorageGatewayError that provides more information about the cause of the // error. Error_ *Error `locationName:"error" type:"structure"` // A human-readable message describing the error that occurred. Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s ServiceUnavailableError) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ServiceUnavailableError) GoString() string { return s.String() } func newErrorServiceUnavailableError(v protocol.ResponseMetadata) error { return &ServiceUnavailableError{ RespMetadata: v, } } // Code returns the exception type name. func (s *ServiceUnavailableError) Code() string { return "ServiceUnavailableError" } // Message returns the exception's message. 
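//
// Illustrative sketch (not part of the generated code): modeled exceptions
// such as ServiceUnavailableError satisfy the awserr.Error interface through
// Code, Message, and OrigErr, so callers can branch on the error code. This
// assumes the usual github.com/aws/aws-sdk-go/aws/awserr import.
//
//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ServiceUnavailableError" {
//		// the service is temporarily unavailable; back off and retry
//	}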
func (s *ServiceUnavailableError) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. func (s *ServiceUnavailableError) OrigErr() error { return nil } func (s *ServiceUnavailableError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. func (s *ServiceUnavailableError) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. func (s *ServiceUnavailableError) RequestID() string { return s.RespMetadata.RequestID } // SetLocalConsolePasswordInput type SetLocalConsolePasswordInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The password you want to set for your VM local console. // // LocalConsolePassword is a required field LocalConsolePassword *string `min:"6" type:"string" required:"true" sensitive:"true"` } // String returns the string representation func (s SetLocalConsolePasswordInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SetLocalConsolePasswordInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *SetLocalConsolePasswordInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "SetLocalConsolePasswordInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.LocalConsolePassword == nil { invalidParams.Add(request.NewErrParamRequired("LocalConsolePassword")) } if s.LocalConsolePassword != nil && len(*s.LocalConsolePassword) < 6 { invalidParams.Add(request.NewErrParamMinLen("LocalConsolePassword", 6)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *SetLocalConsolePasswordInput) SetGatewayARN(v string) *SetLocalConsolePasswordInput { s.GatewayARN = &v return s } // SetLocalConsolePassword sets the LocalConsolePassword field's value. func (s *SetLocalConsolePasswordInput) SetLocalConsolePassword(v string) *SetLocalConsolePasswordInput { s.LocalConsolePassword = &v return s } type SetLocalConsolePasswordOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s SetLocalConsolePasswordOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SetLocalConsolePasswordOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *SetLocalConsolePasswordOutput) SetGatewayARN(v string) *SetLocalConsolePasswordOutput { s.GatewayARN = &v return s } // SetSMBGuestPasswordInput type SetSMBGuestPasswordInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the file gateway the SMB file share is // associated with. 
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The password that you want to set for your SMB server. // // Password is a required field Password *string `min:"6" type:"string" required:"true" sensitive:"true"` } // String returns the string representation func (s SetSMBGuestPasswordInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SetSMBGuestPasswordInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *SetSMBGuestPasswordInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "SetSMBGuestPasswordInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.Password == nil { invalidParams.Add(request.NewErrParamRequired("Password")) } if s.Password != nil && len(*s.Password) < 6 { invalidParams.Add(request.NewErrParamMinLen("Password", 6)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *SetSMBGuestPasswordInput) SetGatewayARN(v string) *SetSMBGuestPasswordInput { s.GatewayARN = &v return s } // SetPassword sets the Password field's value. func (s *SetSMBGuestPasswordInput) SetPassword(v string) *SetSMBGuestPasswordInput { s.Password = &v return s } type SetSMBGuestPasswordOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s SetSMBGuestPasswordOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SetSMBGuestPasswordOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *SetSMBGuestPasswordOutput) SetGatewayARN(v string) *SetSMBGuestPasswordOutput { s.GatewayARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway to // shut down. type ShutdownGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s ShutdownGatewayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ShutdownGatewayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ShutdownGatewayInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ShutdownGatewayInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. 
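//
// Illustrative sketch (not part of the generated code): gateway-scoped
// requests such as ShutdownGatewayInput carry only a GatewayARN, and the same
// 50-character minimum applies to all of them. The ARN below is a placeholder
// that satisfies the minimum length.
//
//	input := (&ShutdownGatewayInput{}).
//		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B")
//	err := input.Validate() // nil for this ARN
//	_ = err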
func (s *ShutdownGatewayInput) SetGatewayARN(v string) *ShutdownGatewayInput { s.GatewayARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway that // was shut down. type ShutdownGatewayOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s ShutdownGatewayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ShutdownGatewayOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *ShutdownGatewayOutput) SetGatewayARN(v string) *ShutdownGatewayOutput { s.GatewayARN = &v return s } type StartAvailabilityMonitorTestInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s StartAvailabilityMonitorTestInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s StartAvailabilityMonitorTestInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *StartAvailabilityMonitorTestInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "StartAvailabilityMonitorTestInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *StartAvailabilityMonitorTestInput) SetGatewayARN(v string) *StartAvailabilityMonitorTestInput { s.GatewayARN = &v return s } type StartAvailabilityMonitorTestOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s StartAvailabilityMonitorTestOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s StartAvailabilityMonitorTestOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *StartAvailabilityMonitorTestOutput) SetGatewayARN(v string) *StartAvailabilityMonitorTestOutput { s.GatewayARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway to // start. type StartGatewayInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. 
	//
	// GatewayARN is a required field
	GatewayARN *string `min:"50" type:"string" required:"true"`
}

// String returns the string representation
func (s StartGatewayInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartGatewayInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartGatewayInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartGatewayInput"}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *StartGatewayInput) SetGatewayARN(v string) *StartGatewayInput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing the Amazon Resource Name (ARN) of the gateway that
// was restarted.
type StartGatewayOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s StartGatewayOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartGatewayOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *StartGatewayOutput) SetGatewayARN(v string) *StartGatewayOutput {
	s.GatewayARN = &v
	return s
}

// Describes an iSCSI stored volume.
type StorediSCSIVolume struct {
	_ struct{} `type:"structure"`

	// The date the volume was created. Volumes created prior to March 28, 2017
	// don’t have this time stamp.
	CreatedDate *time.Time `type:"timestamp"`

	// The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used
	// for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric
	// CMKs. This value can only be set when KMSEncrypted is true. Optional.
	KMSKey *string `min:"7" type:"string"`

	// Indicates whether existing data on the underlying local disk was preserved
	// when the stored volume was created.
	//
	// Valid Values: true | false
	PreservedExistingData *bool `type:"boolean"`

	// If the stored volume was created from a snapshot, this field contains the
	// snapshot ID used, e.g. snap-78e22663. Otherwise, this field is not included.
	SourceSnapshotId *string `type:"string"`

	// The name of the iSCSI target used by an initiator to connect to a volume
	// and used as a suffix for the target ARN. For example, specifying TargetName
	// as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume.
	// The target name must be unique across all volumes on a gateway.
	//
	// If you don't specify a value, Storage Gateway uses the value that was previously
	// used for this volume as the new target name.
	TargetName *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the storage volume.
	VolumeARN *string `min:"50" type:"string"`

	// A value that indicates whether a storage volume is attached to, detached
	// from, or is in the process of detaching from a gateway.
	// For more information, see Moving your volumes to a different gateway
	// (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume).
	VolumeAttachmentStatus *string `min:"3" type:"string"`

	// The ID of the local disk that was specified in the CreateStorediSCSIVolume
	// operation.
	VolumeDiskId *string `min:"1" type:"string"`

	// The unique identifier of the volume, e.g. vol-AE4B946D.
	VolumeId *string `min:"12" type:"string"`

	// The percentage complete if the volume is restoring or bootstrapping,
	// representing the percent of data transferred. This field does not appear
	// in the response if the stored volume is not restoring or bootstrapping.
	VolumeProgress *float64 `type:"double"`

	// The size of the volume in bytes.
	VolumeSizeInBytes *int64 `type:"long"`

	// One of the VolumeStatus values that indicates the state of the storage volume.
	VolumeStatus *string `min:"3" type:"string"`

	// One of the VolumeType enumeration values describing the type of the volume.
	VolumeType *string `min:"3" type:"string"`

	// The size of the data stored on the volume in bytes. This value is calculated
	// based on the number of blocks that are touched, instead of the actual amount
	// of data written. This value can be useful for sequential write patterns but
	// less accurate for random write patterns. VolumeUsedInBytes is different from
	// the compressed size of the volume, which is the value that is used to calculate
	// your bill.
	//
	// This value is not available for volumes created prior to May 13, 2015, until
	// you store data on the volume.
	VolumeUsedInBytes *int64 `type:"long"`

	// A VolumeiSCSIAttributes object that represents a collection of iSCSI attributes
	// for one stored volume.
	VolumeiSCSIAttributes *VolumeiSCSIAttributes `type:"structure"`
}

// String returns the string representation
func (s StorediSCSIVolume) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StorediSCSIVolume) GoString() string {
	return s.String()
}

// SetCreatedDate sets the CreatedDate field's value.
func (s *StorediSCSIVolume) SetCreatedDate(v time.Time) *StorediSCSIVolume {
	s.CreatedDate = &v
	return s
}

// SetKMSKey sets the KMSKey field's value.
func (s *StorediSCSIVolume) SetKMSKey(v string) *StorediSCSIVolume {
	s.KMSKey = &v
	return s
}

// SetPreservedExistingData sets the PreservedExistingData field's value.
func (s *StorediSCSIVolume) SetPreservedExistingData(v bool) *StorediSCSIVolume {
	s.PreservedExistingData = &v
	return s
}

// SetSourceSnapshotId sets the SourceSnapshotId field's value.
func (s *StorediSCSIVolume) SetSourceSnapshotId(v string) *StorediSCSIVolume {
	s.SourceSnapshotId = &v
	return s
}

// SetTargetName sets the TargetName field's value.
func (s *StorediSCSIVolume) SetTargetName(v string) *StorediSCSIVolume {
	s.TargetName = &v
	return s
}

// SetVolumeARN sets the VolumeARN field's value.
func (s *StorediSCSIVolume) SetVolumeARN(v string) *StorediSCSIVolume {
	s.VolumeARN = &v
	return s
}

// SetVolumeAttachmentStatus sets the VolumeAttachmentStatus field's value.
func (s *StorediSCSIVolume) SetVolumeAttachmentStatus(v string) *StorediSCSIVolume {
	s.VolumeAttachmentStatus = &v
	return s
}

// SetVolumeDiskId sets the VolumeDiskId field's value.
func (s *StorediSCSIVolume) SetVolumeDiskId(v string) *StorediSCSIVolume {
	s.VolumeDiskId = &v
	return s
}

// SetVolumeId sets the VolumeId field's value.
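//
// Illustrative sketch (not part of the generated code): every field on this
// type is a pointer so that an unset value can be distinguished from a zero
// value; check for nil before dereferencing. The variable vol is a
// hypothetical StorediSCSIVolume from a DescribeStorediSCSIVolumes response.
//
//	if vol.VolumeProgress != nil {
//		pct := *vol.VolumeProgress // restore/bootstrap progress, 0 to 100
//		_ = pct
//	}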
func (s *StorediSCSIVolume) SetVolumeId(v string) *StorediSCSIVolume { s.VolumeId = &v return s } // SetVolumeProgress sets the VolumeProgress field's value. func (s *StorediSCSIVolume) SetVolumeProgress(v float64) *StorediSCSIVolume { s.VolumeProgress = &v return s } // SetVolumeSizeInBytes sets the VolumeSizeInBytes field's value. func (s *StorediSCSIVolume) SetVolumeSizeInBytes(v int64) *StorediSCSIVolume { s.VolumeSizeInBytes = &v return s } // SetVolumeStatus sets the VolumeStatus field's value. func (s *StorediSCSIVolume) SetVolumeStatus(v string) *StorediSCSIVolume { s.VolumeStatus = &v return s } // SetVolumeType sets the VolumeType field's value. func (s *StorediSCSIVolume) SetVolumeType(v string) *StorediSCSIVolume { s.VolumeType = &v return s } // SetVolumeUsedInBytes sets the VolumeUsedInBytes field's value. func (s *StorediSCSIVolume) SetVolumeUsedInBytes(v int64) *StorediSCSIVolume { s.VolumeUsedInBytes = &v return s } // SetVolumeiSCSIAttributes sets the VolumeiSCSIAttributes field's value. func (s *StorediSCSIVolume) SetVolumeiSCSIAttributes(v *VolumeiSCSIAttributes) *StorediSCSIVolume { s.VolumeiSCSIAttributes = v return s } // A key-value pair that helps you manage, filter, and search for your resource. // Allowed characters: letters, white space, and numbers, representable in UTF-8, // and the following characters: + - = . _ : /. type Tag struct { _ struct{} `type:"structure"` // Tag key. The key can't start with aws:. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` // Value of the tag key. // // Value is a required field Value *string `type:"string" required:"true"` } // String returns the string representation func (s Tag) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Tag) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *Tag) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Tag"} if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if s.Value == nil { invalidParams.Add(request.NewErrParamRequired("Value")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetKey sets the Key field's value. func (s *Tag) SetKey(v string) *Tag { s.Key = &v return s } // SetValue sets the Value field's value. func (s *Tag) SetValue(v string) *Tag { s.Value = &v return s } // Describes a virtual tape object. type Tape struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ID of the pool that contains tapes that will be archived. The tapes in // this pool are archived in the S3 storage class that is associated with the // pool. When you use your backup application to eject the tape, the tape is // archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) // that corresponds to the pool. // // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // For archiving virtual tapes, indicates how much data remains to be uploaded // before archiving is complete. // // Range: 0 (not started) to 100 (complete). 
Progress *float64 `type:"double"` // The Amazon Resource Name (ARN) of the virtual tape. TapeARN *string `min:"50" type:"string"` // The barcode that identifies a specific virtual tape. TapeBarcode *string `min:"7" type:"string"` // The date the virtual tape was created. TapeCreatedDate *time.Time `type:"timestamp"` // The size, in bytes, of the virtual tape capacity. TapeSizeInBytes *int64 `type:"long"` // The current state of the virtual tape. TapeStatus *string `type:"string"` // The size, in bytes, of data stored on the virtual tape. // // This value is not available for tapes created prior to May 13, 2015. TapeUsedInBytes *int64 `type:"long"` // The virtual tape library (VTL) device that the virtual tape is associated // with. VTLDevice *string `min:"50" type:"string"` } // String returns the string representation func (s Tape) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Tape) GoString() string { return s.String() } // SetKMSKey sets the KMSKey field's value. func (s *Tape) SetKMSKey(v string) *Tape { s.KMSKey = &v return s } // SetPoolId sets the PoolId field's value. func (s *Tape) SetPoolId(v string) *Tape { s.PoolId = &v return s } // SetProgress sets the Progress field's value. func (s *Tape) SetProgress(v float64) *Tape { s.Progress = &v return s } // SetTapeARN sets the TapeARN field's value. func (s *Tape) SetTapeARN(v string) *Tape { s.TapeARN = &v return s } // SetTapeBarcode sets the TapeBarcode field's value. func (s *Tape) SetTapeBarcode(v string) *Tape { s.TapeBarcode = &v return s } // SetTapeCreatedDate sets the TapeCreatedDate field's value. func (s *Tape) SetTapeCreatedDate(v time.Time) *Tape { s.TapeCreatedDate = &v return s } // SetTapeSizeInBytes sets the TapeSizeInBytes field's value. func (s *Tape) SetTapeSizeInBytes(v int64) *Tape { s.TapeSizeInBytes = &v return s } // SetTapeStatus sets the TapeStatus field's value. func (s *Tape) SetTapeStatus(v string) *Tape { s.TapeStatus = &v return s } // SetTapeUsedInBytes sets the TapeUsedInBytes field's value. func (s *Tape) SetTapeUsedInBytes(v int64) *Tape { s.TapeUsedInBytes = &v return s } // SetVTLDevice sets the VTLDevice field's value. func (s *Tape) SetVTLDevice(v string) *Tape { s.VTLDevice = &v return s } // Represents a virtual tape that is archived in the virtual tape shelf (VTS). type TapeArchive struct { _ struct{} `type:"structure"` // The time that the archiving of the virtual tape was completed. // // The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' // format. CompletionTime *time.Time `type:"timestamp"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ID of the pool that was used to archive the tape. The tapes in this pool // are archived in the S3 storage class that is associated with the pool. // // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the tape gateway that the virtual tape // is being retrieved to. // // The virtual tape is retrieved from the virtual tape shelf (VTS). RetrievedTo *string `min:"50" type:"string"` // The Amazon Resource Name (ARN) of an archived virtual tape. TapeARN *string `min:"50" type:"string"` // The barcode that identifies the archived virtual tape. 
TapeBarcode *string `min:"7" type:"string"` // The date the virtual tape was created. TapeCreatedDate *time.Time `type:"timestamp"` // The size, in bytes, of the archived virtual tape. TapeSizeInBytes *int64 `type:"long"` // The current state of the archived virtual tape. TapeStatus *string `type:"string"` // The size, in bytes, of data stored on the virtual tape. // // This value is not available for tapes created prior to May 13, 2015. TapeUsedInBytes *int64 `type:"long"` } // String returns the string representation func (s TapeArchive) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TapeArchive) GoString() string { return s.String() } // SetCompletionTime sets the CompletionTime field's value. func (s *TapeArchive) SetCompletionTime(v time.Time) *TapeArchive { s.CompletionTime = &v return s } // SetKMSKey sets the KMSKey field's value. func (s *TapeArchive) SetKMSKey(v string) *TapeArchive { s.KMSKey = &v return s } // SetPoolId sets the PoolId field's value. func (s *TapeArchive) SetPoolId(v string) *TapeArchive { s.PoolId = &v return s } // SetRetrievedTo sets the RetrievedTo field's value. func (s *TapeArchive) SetRetrievedTo(v string) *TapeArchive { s.RetrievedTo = &v return s } // SetTapeARN sets the TapeARN field's value. func (s *TapeArchive) SetTapeARN(v string) *TapeArchive { s.TapeARN = &v return s } // SetTapeBarcode sets the TapeBarcode field's value. func (s *TapeArchive) SetTapeBarcode(v string) *TapeArchive { s.TapeBarcode = &v return s } // SetTapeCreatedDate sets the TapeCreatedDate field's value. func (s *TapeArchive) SetTapeCreatedDate(v time.Time) *TapeArchive { s.TapeCreatedDate = &v return s } // SetTapeSizeInBytes sets the TapeSizeInBytes field's value. func (s *TapeArchive) SetTapeSizeInBytes(v int64) *TapeArchive { s.TapeSizeInBytes = &v return s } // SetTapeStatus sets the TapeStatus field's value. func (s *TapeArchive) SetTapeStatus(v string) *TapeArchive { s.TapeStatus = &v return s } // SetTapeUsedInBytes sets the TapeUsedInBytes field's value. func (s *TapeArchive) SetTapeUsedInBytes(v int64) *TapeArchive { s.TapeUsedInBytes = &v return s } // Describes a virtual tape. type TapeInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep // Archive) that corresponds to the pool. // // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of a virtual tape. TapeARN *string `min:"50" type:"string"` // The barcode that identifies a specific virtual tape. TapeBarcode *string `min:"7" type:"string"` // The size, in bytes, of a virtual tape. TapeSizeInBytes *int64 `type:"long"` // The status of the tape. TapeStatus *string `type:"string"` } // String returns the string representation func (s TapeInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TapeInfo) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. 
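//
// Illustrative sketch (not part of the generated code): listing results use
// the same pointer convention, so the aws.StringValue and aws.Int64Value
// helpers (from github.com/aws/aws-sdk-go/aws) keep nil checks out of the
// loop. The variable tapes is a hypothetical []*TapeInfo from ListTapes.
//
//	for _, t := range tapes {
//		fmt.Printf("%s: %d bytes, status %s\n",
//			aws.StringValue(t.TapeBarcode),
//			aws.Int64Value(t.TapeSizeInBytes),
//			aws.StringValue(t.TapeStatus))
//	}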
func (s *TapeInfo) SetGatewayARN(v string) *TapeInfo { s.GatewayARN = &v return s } // SetPoolId sets the PoolId field's value. func (s *TapeInfo) SetPoolId(v string) *TapeInfo { s.PoolId = &v return s } // SetTapeARN sets the TapeARN field's value. func (s *TapeInfo) SetTapeARN(v string) *TapeInfo { s.TapeARN = &v return s } // SetTapeBarcode sets the TapeBarcode field's value. func (s *TapeInfo) SetTapeBarcode(v string) *TapeInfo { s.TapeBarcode = &v return s } // SetTapeSizeInBytes sets the TapeSizeInBytes field's value. func (s *TapeInfo) SetTapeSizeInBytes(v int64) *TapeInfo { s.TapeSizeInBytes = &v return s } // SetTapeStatus sets the TapeStatus field's value. func (s *TapeInfo) SetTapeStatus(v string) *TapeInfo { s.TapeStatus = &v return s } // Describes a recovery point. type TapeRecoveryPointInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the virtual tape. TapeARN *string `min:"50" type:"string"` // The time when the point-in-time view of the virtual tape was replicated for // later recovery. // // The default time stamp format of the tape recovery point time is in the ISO8601 // extended YYYY-MM-DD'T'HH:MM:SS'Z' format. TapeRecoveryPointTime *time.Time `type:"timestamp"` // The size, in bytes, of the virtual tapes to recover. TapeSizeInBytes *int64 `type:"long"` // The status of the virtual tapes. TapeStatus *string `type:"string"` } // String returns the string representation func (s TapeRecoveryPointInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TapeRecoveryPointInfo) GoString() string { return s.String() } // SetTapeARN sets the TapeARN field's value. func (s *TapeRecoveryPointInfo) SetTapeARN(v string) *TapeRecoveryPointInfo { s.TapeARN = &v return s } // SetTapeRecoveryPointTime sets the TapeRecoveryPointTime field's value. func (s *TapeRecoveryPointInfo) SetTapeRecoveryPointTime(v time.Time) *TapeRecoveryPointInfo { s.TapeRecoveryPointTime = &v return s } // SetTapeSizeInBytes sets the TapeSizeInBytes field's value. func (s *TapeRecoveryPointInfo) SetTapeSizeInBytes(v int64) *TapeRecoveryPointInfo { s.TapeSizeInBytes = &v return s } // SetTapeStatus sets the TapeStatus field's value. func (s *TapeRecoveryPointInfo) SetTapeStatus(v string) *TapeRecoveryPointInfo { s.TapeStatus = &v return s } type UpdateAutomaticTapeCreationPolicyInput struct { _ struct{} `type:"structure"` // An automatic tape creation policy consists of a list of automatic tape creation // rules. The rules determine when and how to automatically create new tapes. // // AutomaticTapeCreationRules is a required field AutomaticTapeCreationRules []*AutomaticTapeCreationRule `min:"1" type:"list" required:"true"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s UpdateAutomaticTapeCreationPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateAutomaticTapeCreationPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
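//
// Illustrative sketch (not part of the generated code): Validate on this type
// also recurses into each AutomaticTapeCreationRule, and a nested failure is
// reported with an indexed context such as AutomaticTapeCreationRules[0], so
// the offending rule is easy to find. The variable input is a hypothetical
// *UpdateAutomaticTapeCreationPolicyInput.
//
//	if err := input.Validate(); err != nil {
//		// err aggregates top-level and per-rule violations
//	}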
func (s *UpdateAutomaticTapeCreationPolicyInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateAutomaticTapeCreationPolicyInput"} if s.AutomaticTapeCreationRules == nil { invalidParams.Add(request.NewErrParamRequired("AutomaticTapeCreationRules")) } if s.AutomaticTapeCreationRules != nil && len(s.AutomaticTapeCreationRules) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutomaticTapeCreationRules", 1)) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.AutomaticTapeCreationRules != nil { for i, v := range s.AutomaticTapeCreationRules { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AutomaticTapeCreationRules", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAutomaticTapeCreationRules sets the AutomaticTapeCreationRules field's value. func (s *UpdateAutomaticTapeCreationPolicyInput) SetAutomaticTapeCreationRules(v []*AutomaticTapeCreationRule) *UpdateAutomaticTapeCreationPolicyInput { s.AutomaticTapeCreationRules = v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateAutomaticTapeCreationPolicyInput) SetGatewayARN(v string) *UpdateAutomaticTapeCreationPolicyInput { s.GatewayARN = &v return s } type UpdateAutomaticTapeCreationPolicyOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateAutomaticTapeCreationPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateAutomaticTapeCreationPolicyOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateAutomaticTapeCreationPolicyOutput) SetGatewayARN(v string) *UpdateAutomaticTapeCreationPolicyOutput { s.GatewayARN = &v return s } // A JSON object containing one or more of the following fields: // // * UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec // // * UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec type UpdateBandwidthRateLimitInput struct { _ struct{} `type:"structure"` // The average download bandwidth rate limit in bits per second. AverageDownloadRateLimitInBitsPerSec *int64 `min:"102400" type:"long"` // The average upload bandwidth rate limit in bits per second. AverageUploadRateLimitInBitsPerSec *int64 `min:"51200" type:"long"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s UpdateBandwidthRateLimitInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateBandwidthRateLimitInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
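//
// Illustrative sketch (not part of the generated code): the rate limits have
// documented floors (102400 bits/s download, 51200 bits/s upload) that
// Validate enforces as MinValue errors, so an out-of-range throttle fails
// locally rather than at the service. The ARN below is a placeholder.
//
//	input := (&UpdateBandwidthRateLimitInput{}).
//		SetGatewayARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B").
//		SetAverageUploadRateLimitInBitsPerSec(51200) // exactly at the minimum
//	err := input.Validate() // nil: both constraints are satisfied
//	_ = err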
func (s *UpdateBandwidthRateLimitInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "UpdateBandwidthRateLimitInput"}
	if s.AverageDownloadRateLimitInBitsPerSec != nil && *s.AverageDownloadRateLimitInBitsPerSec < 102400 {
		invalidParams.Add(request.NewErrParamMinValue("AverageDownloadRateLimitInBitsPerSec", 102400))
	}
	if s.AverageUploadRateLimitInBitsPerSec != nil && *s.AverageUploadRateLimitInBitsPerSec < 51200 {
		invalidParams.Add(request.NewErrParamMinValue("AverageUploadRateLimitInBitsPerSec", 51200))
	}
	if s.GatewayARN == nil {
		invalidParams.Add(request.NewErrParamRequired("GatewayARN"))
	}
	if s.GatewayARN != nil && len(*s.GatewayARN) < 50 {
		invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAverageDownloadRateLimitInBitsPerSec sets the AverageDownloadRateLimitInBitsPerSec field's value.
func (s *UpdateBandwidthRateLimitInput) SetAverageDownloadRateLimitInBitsPerSec(v int64) *UpdateBandwidthRateLimitInput {
	s.AverageDownloadRateLimitInBitsPerSec = &v
	return s
}

// SetAverageUploadRateLimitInBitsPerSec sets the AverageUploadRateLimitInBitsPerSec field's value.
func (s *UpdateBandwidthRateLimitInput) SetAverageUploadRateLimitInBitsPerSec(v int64) *UpdateBandwidthRateLimitInput {
	s.AverageUploadRateLimitInBitsPerSec = &v
	return s
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *UpdateBandwidthRateLimitInput) SetGatewayARN(v string) *UpdateBandwidthRateLimitInput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose
// throttle information was updated.
type UpdateBandwidthRateLimitOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation
	// to return a list of gateways for your account and AWS Region.
	GatewayARN *string `min:"50" type:"string"`
}

// String returns the string representation
func (s UpdateBandwidthRateLimitOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s UpdateBandwidthRateLimitOutput) GoString() string {
	return s.String()
}

// SetGatewayARN sets the GatewayARN field's value.
func (s *UpdateBandwidthRateLimitOutput) SetGatewayARN(v string) *UpdateBandwidthRateLimitOutput {
	s.GatewayARN = &v
	return s
}

// A JSON object containing one or more of the following fields:
//
// * UpdateChapCredentialsInput$InitiatorName
//
// * UpdateChapCredentialsInput$SecretToAuthenticateInitiator
//
// * UpdateChapCredentialsInput$SecretToAuthenticateTarget
//
// * UpdateChapCredentialsInput$TargetARN
type UpdateChapCredentialsInput struct {
	_ struct{} `type:"structure"`

	// The iSCSI initiator that connects to the target.
	//
	// InitiatorName is a required field
	InitiatorName *string `min:"1" type:"string" required:"true"`

	// The secret key that the initiator (for example, the Windows client) must
	// provide to participate in mutual CHAP with the target.
	//
	// The secret key must be between 12 and 16 bytes when encoded in UTF-8.
	//
	// SecretToAuthenticateInitiator is a required field
	SecretToAuthenticateInitiator *string `min:"1" type:"string" required:"true" sensitive:"true"`

	// The secret key that the target must provide to participate in mutual CHAP
	// with the initiator (for example, the Windows client).
	//
	// The secret key must be between 12 and 16 bytes when encoded in UTF-8.
SecretToAuthenticateTarget *string `min:"1" type:"string" sensitive:"true"` // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes // operation to return the TargetARN for specified VolumeARN. // // TargetARN is a required field TargetARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s UpdateChapCredentialsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateChapCredentialsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateChapCredentialsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateChapCredentialsInput"} if s.InitiatorName == nil { invalidParams.Add(request.NewErrParamRequired("InitiatorName")) } if s.InitiatorName != nil && len(*s.InitiatorName) < 1 { invalidParams.Add(request.NewErrParamMinLen("InitiatorName", 1)) } if s.SecretToAuthenticateInitiator == nil { invalidParams.Add(request.NewErrParamRequired("SecretToAuthenticateInitiator")) } if s.SecretToAuthenticateInitiator != nil && len(*s.SecretToAuthenticateInitiator) < 1 { invalidParams.Add(request.NewErrParamMinLen("SecretToAuthenticateInitiator", 1)) } if s.SecretToAuthenticateTarget != nil && len(*s.SecretToAuthenticateTarget) < 1 { invalidParams.Add(request.NewErrParamMinLen("SecretToAuthenticateTarget", 1)) } if s.TargetARN == nil { invalidParams.Add(request.NewErrParamRequired("TargetARN")) } if s.TargetARN != nil && len(*s.TargetARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("TargetARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetInitiatorName sets the InitiatorName field's value. func (s *UpdateChapCredentialsInput) SetInitiatorName(v string) *UpdateChapCredentialsInput { s.InitiatorName = &v return s } // SetSecretToAuthenticateInitiator sets the SecretToAuthenticateInitiator field's value. func (s *UpdateChapCredentialsInput) SetSecretToAuthenticateInitiator(v string) *UpdateChapCredentialsInput { s.SecretToAuthenticateInitiator = &v return s } // SetSecretToAuthenticateTarget sets the SecretToAuthenticateTarget field's value. func (s *UpdateChapCredentialsInput) SetSecretToAuthenticateTarget(v string) *UpdateChapCredentialsInput { s.SecretToAuthenticateTarget = &v return s } // SetTargetARN sets the TargetARN field's value. func (s *UpdateChapCredentialsInput) SetTargetARN(v string) *UpdateChapCredentialsInput { s.TargetARN = &v return s } // A JSON object containing the following fields: type UpdateChapCredentialsOutput struct { _ struct{} `type:"structure"` // The iSCSI initiator that connects to the target. This is the same initiator // name specified in the request. InitiatorName *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the target. This is the same target specified // in the request. TargetARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateChapCredentialsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateChapCredentialsOutput) GoString() string { return s.String() } // SetInitiatorName sets the InitiatorName field's value. func (s *UpdateChapCredentialsOutput) SetInitiatorName(v string) *UpdateChapCredentialsOutput { s.InitiatorName = &v return s } // SetTargetARN sets the TargetARN field's value. 
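//
// Illustrative sketch (not part of the generated code): Validate only
// enforces the structural minimums on the secrets (length 1); the 12-16 byte
// CHAP constraint is checked by the service itself. All values below are
// placeholders.
//
//	input := (&UpdateChapCredentialsInput{}).
//		SetTargetARN("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume").
//		SetInitiatorName("iqn.1991-05.com.microsoft:initiator").
//		SetSecretToAuthenticateInitiator("0123456789abcdef") // 16 bytes
//	err := input.Validate()
//	_ = err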
func (s *UpdateChapCredentialsOutput) SetTargetARN(v string) *UpdateChapCredentialsOutput { s.TargetARN = &v return s } type UpdateGatewayInformationInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you // want to use to monitor and log events in the gateway. // // For more information, see What is Amazon CloudWatch logs? (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html). CloudWatchLogGroupARN *string `type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The name you configured for your gateway. GatewayName *string `min:"2" type:"string"` // A value that indicates the time zone of the gateway. GatewayTimezone *string `min:"3" type:"string"` } // String returns the string representation func (s UpdateGatewayInformationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateGatewayInformationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateGatewayInformationInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateGatewayInformationInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.GatewayName != nil && len(*s.GatewayName) < 2 { invalidParams.Add(request.NewErrParamMinLen("GatewayName", 2)) } if s.GatewayTimezone != nil && len(*s.GatewayTimezone) < 3 { invalidParams.Add(request.NewErrParamMinLen("GatewayTimezone", 3)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCloudWatchLogGroupARN sets the CloudWatchLogGroupARN field's value. func (s *UpdateGatewayInformationInput) SetCloudWatchLogGroupARN(v string) *UpdateGatewayInformationInput { s.CloudWatchLogGroupARN = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateGatewayInformationInput) SetGatewayARN(v string) *UpdateGatewayInformationInput { s.GatewayARN = &v return s } // SetGatewayName sets the GatewayName field's value. func (s *UpdateGatewayInformationInput) SetGatewayName(v string) *UpdateGatewayInformationInput { s.GatewayName = &v return s } // SetGatewayTimezone sets the GatewayTimezone field's value. func (s *UpdateGatewayInformationInput) SetGatewayTimezone(v string) *UpdateGatewayInformationInput { s.GatewayTimezone = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway that // was updated. type UpdateGatewayInformationOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The name you configured for your gateway. GatewayName *string `type:"string"` } // String returns the string representation func (s UpdateGatewayInformationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateGatewayInformationOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. 
func (s *UpdateGatewayInformationOutput) SetGatewayARN(v string) *UpdateGatewayInformationOutput { s.GatewayARN = &v return s } // SetGatewayName sets the GatewayName field's value. func (s *UpdateGatewayInformationOutput) SetGatewayName(v string) *UpdateGatewayInformationOutput { s.GatewayName = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway to // update. type UpdateGatewaySoftwareNowInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s UpdateGatewaySoftwareNowInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateGatewaySoftwareNowInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateGatewaySoftwareNowInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateGatewaySoftwareNowInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateGatewaySoftwareNowInput) SetGatewayARN(v string) *UpdateGatewaySoftwareNowInput { s.GatewayARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway that // was updated. type UpdateGatewaySoftwareNowOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateGatewaySoftwareNowOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateGatewaySoftwareNowOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateGatewaySoftwareNowOutput) SetGatewayARN(v string) *UpdateGatewaySoftwareNowOutput { s.GatewayARN = &v return s } // A JSON object containing the following fields: // // * UpdateMaintenanceStartTimeInput$DayOfMonth // // * UpdateMaintenanceStartTimeInput$DayOfWeek // // * UpdateMaintenanceStartTimeInput$HourOfDay // // * UpdateMaintenanceStartTimeInput$MinuteOfHour type UpdateMaintenanceStartTimeInput struct { _ struct{} `type:"structure"` // The day of the month component of the maintenance start time represented // as an ordinal number from 1 to 28, where 1 represents the first day of the // month and 28 represents the last day of the month. DayOfMonth *int64 `min:"1" type:"integer"` // The day of the week component of the maintenance start time week represented // as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday. DayOfWeek *int64 `type:"integer"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. 
// // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // The hour component of the maintenance start time represented as hh, where // hh is the hour (00 to 23). The hour of the day is in the time zone of the // gateway. // // HourOfDay is a required field HourOfDay *int64 `type:"integer" required:"true"` // The minute component of the maintenance start time represented as mm, where // mm is the minute (00 to 59). The minute of the hour is in the time zone of // the gateway. // // MinuteOfHour is a required field MinuteOfHour *int64 `type:"integer" required:"true"` } // String returns the string representation func (s UpdateMaintenanceStartTimeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateMaintenanceStartTimeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateMaintenanceStartTimeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateMaintenanceStartTimeInput"} if s.DayOfMonth != nil && *s.DayOfMonth < 1 { invalidParams.Add(request.NewErrParamMinValue("DayOfMonth", 1)) } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.HourOfDay == nil { invalidParams.Add(request.NewErrParamRequired("HourOfDay")) } if s.MinuteOfHour == nil { invalidParams.Add(request.NewErrParamRequired("MinuteOfHour")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDayOfMonth sets the DayOfMonth field's value. func (s *UpdateMaintenanceStartTimeInput) SetDayOfMonth(v int64) *UpdateMaintenanceStartTimeInput { s.DayOfMonth = &v return s } // SetDayOfWeek sets the DayOfWeek field's value. func (s *UpdateMaintenanceStartTimeInput) SetDayOfWeek(v int64) *UpdateMaintenanceStartTimeInput { s.DayOfWeek = &v return s } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateMaintenanceStartTimeInput) SetGatewayARN(v string) *UpdateMaintenanceStartTimeInput { s.GatewayARN = &v return s } // SetHourOfDay sets the HourOfDay field's value. func (s *UpdateMaintenanceStartTimeInput) SetHourOfDay(v int64) *UpdateMaintenanceStartTimeInput { s.HourOfDay = &v return s } // SetMinuteOfHour sets the MinuteOfHour field's value. func (s *UpdateMaintenanceStartTimeInput) SetMinuteOfHour(v int64) *UpdateMaintenanceStartTimeInput { s.MinuteOfHour = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the gateway whose // maintenance start time is updated. type UpdateMaintenanceStartTimeOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateMaintenanceStartTimeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateMaintenanceStartTimeOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. 
func (s *UpdateMaintenanceStartTimeOutput) SetGatewayARN(v string) *UpdateMaintenanceStartTimeOutput { s.GatewayARN = &v return s } // UpdateNFSFileShareInput type UpdateNFSFileShareInput struct { _ struct{} `type:"structure"` // The list of clients that are allowed to access the file gateway. The list // must contain either valid IP addresses or valid CIDR blocks. ClientList []*string `min:"1" type:"list"` // The default storage class for objects put into an Amazon S3 bucket by the // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. // // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the file share to be updated. // // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, // otherwise set to false. The default value is true. // // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The default values for the file share. Optional. NFSFileShareDefaults *NFSFileShareDefaults `type:"structure"` // A value that sets the access control list (ACL) permission for objects in // the S3 bucket that a file gateway puts objects into. The default value is // private. ObjectACL *string `type:"string" enum:"ObjectACL"` // A value that sets the write status of a file share. Set this value to true // to set the write status to read-only, otherwise set to false. // // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. // // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The user mapped to anonymous user. // // Valid values are the following: // // * RootSquash: Only root is mapped to anonymous user. // // * NoSquash: No one is mapped to anonymous user. // // * AllSquash: Everyone is mapped to anonymous user. Squash *string `min:"5" type:"string"` } // String returns the string representation func (s UpdateNFSFileShareInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateNFSFileShareInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
func (s *UpdateNFSFileShareInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateNFSFileShareInput"} if s.ClientList != nil && len(s.ClientList) < 1 { invalidParams.Add(request.NewErrParamMinLen("ClientList", 1)) } if s.DefaultStorageClass != nil && len(*s.DefaultStorageClass) < 5 { invalidParams.Add(request.NewErrParamMinLen("DefaultStorageClass", 5)) } if s.FileShareARN == nil { invalidParams.Add(request.NewErrParamRequired("FileShareARN")) } if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } if s.Squash != nil && len(*s.Squash) < 5 { invalidParams.Add(request.NewErrParamMinLen("Squash", 5)) } if s.NFSFileShareDefaults != nil { if err := s.NFSFileShareDefaults.Validate(); err != nil { invalidParams.AddNested("NFSFileShareDefaults", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetClientList sets the ClientList field's value. func (s *UpdateNFSFileShareInput) SetClientList(v []*string) *UpdateNFSFileShareInput { s.ClientList = v return s } // SetDefaultStorageClass sets the DefaultStorageClass field's value. func (s *UpdateNFSFileShareInput) SetDefaultStorageClass(v string) *UpdateNFSFileShareInput { s.DefaultStorageClass = &v return s } // SetFileShareARN sets the FileShareARN field's value. func (s *UpdateNFSFileShareInput) SetFileShareARN(v string) *UpdateNFSFileShareInput { s.FileShareARN = &v return s } // SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value. func (s *UpdateNFSFileShareInput) SetGuessMIMETypeEnabled(v bool) *UpdateNFSFileShareInput { s.GuessMIMETypeEnabled = &v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *UpdateNFSFileShareInput) SetKMSEncrypted(v bool) *UpdateNFSFileShareInput { s.KMSEncrypted = &v return s } // SetKMSKey sets the KMSKey field's value. func (s *UpdateNFSFileShareInput) SetKMSKey(v string) *UpdateNFSFileShareInput { s.KMSKey = &v return s } // SetNFSFileShareDefaults sets the NFSFileShareDefaults field's value. func (s *UpdateNFSFileShareInput) SetNFSFileShareDefaults(v *NFSFileShareDefaults) *UpdateNFSFileShareInput { s.NFSFileShareDefaults = v return s } // SetObjectACL sets the ObjectACL field's value. func (s *UpdateNFSFileShareInput) SetObjectACL(v string) *UpdateNFSFileShareInput { s.ObjectACL = &v return s } // SetReadOnly sets the ReadOnly field's value. func (s *UpdateNFSFileShareInput) SetReadOnly(v bool) *UpdateNFSFileShareInput { s.ReadOnly = &v return s } // SetRequesterPays sets the RequesterPays field's value. func (s *UpdateNFSFileShareInput) SetRequesterPays(v bool) *UpdateNFSFileShareInput { s.RequesterPays = &v return s } // SetSquash sets the Squash field's value. func (s *UpdateNFSFileShareInput) SetSquash(v string) *UpdateNFSFileShareInput { s.Squash = &v return s } // UpdateNFSFileShareOutput type UpdateNFSFileShareOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the updated file share. FileShareARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateNFSFileShareOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateNFSFileShareOutput) GoString() string { return s.String() } // SetFileShareARN sets the FileShareARN field's value. 
func (s *UpdateNFSFileShareOutput) SetFileShareARN(v string) *UpdateNFSFileShareOutput { s.FileShareARN = &v return s } // UpdateSMBFileShareInput type UpdateSMBFileShareInput struct { _ struct{} `type:"structure"` // A list of users in the Active Directory that have administrator rights to // the file share. A group must be prefixed with the @ character. For example, // @group1. Can only be set if Authentication is set to ActiveDirectory. AdminUserList []*string `type:"list"` // The Amazon Resource Name (ARN) of the storage used for the audit logs. AuditDestinationARN *string `type:"string"` // The default storage class for objects put into an Amazon S3 bucket by the // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. // // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the SMB file share that you want to update. // // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, // otherwise set to false. The default value is true. // // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are not allowed to // access the file share. A group must be prefixed with the @ character. For // example @group1. Can only be set if Authentication is set to ActiveDirectory. InvalidUserList []*string `type:"list"` // Set to true to use Amazon S3 server-side encryption with your own AWS KMS // key, or false to use a key managed by Amazon S3. Optional. // // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // A value that sets the access control list (ACL) permission for objects in // the S3 bucket that a file gateway puts objects into. The default value is // private. ObjectACL *string `type:"string" enum:"ObjectACL"` // A value that sets the write status of a file share. Set this value to true // to set write status to read-only, otherwise set to false. // // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. // // Valid Values: true | false RequesterPays *bool `type:"boolean"` // Set this value to true to enable access control list (ACL) on the SMB file // share. Set it to false to map file and directory permissions to the POSIX // permissions. // // For more information, see Using Microsoft Windows ACLs to control access // to an SMB file share (https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html) // in the AWS Storage Gateway User Guide. 
// // Valid Values: true | false SMBACLEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are allowed to access // the file share. A group must be prefixed with the @ character. For example, // @group1. Can only be set if Authentication is set to ActiveDirectory. ValidUserList []*string `type:"list"` } // String returns the string representation func (s UpdateSMBFileShareInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateSMBFileShareInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateSMBFileShareInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateSMBFileShareInput"} if s.DefaultStorageClass != nil && len(*s.DefaultStorageClass) < 5 { invalidParams.Add(request.NewErrParamMinLen("DefaultStorageClass", 5)) } if s.FileShareARN == nil { invalidParams.Add(request.NewErrParamRequired("FileShareARN")) } if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAdminUserList sets the AdminUserList field's value. func (s *UpdateSMBFileShareInput) SetAdminUserList(v []*string) *UpdateSMBFileShareInput { s.AdminUserList = v return s } // SetAuditDestinationARN sets the AuditDestinationARN field's value. func (s *UpdateSMBFileShareInput) SetAuditDestinationARN(v string) *UpdateSMBFileShareInput { s.AuditDestinationARN = &v return s } // SetDefaultStorageClass sets the DefaultStorageClass field's value. func (s *UpdateSMBFileShareInput) SetDefaultStorageClass(v string) *UpdateSMBFileShareInput { s.DefaultStorageClass = &v return s } // SetFileShareARN sets the FileShareARN field's value. func (s *UpdateSMBFileShareInput) SetFileShareARN(v string) *UpdateSMBFileShareInput { s.FileShareARN = &v return s } // SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value. func (s *UpdateSMBFileShareInput) SetGuessMIMETypeEnabled(v bool) *UpdateSMBFileShareInput { s.GuessMIMETypeEnabled = &v return s } // SetInvalidUserList sets the InvalidUserList field's value. func (s *UpdateSMBFileShareInput) SetInvalidUserList(v []*string) *UpdateSMBFileShareInput { s.InvalidUserList = v return s } // SetKMSEncrypted sets the KMSEncrypted field's value. func (s *UpdateSMBFileShareInput) SetKMSEncrypted(v bool) *UpdateSMBFileShareInput { s.KMSEncrypted = &v return s } // SetKMSKey sets the KMSKey field's value. func (s *UpdateSMBFileShareInput) SetKMSKey(v string) *UpdateSMBFileShareInput { s.KMSKey = &v return s } // SetObjectACL sets the ObjectACL field's value. func (s *UpdateSMBFileShareInput) SetObjectACL(v string) *UpdateSMBFileShareInput { s.ObjectACL = &v return s } // SetReadOnly sets the ReadOnly field's value. func (s *UpdateSMBFileShareInput) SetReadOnly(v bool) *UpdateSMBFileShareInput { s.ReadOnly = &v return s } // SetRequesterPays sets the RequesterPays field's value. func (s *UpdateSMBFileShareInput) SetRequesterPays(v bool) *UpdateSMBFileShareInput { s.RequesterPays = &v return s } // SetSMBACLEnabled sets the SMBACLEnabled field's value. func (s *UpdateSMBFileShareInput) SetSMBACLEnabled(v bool) *UpdateSMBFileShareInput { s.SMBACLEnabled = &v return s } // SetValidUserList sets the ValidUserList field's value. 
func (s *UpdateSMBFileShareInput) SetValidUserList(v []*string) *UpdateSMBFileShareInput { s.ValidUserList = v return s } // UpdateSMBFileShareOutput type UpdateSMBFileShareOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the updated SMB file share. FileShareARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateSMBFileShareOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateSMBFileShareOutput) GoString() string { return s.String() } // SetFileShareARN sets the FileShareARN field's value. func (s *UpdateSMBFileShareOutput) SetFileShareARN(v string) *UpdateSMBFileShareOutput { s.FileShareARN = &v return s } type UpdateSMBSecurityStrategyInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // Specifies the type of security strategy. // // ClientSpecified: if you use this option, requests are established based on // what is negotiated by the client. This option is recommended when you want // to maximize compatibility across different clients in your environment. // // MandatorySigning: if you use this option, file gateway only allows connections // from SMBv2 or SMBv3 clients that have signing enabled. This option works // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. // // MandatoryEncryption: if you use this option, file gateway only allows connections // from SMBv3 clients that have encryption enabled. This option is highly recommended // for environments that handle sensitive data. This option works with SMB clients // on Microsoft Windows 8, Windows Server 2012 or newer. // // SMBSecurityStrategy is a required field SMBSecurityStrategy *string `type:"string" required:"true" enum:"SMBSecurityStrategy"` } // String returns the string representation func (s UpdateSMBSecurityStrategyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateSMBSecurityStrategyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateSMBSecurityStrategyInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateSMBSecurityStrategyInput"} if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) } if s.SMBSecurityStrategy == nil { invalidParams.Add(request.NewErrParamRequired("SMBSecurityStrategy")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateSMBSecurityStrategyInput) SetGatewayARN(v string) *UpdateSMBSecurityStrategyInput { s.GatewayARN = &v return s } // SetSMBSecurityStrategy sets the SMBSecurityStrategy field's value. func (s *UpdateSMBSecurityStrategyInput) SetSMBSecurityStrategy(v string) *UpdateSMBSecurityStrategyInput { s.SMBSecurityStrategy = &v return s } type UpdateSMBSecurityStrategyOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. 
GatewayARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateSMBSecurityStrategyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateSMBSecurityStrategyOutput) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *UpdateSMBSecurityStrategyOutput) SetGatewayARN(v string) *UpdateSMBSecurityStrategyOutput { s.GatewayARN = &v return s } // A JSON object containing one or more of the following fields: // // * UpdateSnapshotScheduleInput$Description // // * UpdateSnapshotScheduleInput$RecurrenceInHours // // * UpdateSnapshotScheduleInput$StartAt // // * UpdateSnapshotScheduleInput$VolumeARN type UpdateSnapshotScheduleInput struct { _ struct{} `type:"structure"` // Optional description of the snapshot that overwrites the existing description. Description *string `min:"1" type:"string"` // Frequency of snapshots. Specify the number of hours between snapshots. // // RecurrenceInHours is a required field RecurrenceInHours *int64 `min:"1" type:"integer" required:"true"` // The hour of the day at which the snapshot schedule begins represented as // hh, where hh is the hour (0 to 23). The hour of the day is in the time zone // of the gateway. // // StartAt is a required field StartAt *int64 `type:"integer" required:"true"` // A list of up to 50 tags that can be assigned to a snapshot. Each tag is a // key-value pair. // // Valid characters for key and value are letters, spaces, and numbers representable // in UTF-8 format, and the following special characters: + - = . _ : / @. The // maximum length of a tag's key is 128 characters, and the maximum length for // a tag's value is 256. Tags []*Tag `type:"list"` // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation // to return a list of gateway volumes. // // VolumeARN is a required field VolumeARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s UpdateSnapshotScheduleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateSnapshotScheduleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateSnapshotScheduleInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateSnapshotScheduleInput"} if s.Description != nil && len(*s.Description) < 1 { invalidParams.Add(request.NewErrParamMinLen("Description", 1)) } if s.RecurrenceInHours == nil { invalidParams.Add(request.NewErrParamRequired("RecurrenceInHours")) } if s.RecurrenceInHours != nil && *s.RecurrenceInHours < 1 { invalidParams.Add(request.NewErrParamMinValue("RecurrenceInHours", 1)) } if s.StartAt == nil { invalidParams.Add(request.NewErrParamRequired("StartAt")) } if s.VolumeARN == nil { invalidParams.Add(request.NewErrParamRequired("VolumeARN")) } if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } if s.Tags != nil { for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDescription sets the Description field's value. 
func (s *UpdateSnapshotScheduleInput) SetDescription(v string) *UpdateSnapshotScheduleInput { s.Description = &v return s } // SetRecurrenceInHours sets the RecurrenceInHours field's value. func (s *UpdateSnapshotScheduleInput) SetRecurrenceInHours(v int64) *UpdateSnapshotScheduleInput { s.RecurrenceInHours = &v return s } // SetStartAt sets the StartAt field's value. func (s *UpdateSnapshotScheduleInput) SetStartAt(v int64) *UpdateSnapshotScheduleInput { s.StartAt = &v return s } // SetTags sets the Tags field's value. func (s *UpdateSnapshotScheduleInput) SetTags(v []*Tag) *UpdateSnapshotScheduleInput { s.Tags = v return s } // SetVolumeARN sets the VolumeARN field's value. func (s *UpdateSnapshotScheduleInput) SetVolumeARN(v string) *UpdateSnapshotScheduleInput { s.VolumeARN = &v return s } // A JSON object containing the Amazon Resource Name (ARN) of the updated storage // volume. type UpdateSnapshotScheduleOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation // to return a list of gateway volumes. VolumeARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateSnapshotScheduleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateSnapshotScheduleOutput) GoString() string { return s.String() } // SetVolumeARN sets the VolumeARN field's value. func (s *UpdateSnapshotScheduleOutput) SetVolumeARN(v string) *UpdateSnapshotScheduleOutput { s.VolumeARN = &v return s } type UpdateVTLDeviceTypeInput struct { _ struct{} `type:"structure"` // The type of medium changer you want to select. // // Valid Values: STK-L700 | AWS-Gateway-VTL // // DeviceType is a required field DeviceType *string `min:"2" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the medium changer you want to select. // // VTLDeviceARN is a required field VTLDeviceARN *string `min:"50" type:"string" required:"true"` } // String returns the string representation func (s UpdateVTLDeviceTypeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateVTLDeviceTypeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateVTLDeviceTypeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateVTLDeviceTypeInput"} if s.DeviceType == nil { invalidParams.Add(request.NewErrParamRequired("DeviceType")) } if s.DeviceType != nil && len(*s.DeviceType) < 2 { invalidParams.Add(request.NewErrParamMinLen("DeviceType", 2)) } if s.VTLDeviceARN == nil { invalidParams.Add(request.NewErrParamRequired("VTLDeviceARN")) } if s.VTLDeviceARN != nil && len(*s.VTLDeviceARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VTLDeviceARN", 50)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetDeviceType sets the DeviceType field's value. func (s *UpdateVTLDeviceTypeInput) SetDeviceType(v string) *UpdateVTLDeviceTypeInput { s.DeviceType = &v return s } // SetVTLDeviceARN sets the VTLDeviceARN field's value. func (s *UpdateVTLDeviceTypeInput) SetVTLDeviceARN(v string) *UpdateVTLDeviceTypeInput { s.VTLDeviceARN = &v return s } // UpdateVTLDeviceTypeOutput type UpdateVTLDeviceTypeOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the medium changer you have selected. 
VTLDeviceARN *string `min:"50" type:"string"` } // String returns the string representation func (s UpdateVTLDeviceTypeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateVTLDeviceTypeOutput) GoString() string { return s.String() } // SetVTLDeviceARN sets the VTLDeviceARN field's value. func (s *UpdateVTLDeviceTypeOutput) SetVTLDeviceARN(v string) *UpdateVTLDeviceTypeOutput { s.VTLDeviceARN = &v return s } // Represents a device object associated with a tape gateway. type VTLDevice struct { _ struct{} `type:"structure"` // A list of iSCSI information about a VTL device. DeviceiSCSIAttributes *DeviceiSCSIAttributes `type:"structure"` // Specifies the unique Amazon Resource Name (ARN) of the device (tape drive // or media changer). VTLDeviceARN *string `min:"50" type:"string"` // Specifies the model number of device that the VTL device emulates. VTLDeviceProductIdentifier *string `type:"string"` // Specifies the type of device that the VTL device emulates. VTLDeviceType *string `type:"string"` // Specifies the vendor of the device that the VTL device object emulates. VTLDeviceVendor *string `type:"string"` } // String returns the string representation func (s VTLDevice) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s VTLDevice) GoString() string { return s.String() } // SetDeviceiSCSIAttributes sets the DeviceiSCSIAttributes field's value. func (s *VTLDevice) SetDeviceiSCSIAttributes(v *DeviceiSCSIAttributes) *VTLDevice { s.DeviceiSCSIAttributes = v return s } // SetVTLDeviceARN sets the VTLDeviceARN field's value. func (s *VTLDevice) SetVTLDeviceARN(v string) *VTLDevice { s.VTLDeviceARN = &v return s } // SetVTLDeviceProductIdentifier sets the VTLDeviceProductIdentifier field's value. func (s *VTLDevice) SetVTLDeviceProductIdentifier(v string) *VTLDevice { s.VTLDeviceProductIdentifier = &v return s } // SetVTLDeviceType sets the VTLDeviceType field's value. func (s *VTLDevice) SetVTLDeviceType(v string) *VTLDevice { s.VTLDeviceType = &v return s } // SetVTLDeviceVendor sets the VTLDeviceVendor field's value. func (s *VTLDevice) SetVTLDeviceVendor(v string) *VTLDevice { s.VTLDeviceVendor = &v return s } // Describes a storage volume object. type VolumeInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` // The unique identifier assigned to your gateway during activation. This ID // becomes part of the gateway Amazon Resource Name (ARN), which you use as // input for other operations. // // Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens // (-). GatewayId *string `min:"12" type:"string"` // The Amazon Resource Name (ARN) for the storage volume. For example, the following // is a valid ARN: // // arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB // // Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens // (-). VolumeARN *string `min:"50" type:"string"` // One of the VolumeStatus values that indicates the state of the storage volume. VolumeAttachmentStatus *string `min:"3" type:"string"` // The unique identifier assigned to the volume. This ID becomes part of the // volume Amazon Resource Name (ARN), which you use as input for other operations. 
// // Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens // (-). VolumeId *string `min:"12" type:"string"` // The size of the volume in bytes. // // Valid Values: 50 to 500 lowercase letters, numbers, periods (.), and hyphens // (-). VolumeSizeInBytes *int64 `type:"long"` // One of the VolumeType enumeration values describing the type of the volume. VolumeType *string `min:"3" type:"string"` } // String returns the string representation func (s VolumeInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s VolumeInfo) GoString() string { return s.String() } // SetGatewayARN sets the GatewayARN field's value. func (s *VolumeInfo) SetGatewayARN(v string) *VolumeInfo { s.GatewayARN = &v return s } // SetGatewayId sets the GatewayId field's value. func (s *VolumeInfo) SetGatewayId(v string) *VolumeInfo { s.GatewayId = &v return s } // SetVolumeARN sets the VolumeARN field's value. func (s *VolumeInfo) SetVolumeARN(v string) *VolumeInfo { s.VolumeARN = &v return s } // SetVolumeAttachmentStatus sets the VolumeAttachmentStatus field's value. func (s *VolumeInfo) SetVolumeAttachmentStatus(v string) *VolumeInfo { s.VolumeAttachmentStatus = &v return s } // SetVolumeId sets the VolumeId field's value. func (s *VolumeInfo) SetVolumeId(v string) *VolumeInfo { s.VolumeId = &v return s } // SetVolumeSizeInBytes sets the VolumeSizeInBytes field's value. func (s *VolumeInfo) SetVolumeSizeInBytes(v int64) *VolumeInfo { s.VolumeSizeInBytes = &v return s } // SetVolumeType sets the VolumeType field's value. func (s *VolumeInfo) SetVolumeType(v string) *VolumeInfo { s.VolumeType = &v return s } // Describes a storage volume recovery point object. type VolumeRecoveryPointInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the volume target. VolumeARN *string `min:"50" type:"string"` // The time the recovery point was taken. VolumeRecoveryPointTime *string `type:"string"` // The size of the volume in bytes. VolumeSizeInBytes *int64 `type:"long"` // The size of the data stored on the volume in bytes. // // This value is not available for volumes created prior to May 13, 2015, until // you store data on the volume. VolumeUsageInBytes *int64 `type:"long"` } // String returns the string representation func (s VolumeRecoveryPointInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s VolumeRecoveryPointInfo) GoString() string { return s.String() } // SetVolumeARN sets the VolumeARN field's value. func (s *VolumeRecoveryPointInfo) SetVolumeARN(v string) *VolumeRecoveryPointInfo { s.VolumeARN = &v return s } // SetVolumeRecoveryPointTime sets the VolumeRecoveryPointTime field's value. func (s *VolumeRecoveryPointInfo) SetVolumeRecoveryPointTime(v string) *VolumeRecoveryPointInfo { s.VolumeRecoveryPointTime = &v return s } // SetVolumeSizeInBytes sets the VolumeSizeInBytes field's value. func (s *VolumeRecoveryPointInfo) SetVolumeSizeInBytes(v int64) *VolumeRecoveryPointInfo { s.VolumeSizeInBytes = &v return s } // SetVolumeUsageInBytes sets the VolumeUsageInBytes field's value. func (s *VolumeRecoveryPointInfo) SetVolumeUsageInBytes(v int64) *VolumeRecoveryPointInfo { s.VolumeUsageInBytes = &v return s } // Lists iSCSI information about a volume. type VolumeiSCSIAttributes struct { _ struct{} `type:"structure"` // Indicates whether mutual CHAP is enabled for the iSCSI target. ChapEnabled *bool `type:"boolean"` // The logical disk number. 
LunNumber *int64 `min:"1" type:"integer"` // The network interface identifier. NetworkInterfaceId *string `type:"string"` // The port used to communicate with iSCSI targets. NetworkInterfacePort *int64 `type:"integer"` // The Amazon Resource Name (ARN) of the volume target. TargetARN *string `min:"50" type:"string"` } // String returns the string representation func (s VolumeiSCSIAttributes) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s VolumeiSCSIAttributes) GoString() string { return s.String() } // SetChapEnabled sets the ChapEnabled field's value. func (s *VolumeiSCSIAttributes) SetChapEnabled(v bool) *VolumeiSCSIAttributes { s.ChapEnabled = &v return s } // SetLunNumber sets the LunNumber field's value. func (s *VolumeiSCSIAttributes) SetLunNumber(v int64) *VolumeiSCSIAttributes { s.LunNumber = &v return s } // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *VolumeiSCSIAttributes) SetNetworkInterfaceId(v string) *VolumeiSCSIAttributes { s.NetworkInterfaceId = &v return s } // SetNetworkInterfacePort sets the NetworkInterfacePort field's value. func (s *VolumeiSCSIAttributes) SetNetworkInterfacePort(v int64) *VolumeiSCSIAttributes { s.NetworkInterfacePort = &v return s } // SetTargetARN sets the TargetARN field's value. func (s *VolumeiSCSIAttributes) SetTargetARN(v string) *VolumeiSCSIAttributes { s.TargetARN = &v return s } const ( // ActiveDirectoryStatusAccessDenied is a ActiveDirectoryStatus enum value ActiveDirectoryStatusAccessDenied = "ACCESS_DENIED" // ActiveDirectoryStatusDetached is a ActiveDirectoryStatus enum value ActiveDirectoryStatusDetached = "DETACHED" // ActiveDirectoryStatusJoined is a ActiveDirectoryStatus enum value ActiveDirectoryStatusJoined = "JOINED" // ActiveDirectoryStatusJoining is a ActiveDirectoryStatus enum value ActiveDirectoryStatusJoining = "JOINING" // ActiveDirectoryStatusNetworkError is a ActiveDirectoryStatus enum value ActiveDirectoryStatusNetworkError = "NETWORK_ERROR" // ActiveDirectoryStatusTimeout is a ActiveDirectoryStatus enum value ActiveDirectoryStatusTimeout = "TIMEOUT" // ActiveDirectoryStatusUnknownError is a ActiveDirectoryStatus enum value ActiveDirectoryStatusUnknownError = "UNKNOWN_ERROR" ) const ( // AvailabilityMonitorTestStatusComplete is a AvailabilityMonitorTestStatus enum value AvailabilityMonitorTestStatusComplete = "COMPLETE" // AvailabilityMonitorTestStatusFailed is a AvailabilityMonitorTestStatus enum value AvailabilityMonitorTestStatusFailed = "FAILED" // AvailabilityMonitorTestStatusPending is a AvailabilityMonitorTestStatus enum value AvailabilityMonitorTestStatusPending = "PENDING" ) const ( // ErrorCodeActivationKeyExpired is a ErrorCode enum value ErrorCodeActivationKeyExpired = "ActivationKeyExpired" // ErrorCodeActivationKeyInvalid is a ErrorCode enum value ErrorCodeActivationKeyInvalid = "ActivationKeyInvalid" // ErrorCodeActivationKeyNotFound is a ErrorCode enum value ErrorCodeActivationKeyNotFound = "ActivationKeyNotFound" // ErrorCodeGatewayInternalError is a ErrorCode enum value ErrorCodeGatewayInternalError = "GatewayInternalError" // ErrorCodeGatewayNotConnected is a ErrorCode enum value ErrorCodeGatewayNotConnected = "GatewayNotConnected" // ErrorCodeGatewayNotFound is a ErrorCode enum value ErrorCodeGatewayNotFound = "GatewayNotFound" // ErrorCodeGatewayProxyNetworkConnectionBusy is a ErrorCode enum value ErrorCodeGatewayProxyNetworkConnectionBusy = "GatewayProxyNetworkConnectionBusy" // ErrorCodeAuthenticationFailure is a 
ErrorCode enum value ErrorCodeAuthenticationFailure = "AuthenticationFailure" // ErrorCodeBandwidthThrottleScheduleNotFound is a ErrorCode enum value ErrorCodeBandwidthThrottleScheduleNotFound = "BandwidthThrottleScheduleNotFound" // ErrorCodeBlocked is a ErrorCode enum value ErrorCodeBlocked = "Blocked" // ErrorCodeCannotExportSnapshot is a ErrorCode enum value ErrorCodeCannotExportSnapshot = "CannotExportSnapshot" // ErrorCodeChapCredentialNotFound is a ErrorCode enum value ErrorCodeChapCredentialNotFound = "ChapCredentialNotFound" // ErrorCodeDiskAlreadyAllocated is a ErrorCode enum value ErrorCodeDiskAlreadyAllocated = "DiskAlreadyAllocated" // ErrorCodeDiskDoesNotExist is a ErrorCode enum value ErrorCodeDiskDoesNotExist = "DiskDoesNotExist" // ErrorCodeDiskSizeGreaterThanVolumeMaxSize is a ErrorCode enum value ErrorCodeDiskSizeGreaterThanVolumeMaxSize = "DiskSizeGreaterThanVolumeMaxSize" // ErrorCodeDiskSizeLessThanVolumeSize is a ErrorCode enum value ErrorCodeDiskSizeLessThanVolumeSize = "DiskSizeLessThanVolumeSize" // ErrorCodeDiskSizeNotGigAligned is a ErrorCode enum value ErrorCodeDiskSizeNotGigAligned = "DiskSizeNotGigAligned" // ErrorCodeDuplicateCertificateInfo is a ErrorCode enum value ErrorCodeDuplicateCertificateInfo = "DuplicateCertificateInfo" // ErrorCodeDuplicateSchedule is a ErrorCode enum value ErrorCodeDuplicateSchedule = "DuplicateSchedule" // ErrorCodeEndpointNotFound is a ErrorCode enum value ErrorCodeEndpointNotFound = "EndpointNotFound" // ErrorCodeIamnotSupported is a ErrorCode enum value ErrorCodeIamnotSupported = "IAMNotSupported" // ErrorCodeInitiatorInvalid is a ErrorCode enum value ErrorCodeInitiatorInvalid = "InitiatorInvalid" // ErrorCodeInitiatorNotFound is a ErrorCode enum value ErrorCodeInitiatorNotFound = "InitiatorNotFound" // ErrorCodeInternalError is a ErrorCode enum value ErrorCodeInternalError = "InternalError" // ErrorCodeInvalidGateway is a ErrorCode enum value ErrorCodeInvalidGateway = "InvalidGateway" // ErrorCodeInvalidEndpoint is a ErrorCode enum value ErrorCodeInvalidEndpoint = "InvalidEndpoint" // ErrorCodeInvalidParameters is a ErrorCode enum value ErrorCodeInvalidParameters = "InvalidParameters" // ErrorCodeInvalidSchedule is a ErrorCode enum value ErrorCodeInvalidSchedule = "InvalidSchedule" // ErrorCodeLocalStorageLimitExceeded is a ErrorCode enum value ErrorCodeLocalStorageLimitExceeded = "LocalStorageLimitExceeded" // ErrorCodeLunAlreadyAllocated is a ErrorCode enum value ErrorCodeLunAlreadyAllocated = "LunAlreadyAllocated " // ErrorCodeLunInvalid is a ErrorCode enum value ErrorCodeLunInvalid = "LunInvalid" // ErrorCodeJoinDomainInProgress is a ErrorCode enum value ErrorCodeJoinDomainInProgress = "JoinDomainInProgress" // ErrorCodeMaximumContentLengthExceeded is a ErrorCode enum value ErrorCodeMaximumContentLengthExceeded = "MaximumContentLengthExceeded" // ErrorCodeMaximumTapeCartridgeCountExceeded is a ErrorCode enum value ErrorCodeMaximumTapeCartridgeCountExceeded = "MaximumTapeCartridgeCountExceeded" // ErrorCodeMaximumVolumeCountExceeded is a ErrorCode enum value ErrorCodeMaximumVolumeCountExceeded = "MaximumVolumeCountExceeded" // ErrorCodeNetworkConfigurationChanged is a ErrorCode enum value ErrorCodeNetworkConfigurationChanged = "NetworkConfigurationChanged" // ErrorCodeNoDisksAvailable is a ErrorCode enum value ErrorCodeNoDisksAvailable = "NoDisksAvailable" // ErrorCodeNotImplemented is a ErrorCode enum value ErrorCodeNotImplemented = "NotImplemented" // ErrorCodeNotSupported is a ErrorCode enum value ErrorCodeNotSupported 
= "NotSupported" // ErrorCodeOperationAborted is a ErrorCode enum value ErrorCodeOperationAborted = "OperationAborted" // ErrorCodeOutdatedGateway is a ErrorCode enum value ErrorCodeOutdatedGateway = "OutdatedGateway" // ErrorCodeParametersNotImplemented is a ErrorCode enum value ErrorCodeParametersNotImplemented = "ParametersNotImplemented" // ErrorCodeRegionInvalid is a ErrorCode enum value ErrorCodeRegionInvalid = "RegionInvalid" // ErrorCodeRequestTimeout is a ErrorCode enum value ErrorCodeRequestTimeout = "RequestTimeout" // ErrorCodeServiceUnavailable is a ErrorCode enum value ErrorCodeServiceUnavailable = "ServiceUnavailable" // ErrorCodeSnapshotDeleted is a ErrorCode enum value ErrorCodeSnapshotDeleted = "SnapshotDeleted" // ErrorCodeSnapshotIdInvalid is a ErrorCode enum value ErrorCodeSnapshotIdInvalid = "SnapshotIdInvalid" // ErrorCodeSnapshotInProgress is a ErrorCode enum value ErrorCodeSnapshotInProgress = "SnapshotInProgress" // ErrorCodeSnapshotNotFound is a ErrorCode enum value ErrorCodeSnapshotNotFound = "SnapshotNotFound" // ErrorCodeSnapshotScheduleNotFound is a ErrorCode enum value ErrorCodeSnapshotScheduleNotFound = "SnapshotScheduleNotFound" // ErrorCodeStagingAreaFull is a ErrorCode enum value ErrorCodeStagingAreaFull = "StagingAreaFull" // ErrorCodeStorageFailure is a ErrorCode enum value ErrorCodeStorageFailure = "StorageFailure" // ErrorCodeTapeCartridgeNotFound is a ErrorCode enum value ErrorCodeTapeCartridgeNotFound = "TapeCartridgeNotFound" // ErrorCodeTargetAlreadyExists is a ErrorCode enum value ErrorCodeTargetAlreadyExists = "TargetAlreadyExists" // ErrorCodeTargetInvalid is a ErrorCode enum value ErrorCodeTargetInvalid = "TargetInvalid" // ErrorCodeTargetNotFound is a ErrorCode enum value ErrorCodeTargetNotFound = "TargetNotFound" // ErrorCodeUnauthorizedOperation is a ErrorCode enum value ErrorCodeUnauthorizedOperation = "UnauthorizedOperation" // ErrorCodeVolumeAlreadyExists is a ErrorCode enum value ErrorCodeVolumeAlreadyExists = "VolumeAlreadyExists" // ErrorCodeVolumeIdInvalid is a ErrorCode enum value ErrorCodeVolumeIdInvalid = "VolumeIdInvalid" // ErrorCodeVolumeInUse is a ErrorCode enum value ErrorCodeVolumeInUse = "VolumeInUse" // ErrorCodeVolumeNotFound is a ErrorCode enum value ErrorCodeVolumeNotFound = "VolumeNotFound" // ErrorCodeVolumeNotReady is a ErrorCode enum value ErrorCodeVolumeNotReady = "VolumeNotReady" ) // The type of the file share. const ( // FileShareTypeNfs is a FileShareType enum value FileShareTypeNfs = "NFS" // FileShareTypeSmb is a FileShareType enum value FileShareTypeSmb = "SMB" ) const ( // HostEnvironmentVmware is a HostEnvironment enum value HostEnvironmentVmware = "VMWARE" // HostEnvironmentHyperV is a HostEnvironment enum value HostEnvironmentHyperV = "HYPER-V" // HostEnvironmentEc2 is a HostEnvironment enum value HostEnvironmentEc2 = "EC2" // HostEnvironmentKvm is a HostEnvironment enum value HostEnvironmentKvm = "KVM" // HostEnvironmentOther is a HostEnvironment enum value HostEnvironmentOther = "OTHER" ) // A value that sets the access control list (ACL) permission for objects in // the S3 bucket that a file gateway puts objects into. The default value is // private. 
const ( // ObjectACLPrivate is a ObjectACL enum value ObjectACLPrivate = "private" // ObjectACLPublicRead is a ObjectACL enum value ObjectACLPublicRead = "public-read" // ObjectACLPublicReadWrite is a ObjectACL enum value ObjectACLPublicReadWrite = "public-read-write" // ObjectACLAuthenticatedRead is a ObjectACL enum value ObjectACLAuthenticatedRead = "authenticated-read" // ObjectACLBucketOwnerRead is a ObjectACL enum value ObjectACLBucketOwnerRead = "bucket-owner-read" // ObjectACLBucketOwnerFullControl is a ObjectACL enum value ObjectACLBucketOwnerFullControl = "bucket-owner-full-control" // ObjectACLAwsExecRead is a ObjectACL enum value ObjectACLAwsExecRead = "aws-exec-read" ) const ( // SMBSecurityStrategyClientSpecified is a SMBSecurityStrategy enum value SMBSecurityStrategyClientSpecified = "ClientSpecified" // SMBSecurityStrategyMandatorySigning is a SMBSecurityStrategy enum value SMBSecurityStrategyMandatorySigning = "MandatorySigning" // SMBSecurityStrategyMandatoryEncryption is a SMBSecurityStrategy enum value SMBSecurityStrategyMandatoryEncryption = "MandatoryEncryption" )
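// Usage note (editorial addition, not part of the generated SDK): the structs
// above follow the standard aws-sdk-go request/response pattern, so a call is
// built by filling an *Input struct, optionally validating it locally, and
// invoking the matching operation method on the storagegateway client. A
// minimal sketch, assuming the usual aws-sdk-go session setup; the gateway ARN
// below is a placeholder, not a real resource:
//
//	import (
//	    "fmt"
//
//	    "github.com/aws/aws-sdk-go/aws"
//	    "github.com/aws/aws-sdk-go/aws/session"
//	    "github.com/aws/aws-sdk-go/service/storagegateway"
//	)
//
//	sess := session.Must(session.NewSession())
//	svc := storagegateway.New(sess)
//
//	input := &storagegateway.UpdateSMBSecurityStrategyInput{
//	    GatewayARN:          aws.String("arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B"),
//	    SMBSecurityStrategy: aws.String(storagegateway.SMBSecurityStrategyMandatoryEncryption),
//	}
//	if err := input.Validate(); err != nil {
//	    // Reject invalid parameters locally before making a network call.
//	    fmt.Println(err)
//	    return
//	}
//	output, err := svc.UpdateSMBSecurityStrategy(input)
//	if err == nil {
//	    fmt.Println(aws.StringValue(output.GatewayARN))
//	}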
monaco-view.component.d.ts
/*!
 * @license
 * Alfresco Example Content Application
 *
 * Copyright (C) 2005 - 2018 Alfresco Software Limited
 *
 * This file is part of the Alfresco Example Content Application.
 * If the software was purchased under a paid Alfresco license, the terms of
 * the paid license agreement will prevail. Otherwise, the software is
 * provided under the following open source license terms:
 *
 * The Alfresco Example Content Application is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * The Alfresco Example Content Application is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
import { OnInit } from '@angular/core';
import { MinimalNodeEntryEntity } from 'alfresco-js-api';
import { AlfrescoApiService } from '@alfresco/adf-core';

export declare class MonacoViewComponent implements OnInit {
    private apiService;
    url: string;
    node: MinimalNodeEntryEntity;
    editor: any;
    code: any;
    editorOptions: {
        theme: string;
        language: string;
        autoIndent: boolean;
        formatOnPaste: boolean;
        formatOnType: boolean;
    };
    onInit(editor: any): void;
    constructor(apiService: AlfrescoApiService);
    indentCode(): void;
    ngOnInit(): void;
}
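// Editorial note: a hedged sketch of how this component's members are
// typically wired up in the accompanying template. The <ngx-monaco-editor>
// element is an assumption about the Monaco wrapper library in use, not
// something this declaration file guarantees:
//
//     <ngx-monaco-editor
//         [options]="editorOptions"
//         [(ngModel)]="code"
//         (onInit)="onInit($event)">
//     </ngx-monaco-editor>
//
// Under that assumption, `onInit(editor)` receives the underlying Monaco
// editor instance, which is what `indentCode()` would need in order to invoke
// the editor's formatting actions.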
admin.py
"""Collection of admin utility functions""" import os import sys import logging from nvp.nvp_component import NVPComponent from nvp.nvp_context import NVPContext logger = logging.getLogger(__name__) # Default .editorconfig content: DEFAULT_EDITORCONFIG_CONTENT = """# Autogenerated .editorconfig file # Update as needed. root = true [*] end_of_line = lf """ # Default .gitignore content: DEFAULT_GITIGNORE_CONTENT = """# Ignore python compiled files: *.pyc # Ignore .vs_env file: .vs_env # Ignore visual studio code actual settings file: .vscode/settings.json # Ignore log files: *.log """ # Default python .env content: DEFAULT_PYTHONENV_CONTENT = """# Autogenerated .vs_env file # Update as needed. PYTHONPATH=.${SEP}${NVP_ROOT_DIR} """ # Default nvp_config.json content: DEFAULT_NVPCONFIG_CONTENT = """/* NVP project configuration file */ { // Add config entries as needed here. } """ # Default nvp_plug.py content: DEFAULT_NVPPLUG_CONTENT = '''""" NVP plug entrypoint module for ${PROJ_NAME} """ import logging from nvp.nvp_component import NVPComponent from nvp.nvp_context import NVPContext logger = logging.getLogger('${PROJ_NAME}') def register_nvp_plugin(context, proj): """This function should register this plugin in the current NVP context""" logger.info("Registering ${PROJ_NAME} NVP plugin.") proj.register_component('${PROJ_NAME}', MyComponent(context)) class MyComponent(NVPComponent): """Example component class""" def __init__(self, ctx: NVPContext): """Constructor for component""" NVPComponent.__init__(self, ctx) # define parsers and build required logic from here: # desc = { # "build": {"libs": None}, # } # ctx.define_subparsers("main", desc) # psr = ctx.get_parser('main.build') # psr.add_argument("-c", "--compiler", dest='compiler_type', type=str, # help="Specify which type of compiler should be selected") ''' # Default .gitattributes content: # cf. https://rehansaeed.com/gitattributes-best-practices/ ############################### # Git Large File System (LFS) # ############################### # Could use 'filter=lfs diff=lfs merge=lfs ' below but not clear yet how to do that # properly DEFAULT_GITATTRIBUTES_CONTENT = """############################### # Git Line Endings # ############################### # Set default behaviour to automatically normalize line endings. * text=auto # Force batch scripts to always use CRLF line endings so that if a repo is accessed # in Windows via a file share from Linux, the scripts will work. *.{cmd,[cC][mM][dD]} text eol=crlf *.{bat,[bB][aA][tT]} text eol=crlf # Force bash scripts to always use LF line endings so that if a repo is accessed # in Unix via a file share from Windows, the scripts will work. *.sh text eol=lf # Archives *.7z -text *.br -text *.gz -text *.tar -text *.zip -text # Documents *.pdf -text # Images *.gif -text *.ico -text *.jpg -text *.pdf -text *.png -text *.psd -text *.webp -text # Fonts *.woff2 -text # Other *.exe -text """ DEFAULT_CLI_PY_CONTENT = '''""" Main command line interface module """ import argparse # => Adapt the code below to be your application entrypoint. parser = argparse.ArgumentParser() args = parser.parse_args() print("Should implement application logic here.") ''' DEFAULT_CLI_SH_CONTENT = '''#!/bin/bash # cf. 
https://stackoverflow.com/questions/59895/how-can-i-get-the-source-directory-of-a-bash-script-from-within-the-script-itsel ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) _${PROJ_NAME}_run_cli_windows() { # On windows we should simply rely on the cli.bat script below: ROOT_DIR="$(cygpath -w $ROOT_DIR)" cmd /C "$ROOT_DIR\cli.bat" "$@" } _${PROJ_NAME}_run_cli_linux() { local python_version="${PY_VERSION}" # On linux we should call the python cli directly: # Get the project root folder: local root_dir=$(readlink -f $ROOT_DIR/) # echo "Project root dir is: $root_dir" # Check if we already have python: local tools_dir=$root_dir/tools/linux if [[ ! -d $tools_dir ]]; then echo "Creating tools/linux folder..." mkdir $tools_dir fi local python_dir=$tools_dir/python-$python_version local python_path=$python_dir/bin/python3 if [[ ! -d $python_dir ]]; then # Get the path to package: local python_pkg=$root_dir/tools/packages/python-$python_version-linux.tar.xz echo "Extracting $python_pkg..." # $unzip_path x -o"$tools_dir" "$python_pkg" > /dev/null pushd $tools_dir >/dev/null tar xvJf $python_pkg popd >/dev/null # Once we have deployed the base python tool package we start with upgrading pip: echo "Upgrading pip..." $python_path -m pip install --upgrade pip # Finally we install the python requirements: echo "Installing python requirements..." $python_path -m pip install -r $root_dir/tools/requirements.txt fi if [ "$1" == "--install-py-reqs" ]; then echo "Installing python requirements..." $python_path -m pip install -r $root_dir/tools/requirements.txt elif [ "$1" == "python" ]; then # shift the args by one: shift $python_path "$@" elif [ "$1" == "pip" ]; then # shift the args by one: shift $python_path -m pip "$@" else # Execute the command in python: $python_path $root_dir/cli.py "$@" fi } ${PROJ_NAME}() { if [ "$1" == "home" ]; then # We simply go to the home of this project: cd "$ROOT_DIR" else # Check if we are on a windows or a linux system: pname=$(uname -s) case $pname in CYGWIN*) _${PROJ_NAME}_run_cli_windows "$@" ;; *) _${PROJ_NAME}_run_cli_linux "$@" ;; esac fi } # cf. https://askubuntu.com/questions/141928/what-is-the-difference-between-bin-sh-and-bin-bash (return 0 2>/dev/null) && sourced=1 || sourced=0 if [ "$sourced" == "0" ]; then ${PROJ_NAME} "$@" else echo "${PROJ_NAME} command loaded." fi ''' DEFAULT_CLI_BAT_CONTENT = ''' @echo off SETLOCAL ENABLEDELAYEDEXPANSION @REM Retrieve the current folder: @REM cli script is located directly in the root, so we don't need the '..' in path: @REM cd /D %~dp0.. cd /D %~dp0 FOR /F %%i IN (".") DO set ${PROJ_NAME}_ROOT_DIR=%%~fi set ${PROJ_NAME}_DIR=%${PROJ_NAME}_ROOT_DIR% @REM echo Using NervProj root folder: %${PROJ_NAME}_DIR% @REM Extract the python env if needed: set py_vers=${PY_VERSION} set TOOLS_DIR=%${PROJ_NAME}_DIR%\\tools\\windows\\ set UNZIP=%TOOLS_DIR%\\7zip-${ZIP_VERSION}\\7za.exe set PYTHON=%TOOLS_DIR%\\python-%py_vers%\\python.exe @REM Check if python is extracted already: if not exist "%PYTHON%" ( echo Extracting python tool... 
    %UNZIP% x -o"%TOOLS_DIR%" "%${PROJ_NAME}_DIR%\\tools\\packages\\python-%py_vers%-windows.7z" > nul

    @REM Upgrade pip:
    %PYTHON% -m pip install --upgrade pip

    @REM Install requirements:
    %PYTHON% -m pip install -r %${PROJ_NAME}_DIR%\\tools\\requirements.txt
)

@REM check if the first argument is "--install-py-reqs"
IF /i "%~1" == "--install-py-reqs" goto install_reqs
IF /i "%~1" == "python" goto run_python
IF /i "%~1" == "pip" goto run_pip

%PYTHON% %${PROJ_NAME}_DIR%\\cli.py %*
goto common_exit

:install_reqs
%PYTHON% -m pip install -r %${PROJ_NAME}_DIR%\\tools\\requirements.txt
goto common_exit

@REM cannot rely on %* when we use shift below:
:run_python
shift
%PYTHON% %1 %2 %3 %4 %5 %6 %7 %8 %9
goto common_exit

:run_pip
shift
%PYTHON% -m pip %1 %2 %3 %4 %5 %6 %7 %8 %9
goto common_exit

:common_exit
'''


def register_component(ctx: NVPContext):
    """Register this component in the given context"""
    comp = AdminManager(ctx)
    ctx.register_component('admin', comp)


class AdminManager(NVPComponent):
    """Admin command manager class"""

    def __init__(self, ctx: NVPContext):
        """Admin commands manager constructor"""
        NVPComponent.__init__(self, ctx)

        # # Check the value of the sub command:
        # sub_cmd = self.settings['l1_cmd']
        # if sub_cmd == 'install-cli':
        #     self.install_cli()

        desc = {
            "admin": {
                "install": {"cli": None, "reqs": None, "repo": None},
                "init": None,
            }
        }
        ctx.define_subparsers("main", desc)
        psr = ctx.get_parser('main.admin.init')
        psr.add_argument("-p", "--with-py-env", dest="with_py_env", action="store_true",
                         help="Request deployment of a full python environment.")

    def install_cli(self):
        """Install a CLI script in .bashrc if applicable"""
        # Check if a $HOME folder is provided:
        home_dir = os.getenv('HOME')
        if home_dir is None:
            logger.error("Cannot install cli alias: no $HOME environment variable detected.")
            return

        logger.info("Home folder is: %s", home_dir)

        # Check if we have a .bashrc file in that folder:
        bashrc_file = self.get_path(home_dir, ".bashrc")
        if not self.file_exists(bashrc_file):
            logger.warning("Cannot install cli alias: no .bashrc file in HOME folder.")
            return

        script_path = self.get_path(self.ctx.get_root_dir(), "cli.sh")

        # If we are on windows, we may want to convert this path to a cygwin path
        # if we are in a cygwin environment (but running the native python executable):
        if self.is_windows:
            script_path = self.to_cygwin_path(script_path)
            assert script_path is not None, "Invalid cygwin environment."

        sline = f"\n[ -f \"{script_path}\" ] && source \"{script_path}\"\n"

        # Check if this string is already in the bashrc file:
        content = self.read_text_file(bashrc_file)

        if content.find(sline) == -1:
            # We should add the string:
            logger.info("Adding source file in .bashrc for NervProj")
            # Make a backup of the file:
            self.copy_file(bashrc_file, bashrc_file+".bak", force=True)
            self.write_text_file(content+sline, bashrc_file, newline='\n')
        else:
            logger.info("NervProj setup file already referenced in .bashrc")

        # pp = pprint.PrettyPrinter(indent=2)
        # res = pp.pformat(dict(os.environ))
        # logger.info("Current environment is: %s", res)

    def install_python_requirements(self):
        """Install the requirements for the main python environment using pip"""
        logger.info("Installing python requirements...")
        reqfile = self.get_path(self.ctx.get_root_dir(), "tools/requirements.txt")
        cmd = [sys.executable, "-m", "pip", "install", "-r", reqfile]
        # logger.info("Executing command: %s", cmd)
        self.execute(cmd)
        logger.info("Done installing python requirements.")

    def install_repository_bootstrap(self):
        """Install the bootstrapped repository for this NervProj folder if not present already."""
        base_dir = self.ctx.get_root_dir()
        if self.dir_exists(base_dir, ".git"):
            logger.info(".git folder already exists, bootstrapping ignored.")
            return

        # We need to bootstrap in a temp folder:
        git = self.get_component('git')
        url = self.config["repository_url"]
        dest_dir = self.get_path(base_dir, "temp", "nervproj")
        logger.info("Cloning NervProj folder into %s...", dest_dir)
        git.clone_repository(url, dest_dir)

        # When cloning is done we should move the .git folder from the clone location into our root
        self.move_path(self.get_path(dest_dir, ".git"), self.get_path(base_dir, ".git"))

        # And finally we remove the remaining files:
        self.remove_folder(dest_dir)
        logger.info("Done bootstrapping NervProj project.")

    def setup_global_vscode_config(self, config_dir=None):
        """Setup global Visual Studio Code user settings"""
        if config_dir is None:
            # * on windows: in C:/Users/kenshin/AppData/Roaming/Code/User/settings.json
            #   => should use os.getenv('APPDATA')
            # * on linux: in /home/kenshin/.config/Code/User/settings.json
            if self.is_windows:
                base_dir = os.getenv("APPDATA")
            else:
                base_dir = self.get_path(self.ctx.get_home_dir(), ".config")

            config_dir = self.get_path(base_dir, "Code", "User")

        cfg_file = self.get_path(config_dir, "settings.json")

        config = {}
        ref_config = None
        if not self.file_exists(cfg_file):
            # Ensure the folder exists:
            self.make_folder(config_dir)
        else:
            # Read the config:
            config = self.read_json(cfg_file)
            # Keep a copy to compare the changes:
            ref_config = self.read_json(cfg_file)

        # Now write the changes we want:
        config["python.linting.pylintEnabled"] = True
        config["python.linting.enabled"] = True
        config["python.linting.pylintPath"] = tools.get_tool_path('pylint')
        config["python.linting.pylintArgs"] = [
            "--max-line-length=120",
            "--good-names=i,j,k,ex,Run,_,x,y,z,w,t,dt",
            "--good-names-rgxs=[a-z][0-9]$"]
        config["python.defaultInterpreterPath"] = tools.get_tool_path('python')
        config["python.formatting.autopep8Path"] = tools.get_tool_path("autopep8")
        config["python.formatting.provider"] = "autopep8"
        config["python.formatting.autopep8Args"] = ["--max-line-length=120", "--experimental"]
        config["editor.formatOnSave"] = True
        config["cmakeFormat.exePath"] = tools.get_tool_path("cmake_format")

        if ref_config is None or config != ref_config:
            logger.info("Writing updated vscode settings in %s", cfg_file)
            self.write_json(config, cfg_file)
        else:
            logger.info("No change in %s", cfg_file)

    def init_project_config(self, proj_dir, proj_name):
        """Setup initial project local config elements"""
        config_dir = self.get_path(proj_dir, ".vscode")
        cfg_file = self.get_path(config_dir, "settings.template.json")
        self.make_folder(config_dir)

        config = {}
        ref_config = None

        # Check if we should provide a python environment in this project:
        with_py = self.get_param("with_py_env", False)
        if with_py:
            logger.info("Setting up dedicated python env for %s", proj_name)

        if self.file_exists(cfg_file):
            # Read the config:
            config = self.read_json(cfg_file)
            # Keep a copy to compare the changes:
            ref_config = self.read_json(cfg_file)

        config["python.envFile"] = "${workspaceFolder}/.vs_env"

        ignore_elems = []

        if with_py:
            # We deploy the python packages:
            dest_dir = self.get_path(proj_dir, "tools", "packages")
            self.make_folder(dest_dir)

            # get the python and 7zip versions for each platform:
            py_vers = {}
            sevenzip_vers = {}
            for plat_name in ["windows", "linux"]:
                for el in self.config[f'{plat_name}_tools']:
                    if el["name"] == 'python':
                        py_vers[plat_name] = el["version"]
                    if el["name"] == '7zip':
                        sevenzip_vers[plat_name] = el["version"]

            for plat_name, py_version in py_vers.items():
                for ext in [".7z", ".tar.xz"]:
                    file_name = f"python-{py_version}-{plat_name}{ext}"
                    src_file = self.get_path(self.ctx.get_root_dir(), "tools", "packages", file_name)
                    dst_file = self.get_path(dest_dir, file_name)
                    if self.file_exists(src_file) and not self.file_exists(dst_file):
                        logger.info("Adding package file %s", dst_file)
                        self.copy_file(src_file, dst_file)

            # more updates to vscode settings if we have a dedicated python env:
            cur_py_vers = py_vers[self.platform]
            ext = ".exe" if self.is_windows else ""
            config["python.linting.pylintEnabled"] = True
            config["python.linting.enabled"] = True
            config["python.linting.pylintPath"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/Scripts/pylint{ext}"
            config["python.linting.pylintArgs"] = ["--max-line-length=120"]
            config["python.defaultInterpreterPath"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/python{ext}"
            config["python.formatting.autopep8Path"] = f"${{workspaceFolder}}/tools/{self.platform}/python-{cur_py_vers}/Scripts/autopep8{ext}"
            config["python.formatting.provider"] = "autopep8"
            config["python.formatting.autopep8Args"] = ["--max-line-length=120", "--experimental"]

            # Next, for the windows part we need to deploy the 7zip package too:
            folder_name = f"7zip-{sevenzip_vers['windows']}"
            src_folder = self.get_path(self.ctx.get_root_dir(), "tools", "windows", folder_name)
            dst_folder = self.get_path(proj_dir, "tools", "windows", folder_name)
            if not self.dir_exists(dst_folder):
                logger.info("Adding windows 7zip package at %s", dst_folder)
                self.copy_folder(src_folder, dst_folder)

            # Update the ignore elements:
            ignore_elems += ["", "# Ignore all the windows tools except the 7zip folder:",
                             "tools/windows/*", "!tools/windows/7zip-*", "tools/linux/*"]

            # Should also install a requirements.txt file:
            dest_file = self.get_path(proj_dir, "tools", "requirements.txt")
            if not self.file_exists(dest_file):
                logger.info("Installing python requirements file.")
                content = ["# List here all the required python packages",
                           "# Then call cli.{sh/bat} --install-py-reqs",
                           "", "pylint", "autopep8", ""]
                content = "\n".join(content)
                self.write_text_file(content, dest_file)

            # Should install the cli script files:
            dest_file = self.get_path(proj_dir, "cli.py")
            if not self.file_exists(dest_file):
                logger.info("Writing cli python file %s", dest_file)
                content = DEFAULT_CLI_PY_CONTENT
                self.write_text_file(content, dest_file)

            dest_file = self.get_path(proj_dir, "cli.sh")
            if not self.file_exists(dest_file):
                logger.info("Writing cli shell file %s", dest_file)
                content = DEFAULT_CLI_SH_CONTENT
                content = content.replace("${PROJ_NAME}", proj_name.lower())
                # Use the linux python version below:
                content = content.replace("${PY_VERSION}", py_vers['linux'])
                self.write_text_file(content, dest_file, newline="\n")

            dest_file = self.get_path(proj_dir, "cli.bat")
            if not self.file_exists(dest_file):
                logger.info("Writing cli batch file %s", dest_file)
                content = DEFAULT_CLI_BAT_CONTENT
                content = content.replace("${PROJ_NAME}", proj_name.upper())
                # Use the windows version below:
                content = content.replace("${PY_VERSION}", py_vers['windows'])
                content = content.replace("${ZIP_VERSION}", sevenzip_vers['windows'])
                self.write_text_file(content, dest_file)

        # Finish writing the vscode config:
        if ref_config is None or config != ref_config:
            logger.info("Writing updated vscode settings in %s", cfg_file)
            self.write_json(config, cfg_file)
        else:
            logger.info("No change in %s", cfg_file)

        # Also copy to actual settings if we don't have the file yet:
        cfg_file2 = self.get_path(config_dir, "settings.json")
        if not self.file_exists(cfg_file2):
            logger.info("Copying VSCode settings template to %s", cfg_file2)
            self.copy_file(cfg_file, cfg_file2)

        # Write the env file if needed:
        dest_file = self.get_path(proj_dir, ".vs_env")
        if not self.file_exists(dest_file):
            logger.info("Writing python env file %s", dest_file)
            content = DEFAULT_PYTHONENV_CONTENT
            sep = ";" if self.is_windows else ":"
            content = content.replace("${NVP_ROOT_DIR}", "" if with_py else self.ctx.get_root_dir())
            content = content.replace("${SEP}", "" if with_py else sep)
            self.write_text_file(content, dest_file)

        # and write a .editorconfig file:
        dest_file = self.get_path(proj_dir, ".editorconfig")
        if not self.file_exists(dest_file):
            logger.info("Writing editor config file %s", dest_file)
            content = DEFAULT_EDITORCONFIG_CONTENT
            self.write_text_file(content, dest_file)

        # and write a .gitignore file:
        dest_file = self.get_path(proj_dir, ".gitignore")
        if not self.file_exists(dest_file):
            logger.info("Writing .gitignore file %s", dest_file)
            content = DEFAULT_GITIGNORE_CONTENT
            content += "\n".join(ignore_elems)
            content += "\n"
            self.write_text_file(content, dest_file)

        # and write a .gitattributes file:
        dest_file = self.get_path(proj_dir, ".gitattributes")
        if not self.file_exists(dest_file):
            logger.info("Writing .gitattributes file %s", dest_file)
            content = DEFAULT_GITATTRIBUTES_CONTENT
            self.write_text_file(content, dest_file)

        # write a nvp_config.json file:
        dest_file = self.get_path(proj_dir, "nvp_config.json")
        if not self.file_exists(dest_file):
            logger.info("Writing nvp_config.json file %s", dest_file)
            content = DEFAULT_NVPCONFIG_CONTENT
            self.write_text_file(content, dest_file)

        # write a nvp_plug.py file:
        dest_file = self.get_path(proj_dir, "nvp_plug.py")
        if not self.file_exists(dest_file):
            logger.info("Writing nvp_plug.py file %s", dest_file)
            content = DEFAULT_NVPPLUG_CONTENT.replace("${PROJ_NAME}", proj_name)
            self.write_text_file(content, dest_file)

        # Add pull rebase = false to .git/config
        cfg_file = self.get_path(proj_dir, ".git", "config")
        assert self.file_exists(cfg_file), f"Cannot find git config file at {cfg_file}"

        # Load that config:
        config = self.read_ini(cfg_file)
        save_needed = False
        if 'pull' not in config:
            logger.info("Adding pull section in git config.")
            config['pull'] = {
                "rebase": "false",
            }
            save_needed = True
        else:
            pull = config['pull']
            if pull['rebase'] != 'false':
                logger.info("Updating git pull rebase from %s to %s", pull['rebase'], 'false')
                pull['rebase'] = 'false'
                save_needed = True

        if save_needed:
            self.write_ini(config, cfg_file)

    def process_command(self, cmd0):
        """Re-implementation of the process_command method."""

        if cmd0 != 'admin':
            return False

        cmd1 = self.ctx.get_command(1)
        cmd2 = self.ctx.get_command(2)

        if cmd1 == 'install' and cmd2 == 'cli':
            self.install_cli()
            return True

        if cmd1 == 'install' and cmd2 == 'reqs':
            self.install_python_requirements()
            return True

        if cmd1 == 'install' and cmd2 == 'repo':
            self.install_repository_bootstrap()
            return True

        if cmd1 == 'init':
            self.setup_global_vscode_config()
            proj = self.ctx.get_current_project()
            proj_dir = proj.get_root_dir() if proj is not None else self.ctx.get_root_dir()
            proj_name = proj.get_name(False) if proj is not None else "NervProj"
            self.init_project_config(proj_dir, proj_name)
            return True

        return False
tools = self.get_component('tools')
config["git.path"] = tools.get_git_path()
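The install_cli method above boils down to an idempotent "append a source line to .bashrc" pattern. A minimal standalone sketch of that pattern, assuming plain file I/O instead of the NVPComponent helpers (the function name and paths here are illustrative only):

import shutil

def add_source_line(bashrc_path, script_path):
    """Append a 'source cli.sh' line to .bashrc unless it is already there."""
    sline = f"\n[ -f \"{script_path}\" ] && source \"{script_path}\"\n"
    with open(bashrc_path, "r", encoding="utf-8") as f:
        content = f.read()
    if sline in content:
        return False  # already installed, nothing to do
    shutil.copyfile(bashrc_path, bashrc_path + ".bak")  # backup first
    with open(bashrc_path, "w", encoding="utf-8", newline="\n") as f:
        f.write(content + sline)
    return True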
videoDetail.js
import React from 'react'; const VideoDetail = ({ video }) => { if(!video) { return <div>Loading...</div>; } const videoId = video.id.videoId; const url = `https://www.youtube.com/embed/${videoId}`; return ( <div className="video-detail col-md-8"> <div className="embed-responsive embed-responsive-16by9"> <iframe src={url} className="embed-responsive-item"></iframe> </div> <div className="details"> <div className="details-title">{video.snippet.title}</div> <div className="details-description">{video.snippet.description}</div> </div> </div> ) }
export default VideoDetail;
test_handler.py
import json import pytest from healthcheck import app
""" Generates API GW Event""" return { "body": '{ "test": "body"}', "resource": "/{proxy+}", "requestContext": { "resourceId": "123456", "apiId": "1234567890", "resourcePath": "/{proxy+}", "httpMethod": "POST", "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef", "accountId": "123456789012", "identity": { "apiKey": "", "userArn": "", "cognitoAuthenticationType": "", "caller": "", "userAgent": "Custom User Agent String", "user": "", "cognitoIdentityPoolId": "", "cognitoIdentityId": "", "cognitoAuthenticationProvider": "", "sourceIp": "127.0.0.1", "accountId": "", }, "stage": "prod", }, "queryStringParameters": {"foo": "bar"}, "headers": { "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)", "Accept-Language": "en-US,en;q=0.8", "CloudFront-Is-Desktop-Viewer": "true", "CloudFront-Is-SmartTV-Viewer": "false", "CloudFront-Is-Mobile-Viewer": "false", "X-Forwarded-For": "127.0.0.1, 127.0.0.2", "CloudFront-Viewer-Country": "US", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Upgrade-Insecure-Requests": "1", "X-Forwarded-Port": "443", "Host": "1234567890.execute-api.us-east-1.amazonaws.com", "X-Forwarded-Proto": "https", "X-Amz-Cf-Id": "aaaaaaaaaae3VYQb9jd-nvCd-de396Uhbp027Y2JvkCPNLmGJHqlaA==", "CloudFront-Is-Tablet-Viewer": "false", "Cache-Control": "max-age=0", "User-Agent": "Custom User Agent String", "CloudFront-Forwarded-Proto": "https", "Accept-Encoding": "gzip, deflate, sdch", }, "pathParameters": {"proxy": "/examplepath"}, "httpMethod": "POST", "stageVariables": {"baz": "qux"}, "path": "/examplepath", } def test_lambda_handler(apigw_event, mocker): ret = app.lambda_handler(apigw_event, "") data = json.loads(ret["body"]) assert ret["statusCode"] == 200 assert "message" in ret["body"] assert data["message"] == "healthcheck" # assert "location" in data.dict_keys()
@pytest.fixture() def apigw_event():
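The assertions above only pin down the handler loosely: status code 200 and a JSON body whose "message" key equals "healthcheck". A minimal healthcheck.app.lambda_handler that would satisfy this test might look like the sketch below (inferred from the asserts, not the actual implementation):

import json

def lambda_handler(event, context):
    # Only the status code and the "message" key are checked by the test.
    return {
        "statusCode": 200,
        "body": json.dumps({"message": "healthcheck"}),
    }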
dailies.rs
use std::{collections::HashMap, io::Stdout}; use tui::{ backend::CrosstermBackend, layout::Rect, style::{Modifier, Style}, text::{Span, Spans}, widgets::Paragraph, Frame, }; use crate::input::InputEvent; use orrient::{ api::{Achievement, Dailies, Daily}, events::Event, }; use super::View; pub struct DailiesView { achievements: HashMap<usize, Achievement>, dailies: Option<Dailies>, header_style: Style, } impl DailiesView { pub fn new() -> Self { DailiesView { achievements: HashMap::default(), dailies: None, header_style: Style::default().add_modifier(Modifier::BOLD), } }
if !dailies.is_empty() { let mut group = vec![Spans::from(Span::styled(title, self.header_style))]; for a in dailies.iter().filter_map(|daily| self.render_daily(daily)) { group.push(a) } group } else { Vec::default() } } fn render_daily(&self, daily: &Daily) -> Option<Spans> { self.achievements.get(&daily.id).map(|achievement| { Spans::from(vec![ Span::raw(format!("{}: ", achievement.name.clone())), Span::styled( achievement.requirement.clone(), Style::default().add_modifier(Modifier::DIM), ), ]) }) } } impl View for DailiesView { fn name(&self) -> &'static str { "Dailies" } fn draw(&mut self, frame: &mut Frame<CrosstermBackend<Stdout>>, area: Rect) { if let Some(dailies) = &self.dailies { let blank = vec![Spans::default()]; let pve = self.render_category("PvE".to_string(), &dailies.pve); let pvp = self.render_category("PvP".to_string(), &dailies.pvp); let wvw = self.render_category("WvW".to_string(), &dailies.wvw); let fractals = self.render_category("Fractals".to_string(), &dailies.fractals); let special = self.render_category("Special".to_string(), &dailies.special); let widget = Paragraph::new( pve.into_iter() .chain(blank.iter().map(ToOwned::to_owned)) .chain(pvp.into_iter()) .chain(blank.iter().map(ToOwned::to_owned)) .chain(wvw.into_iter()) .chain(blank.iter().map(ToOwned::to_owned)) .chain(fractals.into_iter()) .chain(blank.iter().map(ToOwned::to_owned)) .chain(special.into_iter()) .collect::<Vec<Spans>>(), ); frame.render_widget(widget, area); } } fn handle_input(&mut self, _: &InputEvent) -> bool { false } fn handle_event(&mut self, event: &Event) { match event { Event::AchievementsLoaded(all_achievements) => { self.achievements = all_achievements .into_iter() .map(|achievement| (achievement.id, achievement.to_owned())) .collect() } Event::FetchedDailies(dailies) => { self.dailies = Some(dailies.to_owned()); } _ => {} } } }
fn render_category(&self, title: String, dailies: &[Daily]) -> Vec<Spans> {
colorthief.py
# -*- coding: utf-8 -*-
"""
    colorthief
    ~~~~~~~~~~

    Grabbing the color palette from an image.

    :copyright: (c) 2015 by Shipeng Feng.
    :license: BSD, see LICENSE for more details.
"""
__version__ = '0.2.1'

import math

from PIL import Image


class cached_property(object):
    """Decorator that converts a method with a single self argument into
    a property cached on the instance.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, type):
        res = instance.__dict__[self.func.__name__] = self.func(instance)
        return res


class ColorThief(object):
    """Color thief main class."""
    def __init__(self, obj, is_obj=False):
        """Create one color thief for one image.

        :param obj: A filename (string) or a file object. The file object
                    must implement `read()`, `seek()`, and `tell()` methods,
                    and be opened in binary mode.
        :param is_obj: A boolean. If True, the object will be passed along.
                       Useful for passing PIL objects to ColorThief.
        """
        if is_obj is True:
            self.image = obj
        else:
            self.image = Image.open(obj)

    def get_color(self, ignore_white=False, ignore_black=False, quality=100000):
        """Get the dominant color.

        :param quality: quality settings, 1 is the highest quality, the bigger
                        the number, the faster a color will be returned but
                        the greater the likelihood that it will not be the
                        visually most dominant color
        :param ignore_white: boolean, ignore white pixels if true
        :param ignore_black: boolean, ignore black pixels if true
        :return tuple: (r, g, b)
        """
        palette = self.get_palette(ignore_white, ignore_black, 5, quality)
        return palette[0]

    def get_palette(self, ignore_white, ignore_black, color_count=10, quality=10):
        """Build a color palette.  We are using the median cut algorithm to
        cluster similar colors.

        :param color_count: the size of the palette, max number of colors
        :param quality: quality settings, 1 is the highest quality, the bigger
                        the number, the faster the palette generation, but the
                        greater the likelihood that colors will be missed.
        :param ignore_white: boolean, ignore white pixels if true
        :param ignore_black: boolean, ignore black pixels if true
        :return list: a list of tuple in the form (r, g, b)
        """
        image = self.image.convert('RGBA')
        width, height = image.size
        pixels = image.getdata()
        pixel_count = width * height
        valid_pixels = []
        for i in range(0, pixel_count, quality):
            r, g, b, a = pixels[i]
            # If pixel is mostly opaque
            if a >= 125:
                if ignore_white or ignore_black:
                    if ignore_white and ignore_black:
                        if (r < 250 and g < 250 and b < 250) and (r > 5 and g > 5 and b > 5):
                            valid_pixels.append((r, g, b))
                    elif ignore_white:
                        if r < 250 and g < 250 and b < 250:
                            valid_pixels.append((r, g, b))
                    elif ignore_black:
                        if r > 5 and g > 5 and b > 5:
                            valid_pixels.append((r, g, b))
                else:
                    valid_pixels.append((r, g, b))

        # Send array to quantize function which clusters values
        # using median cut algorithm
        cmap = MMCQ.quantize(valid_pixels, color_count)
        return cmap.palette


class MMCQ(object):
    """Basic Python port of the MMCQ (modified median cut quantization)
    algorithm from the Leptonica library (http://www.leptonica.com/).
""" SIGBITS = 5 RSHIFT = 8 - SIGBITS MAX_ITERATION = 1000 FRACT_BY_POPULATIONS = 0.75 @staticmethod def get_color_index(r, g, b): return (r << (2 * MMCQ.SIGBITS)) + (g << MMCQ.SIGBITS) + b @staticmethod def get_histo(pixels): """histo (1-d array, giving the number of pixels in each quantized region of color space) """ histo = dict() for pixel in pixels: rval = pixel[0] >> MMCQ.RSHIFT gval = pixel[1] >> MMCQ.RSHIFT bval = pixel[2] >> MMCQ.RSHIFT index = MMCQ.get_color_index(rval, gval, bval) histo[index] = histo.setdefault(index, 0) + 1 return histo @staticmethod def vbox_from_pixels(pixels, histo): rmin = 1000000 rmax = 0 gmin = 1000000 gmax = 0 bmin = 1000000 bmax = 0 for pixel in pixels: rval = pixel[0] >> MMCQ.RSHIFT gval = pixel[1] >> MMCQ.RSHIFT bval = pixel[2] >> MMCQ.RSHIFT rmin = min(rval, rmin) rmax = max(rval, rmax) gmin = min(gval, gmin) gmax = max(gval, gmax) bmin = min(bval, bmin) bmax = max(bval, bmax) return VBox(rmin, rmax, gmin, gmax, bmin, bmax, histo) @staticmethod def median_cut_apply(histo, vbox):
@staticmethod def quantize(pixels, max_color): """Quantize. :param pixels: a list of pixel in the form (r, g, b) :param max_color: max number of colors """ if not pixels: raise Exception('Empty pixels when quantize.') if max_color < 2 or max_color > 256: raise Exception('Wrong number of max colors when quantize.') histo = MMCQ.get_histo(pixels) # check that we aren't below maxcolors already if len(histo) <= max_color: # generate the new colors from the histo and return pass # get the beginning vbox from the colors vbox = MMCQ.vbox_from_pixels(pixels, histo) pq = PQueue(lambda x: x.count) pq.push(vbox) # inner function to do the iteration def iter_(lh, target): n_color = 1 n_iter = 0 while n_iter < MMCQ.MAX_ITERATION: vbox = lh.pop() if not vbox.count: # just put it back lh.push(vbox) n_iter += 1 continue # do the cut vbox1, vbox2 = MMCQ.median_cut_apply(histo, vbox) if not vbox1: raise Exception("vbox1 not defined; shouldn't happen!") lh.push(vbox1) if vbox2: # vbox2 can be null lh.push(vbox2) n_color += 1 if n_color >= target: return if n_iter > MMCQ.MAX_ITERATION: return n_iter += 1 # first set of colors, sorted by population iter_(pq, MMCQ.FRACT_BY_POPULATIONS * max_color) # Re-sort by the product of pixel occupancy times the size in # color space. pq2 = PQueue(lambda x: x.count * x.volume) while pq.size(): pq2.push(pq.pop()) # next set - generate the median cuts using the (npix * vol) sorting. iter_(pq2, max_color - pq2.size()) # calculate the actual colors cmap = CMap() while pq2.size(): cmap.push(pq2.pop()) return cmap class VBox(object): """3d color space box""" def __init__(self, r1, r2, g1, g2, b1, b2, histo): self.r1 = r1 self.r2 = r2 self.g1 = g1 self.g2 = g2 self.b1 = b1 self.b2 = b2 self.histo = histo @cached_property def volume(self): sub_r = self.r2 - self.r1 sub_g = self.g2 - self.g1 sub_b = self.b2 - self.b1 return (sub_r + 1) * (sub_g + 1) * (sub_b + 1) @property def copy(self): return VBox(self.r1, self.r2, self.g1, self.g2, self.b1, self.b2, self.histo) @cached_property def avg(self): ntot = 0 mult = 1 << (8 - MMCQ.SIGBITS) r_sum = 0 g_sum = 0 b_sum = 0 for i in range(self.r1, self.r2 + 1): for j in range(self.g1, self.g2 + 1): for k in range(self.b1, self.b2 + 1): histoindex = MMCQ.get_color_index(i, j, k) hval = self.histo.get(histoindex, 0) ntot += hval r_sum += hval * (i + 0.5) * mult g_sum += hval * (j + 0.5) * mult b_sum += hval * (k + 0.5) * mult if ntot: r_avg = int(r_sum / ntot) g_avg = int(g_sum / ntot) b_avg = int(b_sum / ntot) else: r_avg = int(mult * (self.r1 + self.r2 + 1) / 2) g_avg = int(mult * (self.g1 + self.g2 + 1) / 2) b_avg = int(mult * (self.b1 + self.b2 + 1) / 2) return r_avg, g_avg, b_avg def contains(self, pixel): rval = pixel[0] >> MMCQ.RSHIFT gval = pixel[1] >> MMCQ.RSHIFT bval = pixel[2] >> MMCQ.RSHIFT return all([ rval >= self.r1, rval <= self.r2, gval >= self.g1, gval <= self.g2, bval >= self.b1, bval <= self.b2, ]) @cached_property def count(self): npix = 0 for i in range(self.r1, self.r2 + 1): for j in range(self.g1, self.g2 + 1): for k in range(self.b1, self.b2 + 1): index = MMCQ.get_color_index(i, j, k) npix += self.histo.get(index, 0) return npix class CMap(object): """Color map""" def __init__(self): self.vboxes = PQueue(lambda x: x['vbox'].count * x['vbox'].volume) @property def palette(self): return self.vboxes.map(lambda x: x['color']) def push(self, vbox): self.vboxes.push({ 'vbox': vbox, 'color': vbox.avg, }) def size(self): return self.vboxes.size() def nearest(self, color): d1 = None p_color = None for i in 
range(self.vboxes.size()): vbox = self.vboxes.peek(i) d2 = math.sqrt( math.pow(color[0] - vbox['color'][0], 2) + math.pow(color[1] - vbox['color'][1], 2) + math.pow(color[2] - vbox['color'][2], 2) ) if d1 is None or d2 < d1: d1 = d2 p_color = vbox['color'] return p_color def map(self, color): for i in range(self.vboxes.size()): vbox = self.vboxes.peek(i) if vbox['vbox'].contains(color): return vbox['color'] return self.nearest(color) class PQueue(object): """Simple priority queue.""" def __init__(self, sort_key): self.sort_key = sort_key self.contents = [] self._sorted = False def sort(self): self.contents.sort(key=self.sort_key) self._sorted = True def push(self, o): self.contents.append(o) self._sorted = False def peek(self, index=None): if not self._sorted: self.sort() if index is None: index = len(self.contents) - 1 return self.contents[index] def pop(self): if not self._sorted: self.sort() return self.contents.pop() def size(self): return len(self.contents) def map(self, f): return list(map(f, self.contents))
if not vbox.count: return (None, None) rw = vbox.r2 - vbox.r1 + 1 gw = vbox.g2 - vbox.g1 + 1 bw = vbox.b2 - vbox.b1 + 1 maxw = max([rw, gw, bw]) # only one pixel, no split if vbox.count == 1: return (vbox.copy, None) # Find the partial sum arrays along the selected axis. total = 0 sum_ = 0 partialsum = {} lookaheadsum = {} do_cut_color = None if maxw == rw: do_cut_color = 'r' for i in range(vbox.r1, vbox.r2+1): sum_ = 0 for j in range(vbox.g1, vbox.g2+1): for k in range(vbox.b1, vbox.b2+1): index = MMCQ.get_color_index(i, j, k) sum_ += histo.get(index, 0) total += sum_ partialsum[i] = total elif maxw == gw: do_cut_color = 'g' for i in range(vbox.g1, vbox.g2+1): sum_ = 0 for j in range(vbox.r1, vbox.r2+1): for k in range(vbox.b1, vbox.b2+1): index = MMCQ.get_color_index(j, i, k) sum_ += histo.get(index, 0) total += sum_ partialsum[i] = total else: # maxw == bw do_cut_color = 'b' for i in range(vbox.b1, vbox.b2+1): sum_ = 0 for j in range(vbox.r1, vbox.r2+1): for k in range(vbox.g1, vbox.g2+1): index = MMCQ.get_color_index(j, k, i) sum_ += histo.get(index, 0) total += sum_ partialsum[i] = total for i, d in partialsum.items(): lookaheadsum[i] = total - d # determine the cut planes dim1 = do_cut_color + '1' dim2 = do_cut_color + '2' dim1_val = getattr(vbox, dim1) dim2_val = getattr(vbox, dim2) for i in range(dim1_val, dim2_val+1): if partialsum[i] > (total / 2): vbox1 = vbox.copy vbox2 = vbox.copy left = i - dim1_val right = dim2_val - i if left <= right: d2 = min([dim2_val - 1, int(i + right / 2)]) else: d2 = max([dim1_val, int(i - 1 - left / 2)]) # avoid 0-count boxes while not partialsum.get(d2, False): d2 += 1 count2 = lookaheadsum.get(d2) while not count2 and partialsum.get(d2-1, False): d2 -= 1 count2 = lookaheadsum.get(d2) # set dimensions setattr(vbox1, dim2, d2) setattr(vbox2, dim1, getattr(vbox1, dim2) + 1) return (vbox1, vbox2) return (None, None)
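A short usage sketch for the ColorThief API defined above; 'image.png' is a placeholder path, and quality trades accuracy for speed (1 is the most accurate):

thief = ColorThief("image.png")  # placeholder path

# Dominant color as an (r, g, b) tuple, optionally skipping near-white/near-black pixels:
dominant = thief.get_color(ignore_white=True, ignore_black=True, quality=10)

# A palette of up to 6 colors clustered via median cut:
palette = thief.get_palette(ignore_white=False, ignore_black=False, color_count=6, quality=10)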
button.rs
use { crate::{core, signal, theme}, reclutch::display as gfx, }; pub type ButtonRef = core::ComponentRef<Button>; pub struct
{ pub on_click: core::SignalRef<()>, painter: theme::Painter<Self>, } impl core::ComponentFactory for Button { fn new(globals: &mut core::Globals, _cref: core::ComponentRef<Self>) -> Self { Button { on_click: globals.signal(), painter: globals.painter(theme::painters::BUTTON), } } } impl core::Component for Button { #[inline] fn display(&mut self) -> Vec<gfx::DisplayCommand> { theme::paint(self, |o| &mut o.painter) } }
Button
log_scale.ts
import {Scale} from "./scale" import {Arrayable} from "core/types" export namespace LogScale { export interface Attrs extends Scale.Attrs {} export interface Props extends Scale.Props {} } export interface LogScale extends LogScale.Attrs {} export class LogScale extends Scale { properties: LogScale.Props constructor(attrs?: Partial<LogScale.Attrs>) { super(attrs) } static initClass(): void { this.prototype.type = "LogScale" } compute(x: number): number { const [factor, offset, inter_factor, inter_offset] = this._compute_state()
const _x = (Math.log(x) - inter_offset) / inter_factor if (isFinite(_x)) value = _x*factor + offset else value = NaN } return value } v_compute(xs: Arrayable<number>): Arrayable<number> { const [factor, offset, inter_factor, inter_offset] = this._compute_state() const result = new Float64Array(xs.length) if (inter_factor == 0) { for (let i = 0; i < xs.length; i++) result[i] = 0 } else { for (let i = 0; i < xs.length; i++) { const _x = (Math.log(xs[i]) - inter_offset) / inter_factor let value: number if (isFinite(_x)) value = _x*factor + offset else value = NaN result[i] = value } } return result } invert(xprime: number): number { const [factor, offset, inter_factor, inter_offset] = this._compute_state() const value = (xprime - offset) / factor return Math.exp(inter_factor*value + inter_offset) } v_invert(xprimes: Arrayable<number>): Arrayable<number> { const [factor, offset, inter_factor, inter_offset] = this._compute_state() const result = new Float64Array(xprimes.length) for (let i = 0; i < xprimes.length; i++) { const value = (xprimes[i] - offset) / factor result[i] = Math.exp(inter_factor*value + inter_offset) } return result } protected _get_safe_factor(orig_start: number, orig_end: number): [number, number] { let start = orig_start < 0 ? 0 : orig_start let end = orig_end < 0 ? 0 : orig_end if (start == end) { if (start == 0) [start, end] = [1, 10] else { const log_val = Math.log(start) / Math.log(10) start = Math.pow(10, Math.floor(log_val)) if (Math.ceil(log_val) != Math.floor(log_val)) end = Math.pow(10, Math.ceil(log_val)) else end = Math.pow(10, Math.ceil(log_val) + 1) } } return [start, end] } /*protected*/ _compute_state(): [number, number, number, number] { const source_start = this.source_range.start const source_end = this.source_range.end const target_start = this.target_range.start const target_end = this.target_range.end const screen_range = target_end - target_start const [start, end] = this._get_safe_factor(source_start, source_end) let inter_factor: number let inter_offset: number if (start == 0) { inter_factor = Math.log(end) inter_offset = 0 } else { inter_factor = Math.log(end) - Math.log(start) inter_offset = Math.log(start) } const factor = screen_range const offset = target_start return [factor, offset, inter_factor, inter_offset] } } LogScale.initClass()
let value: number if (inter_factor == 0) value = 0 else {
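Numerically, compute and _compute_state above implement target = (log(x) - log(start)) / (log(end) - log(start)) * (t1 - t0) + t0, with invert as its inverse. A quick round-trip sanity check of that formula, written in Python since the records here mix languages:

import math

def compute(x, start, end, t0, t1):
    inter = (math.log(x) - math.log(start)) / (math.log(end) - math.log(start))
    return inter * (t1 - t0) + t0

def invert(xp, start, end, t0, t1):
    inter = (xp - t0) / (t1 - t0)
    return math.exp(inter * (math.log(end) - math.log(start)) + math.log(start))

# Mapping source range [1, 100] onto target range [0, 600] and back:
assert abs(invert(compute(42.0, 1, 100, 0, 600), 1, 100, 0, 600) - 42.0) < 1e-9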
calculator.py
from __future__ import division

import discord, math, operator

from discord.ext import commands
from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional,
                       ZeroOrMore, Forward, nums, alphas, oneOf)

__author__ = 'Paul McGuire'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__ = """http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
"""
__note__ = """
This is a re-wrap of Paul McGuire's fourFn.py as a class, so it can be used
easily in other places of the code. Most of the work was done by corpnewt,
all I did was clean it and create the results in embeds. Also, the messages
are deleted after, except for the correct answer.
"""


class NumericStringParserForPython3(object):
    """
    Most of this code comes from the fourFn.py pyparsing example
    """
    def pushFirst(self, strg, loc, toks):
        self.exprStack.append(toks[0])

    def pushUMinus(self, strg, loc, toks):
        if toks and toks[0] == '-':
            self.exprStack.append('unary -')

    def __init__(self):
        """
        Please use any of the following symbols:
        expop   :: '^'
        multop  :: '*' | '/'
        addop   :: '+' | '-'
        integer :: ['+' | '-'] '0'..'9'+
        """
        point = Literal(".")
        e = CaselessLiteral("E")
        fnumber = Combine(Word("+-"+nums, nums) +
                          Optional(point + Optional(Word(nums))) +
                          Optional(e + Word("+-"+nums, nums)))
        ident = Word(alphas, alphas+nums+"_$")
        plus = Literal("+")
        minus = Literal("-")
        mult = Literal("*")
        div = Literal("/")
        lpar = Literal("(").suppress()
        rpar = Literal(")").suppress()
        addop = plus | minus
        multop = mult | div
        expop = Literal("^")
        pi = CaselessLiteral("PI")
        expr = Forward()
        atom = ((Optional(oneOf("- +")) +
                 (pi|e|fnumber|ident+lpar+expr+rpar).setParseAction(self.pushFirst))
                | Optional(oneOf("- +")) + Group(lpar+expr+rpar)
                ).setParseAction(self.pushUMinus)
        # by defining exponentiation as "atom [ ^ factor ]..." instead of
        # "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-right
        # that is, 2^3^2 = 2^(3^2), not (2^3)^2.
        factor = Forward()
        factor << atom + ZeroOrMore((expop + factor).setParseAction(self.pushFirst))
        term = factor + ZeroOrMore((multop + factor).setParseAction(self.pushFirst))
        expr << term + ZeroOrMore((addop + term).setParseAction(self.pushFirst))
        # addop_term = (addop + term).setParseAction(self.pushFirst)
        # general_term = term + ZeroOrMore(addop_term) | OneOrMore(addop_term)
        # expr << general_term
        self.bnf = expr
        # this will map operator symbols to their corresponding arithmetic operations
        epsilon = 1e-12
        self.opn = {"+": operator.add,
                    "-": operator.sub,
                    "*": operator.mul,
                    "/": operator.truediv,
                    "^": operator.pow}
        self.fn = {"sin": math.sin,
                   "cos": math.cos,
                   "tan": math.tan,
                   "abs": abs,
                   "trunc": lambda a: int(a),
                   "round": round,
                   "sgn": lambda a: abs(a) > epsilon and (a > 0) - (a < 0) or 0}

    def evaluateStack(self, s):
        op = s.pop()
        if op == 'unary -':
            return -self.evaluateStack(s)
        if op in "+-*/^":
            op2 = self.evaluateStack(s)
            op1 = self.evaluateStack(s)
            return self.opn[op](op1, op2)
        elif op == "PI":
            return math.pi  # 3.1415926535
        elif op == "E":
            return math.e  # 2.718281828
        elif op in self.fn:
            return self.fn[op](self.evaluateStack(s))
        elif op[0].isalpha():
            return 0
        else:
            return float(op)

    def eval(self, num_string, parseAll=True):
        self.exprStack = []
        results = self.bnf.parseString(num_string, parseAll)
        val = self.evaluateStack(self.exprStack[:])
        return val


class Calculator:
    # Init with the bot reference, and a reference to the settings var
setup(bot):
    bot.add_cog(Calculator(bot))
    def __init__(self, bot):
        self.bot = bot
        self.nsp = NumericStringParserForPython3()
        self.user_color = discord.Colour(0xed791d)  ## orange
        self.mod_color = discord.Colour(0x7289da)  ## blurple

    @commands.command(description='Scientific calculator', aliases=['calculate', 'maths'])
    async def calc(self, ctx, *, formula=None):
        """ ✔ Do some math
        thanks to Paul McGuire's fourFn.py. """
        person = ctx.message.author

        if formula is None:
            # How can it calculate an empty message? Reee!
            msg = f'\u200BUsage: `{ctx.prefix}{ctx.invoked_with} [any maths formula]`'
            e = discord.Embed(color=self.user_color)
            e.description = msg
            try:
                await ctx.send(embed=e, delete_after=23)
            except discord.HTTPException:
                await ctx.send(msg, delete_after=23)
            return

        formula = formula.replace('x', '*').replace(' minus ', '-').replace(' plus ', '+').replace(' into ', '/') \
            .replace(' sub ', '-').replace(' pi ', 'PI').replace(' divide ', '/').replace(' multiply ', '*') \
            .replace(' add ', '+').replace(' div ', '/').replace(' multi ', '*').replace(' mul ', '*') \
            .replace('π', 'PI').replace('÷', '/')

        try:
            answer = self.nsp.eval(formula)
        except Exception:
            # If there's a problem in the input, show examples
            msg = f'\N{THINKING FACE} wrong `{formula}` input.\n\nTry any of these:'
            e = discord.Embed(color=self.user_color)
            e.description = f'\u200B{msg}'
            e.add_field(name='multiply', value='`2 * 3 x 5 multiply 7`')
            e.add_field(name='divide', value='`91 / 5 divide 3 into 2 ÷ 4`')
            e.add_field(name='add', value='`1 + 4 plus 8 add 23`')
            e.add_field(name='subtract', value='`91 - 35 minus 3 sub 12`')
            e.add_field(name='exponential', value="`7 ^ 5`")
            e.add_field(name='Supported formulas',
                        value='```py\nround((cos(45) + (3+7^2)*2 + tan(369.18)) / π - 3)```')
            try:
                await ctx.send(embed=e, delete_after=23)
            except discord.HTTPException:
                error = f'\N{THINKING FACE} wrong `{formula}` input.\n\n ' \
                        f'Try any of these:```py\nround((cos(45) + (3+7^2)*2 + tan(369.18)) / π - 3)```'
                await ctx.send(error, delete_after=23)
            return

        # Correct input prints correct answer
        distance = self.bot or self.bot.message
        duration = f'Calculated in {distance.ws.latency * 1000:.2f} ms'
        success = round(answer, 2)

        e = discord.Embed(color=self.user_color)
        e.add_field(name='Input:', value=f'```py\n{formula}```', inline=True)
        e.add_field(name='Result:', value=f'```css\n{success}```', inline=True)
        e.set_footer(text=duration)

        try:
            await ctx.send(embed=e)
        except discord.Forbidden:  # FORBIDDEN (status code: 403): Missing Permissions
            await ctx.send(f'```rust\n>Input: {formula}\nResult: {success}```')


    def
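A quick interactive check of the parser class above; note that the grammar comment's right-to-left exponent rule really does make 2^3^2 parse as 2^(3^2):

nsp = NumericStringParserForPython3()
assert nsp.eval("9 + 3") == 12.0
assert nsp.eval("2^3^2") == 512.0  # right-associative: 2^(3^2), not (2^3)^2 = 64
assert nsp.eval("sin(0)") == 0.0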
BatchExecuteStatementCommand.ts
import { RDSDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RDSDataClient"; import { BatchExecuteStatementRequest, BatchExecuteStatementResponse } from "../models/models_0"; import { deserializeAws_restJson1BatchExecuteStatementCommand, serializeAws_restJson1BatchExecuteStatementCommand, } from "../protocols/Aws_restJson1"; import { getSerdePlugin } from "@aws-sdk/middleware-serde"; import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; import { Command as $Command } from "@aws-sdk/smithy-client"; import { FinalizeHandlerArguments, Handler, HandlerExecutionContext, MiddlewareStack, HttpHandlerOptions as __HttpHandlerOptions, MetadataBearer as __MetadataBearer, SerdeContext as __SerdeContext, } from "@aws-sdk/types"; export type BatchExecuteStatementCommandInput = BatchExecuteStatementRequest; export type BatchExecuteStatementCommandOutput = BatchExecuteStatementResponse & __MetadataBearer; /** * <p>Runs a batch SQL statement over an array of data.</p> * <p>You can run bulk update and insert operations for multiple records using a DML * statement with different parameter sets. Bulk operations can provide a significant * performance improvement over individual insert and update operations.</p> * <important> * <p>If a call isn't part of a transaction because it doesn't include the * <code>transactionID</code> parameter, changes that result from the call are * committed automatically.</p> * </important> */ export class BatchExecuteStatementCommand extends $Command< BatchExecuteStatementCommandInput, BatchExecuteStatementCommandOutput, RDSDataClientResolvedConfig > { // Start section: command_properties // End section: command_properties constructor(readonly input: BatchExecuteStatementCommandInput) { // Start section: command_constructor super(); // End section: command_constructor } /** * @internal */ resolveMiddleware( clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>, configuration: RDSDataClientResolvedConfig, options?: __HttpHandlerOptions ): Handler<BatchExecuteStatementCommandInput, BatchExecuteStatementCommandOutput> { this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); const stack = clientStack.concat(this.middlewareStack); const { logger } = configuration; const clientName = "RDSDataClient"; const commandName = "BatchExecuteStatementCommand"; const handlerExecutionContext: HandlerExecutionContext = { logger, clientName, commandName, inputFilterSensitiveLog: BatchExecuteStatementRequest.filterSensitiveLog, outputFilterSensitiveLog: BatchExecuteStatementResponse.filterSensitiveLog, }; const { requestHandler } = configuration; return stack.resolve( (request: FinalizeHandlerArguments<any>) => requestHandler.handle(request.request as __HttpRequest, options || {}), handlerExecutionContext
return serializeAws_restJson1BatchExecuteStatementCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<BatchExecuteStatementCommandOutput> { return deserializeAws_restJson1BatchExecuteStatementCommand(output, context); } // Start section: command_body_extra // End section: command_body_extra }
); } private serialize(input: BatchExecuteStatementCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
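For comparison from Python, the same Data API operation is exposed by boto3's rds-data client; a rough equivalent of this command (the ARNs and SQL below are placeholders):

import boto3

client = boto3.client("rds-data")
client.batch_execute_statement(
    resourceArn="arn:aws:rds:us-east-1:123456789012:cluster:example",      # placeholder
    secretArn="arn:aws:secretsmanager:us-east-1:123456789012:secret:example",  # placeholder
    sql="INSERT INTO pets (name) VALUES (:name)",
    parameterSets=[
        # one parameter set per record, executed as a bulk DML statement
        [{"name": "name", "value": {"stringValue": "Rex"}}],
        [{"name": "name", "value": {"stringValue": "Milo"}}],
    ],
)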
_GoalTimed.py
# This Python file uses the following encoding: utf-8 """autogenerated by genpy from mav_manager/GoalTimedRequest.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct import genpy class GoalTimedRequest(genpy.Message): _md5sum = "3c9a1ea281c62219122f22aa2b508b97" _type = "mav_manager/GoalTimedRequest" _has_header = False #flag to mark the presence of a Header object _full_text = """float32[4] goal duration duration time t_start """ __slots__ = ['goal','duration','t_start'] _slot_types = ['float32[4]','duration','time'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: goal,duration,t_start :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if args or kwds: super(GoalTimedRequest, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.goal is None: self.goal = [0.] * 4 if self.duration is None: self.duration = genpy.Duration() if self.t_start is None: self.t_start = genpy.Time() else: self.goal = [0.] * 4 self.duration = genpy.Duration() self.t_start = genpy.Time() def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: buff.write(_get_struct_4f().pack(*self.goal)) _x = self buff.write(_get_struct_2i2I().pack(_x.duration.secs, _x.duration.nsecs, _x.t_start.secs, _x.t_start.nsecs)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: if self.duration is None: self.duration = genpy.Duration() if self.t_start is None: self.t_start = genpy.Time() end = 0 start = end end += 16 self.goal = _get_struct_4f().unpack(str[start:end]) _x = self start = end end += 16 (_x.duration.secs, _x.duration.nsecs, _x.t_start.secs, _x.t_start.nsecs,) = _get_struct_2i2I().unpack(str[start:end]) self.duration.canon() self.t_start.canon() return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: buff.write(self.goal.tostring()) _x = self buff.write(_get_struct_2i2I().pack(_x.duration.secs, _x.duration.nsecs, _x.t_start.secs, _x.t_start.nsecs)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy 
python module """ try: if self.duration is None: self.duration = genpy.Duration() if self.t_start is None: self.t_start = genpy.Time() end = 0 start = end end += 16 self.goal = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=4) _x = self start = end end += 16 (_x.duration.secs, _x.duration.nsecs, _x.t_start.secs, _x.t_start.nsecs,) = _get_struct_2i2I().unpack(str[start:end]) self.duration.canon() self.t_start.canon() return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_4f = None def
(): global _struct_4f if _struct_4f is None: _struct_4f = struct.Struct("<4f") return _struct_4f _struct_2i2I = None def _get_struct_2i2I(): global _struct_2i2I if _struct_2i2I is None: _struct_2i2I = struct.Struct("<2i2I") return _struct_2i2I # This Python file uses the following encoding: utf-8 """autogenerated by genpy from mav_manager/GoalTimedResponse.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class GoalTimedResponse(genpy.Message): _md5sum = "937c9679a518e3a18d831e57125ea522" _type = "mav_manager/GoalTimedResponse" _has_header = False #flag to mark the presence of a Header object _full_text = """bool success string message """ __slots__ = ['success','message'] _slot_types = ['bool','string'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: success,message :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if args or kwds: super(GoalTimedResponse, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.success is None: self.success = False if self.message is None: self.message = '' else: self.success = False self.message = '' def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: buff.write(_get_struct_B().pack(self.success)) _x = self.message length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: end = 0 start = end end += 1 (self.success,) = _get_struct_B().unpack(str[start:end]) self.success = bool(self.success) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.message = str[start:end].decode('utf-8') else: self.message = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: buff.write(_get_struct_B().pack(self.success)) _x = self.message length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.pack('<I%ss'%length, length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance 
using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ try: end = 0 start = end end += 1 (self.success,) = _get_struct_B().unpack(str[start:end]) self.success = bool(self.success) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.message = str[start:end].decode('utf-8') else: self.message = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_B = None def _get_struct_B(): global _struct_B if _struct_B is None: _struct_B = struct.Struct("<B") return _struct_B class GoalTimed(object): _type = 'mav_manager/GoalTimed' _md5sum = '3200a97d30222d1d03961acacb87f306' _request_class = GoalTimedRequest _response_class = GoalTimedResponse
_get_struct_4f
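The generated serializers above fix the wire layout of GoalTimedRequest: four little-endian float32 values followed by two signed and two unsigned 32-bit integers, 32 bytes in total. A small stdlib-only check of that layout (the sample values are arbitrary):

import struct

goal_fmt = struct.Struct("<4f")    # goal: float32[4]
time_fmt = struct.Struct("<2i2I")  # duration (secs, nsecs), t_start (secs, nsecs)
assert goal_fmt.size + time_fmt.size == 32

buf = goal_fmt.pack(1.0, 2.0, 3.0, 0.5) + time_fmt.pack(5, 0, 1700000000, 0)
assert goal_fmt.unpack(buf[:16]) == (1.0, 2.0, 3.0, 0.5)
assert time_fmt.unpack(buf[16:]) == (5, 0, 1700000000, 0)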
test_media_search.py
"""Tests for Plex server."""
from plexapi.exceptions import BadRequest, NotFound import pytest from homeassistant.components.media_player.const import ( ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, DOMAIN as MEDIA_PLAYER_DOMAIN, MEDIA_TYPE_EPISODE, MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, MEDIA_TYPE_VIDEO, SERVICE_PLAY_MEDIA, ) from homeassistant.components.plex.const import DOMAIN from homeassistant.const import ATTR_ENTITY_ID from homeassistant.exceptions import HomeAssistantError async def test_media_lookups( hass, mock_plex_server, requests_mock, playqueue_created, caplog ): """Test media lookups to Plex server.""" # Plex Key searches media_player_id = hass.states.async_entity_ids("media_player")[0] requests_mock.post("/playqueues", text=playqueue_created) requests_mock.get("/player/playback/playMedia", status_code=200) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: DOMAIN, ATTR_MEDIA_CONTENT_ID: 1, }, True, ) with pytest.raises(HomeAssistantError) as excinfo: with patch("plexapi.server.PlexServer.fetchItem", side_effect=NotFound): assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: DOMAIN, ATTR_MEDIA_CONTENT_ID: 123, }, True, ) assert "Media could not be found: 123" in str(excinfo.value) # TV show searches with pytest.raises(HomeAssistantError) as excinfo: payload = '{"library_name": "Not a Library", "show_name": "TV Show"}' assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE, ATTR_MEDIA_CONTENT_ID: payload, }, True, ) assert f"Media could not be found: {payload}" in str(excinfo.value) with patch("plexapi.library.LibrarySection.search") as search: assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE, ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "show_name": "TV Show"}', }, True, ) search.assert_called_with(**{"show.title": "TV Show", "libtype": "show"}) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE, ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "episode_name": "An Episode"}', }, True, ) search.assert_called_with( **{"episode.title": "An Episode", "libtype": "episode"} ) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE, ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "show_name": "TV Show", "season_number": 1}', }, True, ) search.assert_called_with( **{"show.title": "TV Show", "season.index": 1, "libtype": "season"} ) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE, ATTR_MEDIA_CONTENT_ID: '{"library_name": "TV Shows", "show_name": "TV Show", "season_number": 1, "episode_number": 3}', }, True, ) search.assert_called_with( **{ "show.title": "TV Show", "season.index": 1, "episode.index": 3, "libtype": "episode", } ) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist"}', }, True, ) 
search.assert_called_with(**{"artist.title": "Artist", "libtype": "artist"}) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "album_name": "Album"}', }, True, ) search.assert_called_with(**{"album.title": "Album", "libtype": "album"}) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "track_name": "Track 3"}', }, True, ) search.assert_called_with( **{"artist.title": "Artist", "track.title": "Track 3", "libtype": "track"} ) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album"}', }, True, ) search.assert_called_with( **{"artist.title": "Artist", "album.title": "Album", "libtype": "album"} ) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album", "track_number": 3}', }, True, ) search.assert_called_with( **{ "artist.title": "Artist", "album.title": "Album", "track.index": 3, "libtype": "track", } ) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album", "track_name": "Track 3"}', }, True, ) search.assert_called_with( **{ "artist.title": "Artist", "album.title": "Album", "track.title": "Track 3", "libtype": "track", } ) # Movie searches assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_VIDEO, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Movies", "video_name": "Movie 1"}', }, True, ) search.assert_called_with(**{"movie.title": "Movie 1", "libtype": None}) assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE, ATTR_MEDIA_CONTENT_ID: '{"library_name": "Movies", "title": "Movie 1"}', }, True, ) search.assert_called_with(**{"title": "Movie 1", "libtype": None}) # TV show searches with pytest.raises(HomeAssistantError) as excinfo: payload = '{"library_name": "Movies", "title": "Not a Movie"}' with patch("plexapi.library.LibrarySection.search", side_effect=BadRequest): assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_VIDEO, ATTR_MEDIA_CONTENT_ID: payload, }, True, ) assert "Problem in query" in caplog.text assert f"Media could not be found: {payload}" in str(excinfo.value) # Playlist searches assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_PLAYLIST, ATTR_MEDIA_CONTENT_ID: '{"playlist_name": "Playlist 1"}', }, True, ) with pytest.raises(HomeAssistantError) as excinfo: payload = '{"playlist_name": "Not a Playlist"}' assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, 
SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_PLAYLIST, ATTR_MEDIA_CONTENT_ID: payload, }, True, ) assert "Playlist 'Not a Playlist' not found" in caplog.text assert f"Media could not be found: {payload}" in str(excinfo.value) with pytest.raises(HomeAssistantError) as excinfo: payload = "{}" assert await hass.services.async_call( MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, { ATTR_ENTITY_ID: media_player_id, ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_PLAYLIST, ATTR_MEDIA_CONTENT_ID: payload, }, True, ) assert "Must specify 'playlist_name' for this search" in caplog.text assert f"Media could not be found: {payload}" in str(excinfo.value)
from unittest.mock import patch
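# --- Illustrative sketch (not part of the Plex integration test above) ---
# The test repeatedly uses one pattern: patch a plexapi call so it fails, then
# assert the failure surfaces as a wrapped error with a useful message. A
# minimal, self-contained version of that pattern; `fetch_item` and
# `lookup_media` are hypothetical stand-ins, not Home Assistant APIs.

import pytest
from unittest.mock import patch


class MediaNotFound(Exception):
    """Stand-in for plexapi.exceptions.NotFound."""


def fetch_item(key):
    """Stand-in for plexapi.server.PlexServer.fetchItem."""
    return {"key": key}


def lookup_media(key):
    try:
        return fetch_item(key)
    except MediaNotFound as err:
        # Wrap the library error, mirroring how the integration raises HomeAssistantError
        raise RuntimeError(f"Media could not be found: {key}") from err


def test_lookup_media_wraps_not_found():
    with pytest.raises(RuntimeError) as excinfo:
        with patch(f"{__name__}.fetch_item", side_effect=MediaNotFound):
            lookup_media(123)
    assert "Media could not be found: 123" in str(excinfo.value)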
test_bar.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license (see the COPYING file). """ Test Bar """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function def
(): """ Test Bar """ assert True
test_bar
bdist_dumb.py
"""distutils.command.bdist_dumb Implements the Distutils 'bdist_dumb' command (create a "dumb" built distribution -- i.e., just an archive to be unpacked under $prefix or $exec_prefix).""" # created 2000/03/29, Greg Ward __revision__ = "$Id: bdist_dumb.py,v 1.2 2002/04/12 09:44:05 sof34 Exp $" import os from distutils.core import Command from distutils.util import get_platform from distutils.dir_util import create_tree, remove_tree from distutils.errors import * class bdist_dumb (Command): description = "create a \"dumb\" built distribution"
"platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('format=', 'f', "archive format to create (tar, ztar, gztar, zip)"), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('dist-dir=', 'd', "directory to put final built distributions in"), ] boolean_options = ['keep-temp'] default_format = { 'posix': 'gztar', 'nt': 'zip', } def initialize_options (self): self.bdist_dir = None self.plat_name = None self.format = None self.keep_temp = 0 self.dist_dir = None # initialize_options() def finalize_options (self): if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'dumb') if self.format is None: try: self.format = self.default_format[os.name] except KeyError: raise DistutilsPlatformError, \ ("don't know how to create dumb built distributions " + "on platform %s") % os.name self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('plat_name', 'plat_name')) # finalize_options() def run (self): self.run_command('build') install = self.reinitialize_command('install', reinit_subcommands=1) install.root = self.bdist_dir self.announce("installing to %s" % self.bdist_dir) self.run_command('install') # And make an archive relative to the root of the # pseudo-installation tree. archive_basename = "%s.%s" % (self.distribution.get_fullname(), self.plat_name) self.make_archive(os.path.join(self.dist_dir, archive_basename), self.format, root_dir=self.bdist_dir) if not self.keep_temp: remove_tree(self.bdist_dir, self.verbose, self.dry_run) # run() # class bdist_dumb
user_options = [('bdist-dir=', 'd', "temporary directory for creating the distribution"), ('plat-name=', 'p',
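# --- Hedged sketch (not part of distutils itself) ---
# The essence of bdist_dumb on modern Python: install into a temporary root,
# then archive that tree relative to its own top directory. shutil.make_archive
# covers the same gztar/zip formats. `install_root`, `fullname`, and `plat_name`
# mirror the values the command computes above; the names here are illustrative.

import os
import shutil


def make_dumb_dist(install_root, dist_dir, fullname, plat_name, fmt="gztar"):
    # e.g. "mypkg-1.0" + "linux-x86_64" -> dist/mypkg-1.0.linux-x86_64.tar.gz
    base = os.path.join(dist_dir, "%s.%s" % (fullname, plat_name))
    return shutil.make_archive(base, fmt, root_dir=install_root)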
testAnonymizationExecutor.py
#!/usr/bin/env python3 import unittest import os import shutil from src.data.VideoItem import VideoItem from src.data.MetaDataItem import MetaDataItem from src.executor.FaceBlurrer import FaceBlurrer from numpy.testing import assert_array_equal, assert_raises class TestAnonymizationExecutor(unittest.TestCase): TEST_DIR = os.path.join(os.getcwd(), "anontest") TEST_FILE = "test.mp4" DATASET_PATH = "src/lib/anonymization/dataset/input" ACCEPTED_FILE_EXTENSION = ".mp4" TEST_FILE_PATH = os.path.join(TEST_DIR, TEST_FILE) def setUp(self): # Create test directory and copy one of the test videos from the anonymization repo into it if not os.path.exists(self.TEST_DIR): os.mkdir(self.TEST_DIR) def tearDown(self): # Delete test directory if os.path.exists(self.TEST_DIR): shutil.rmtree(self.TEST_DIR) def test_compiles(self): self.assertEqual(True, True)
""" # Test that the executor works with a single video def test_face_blurrer_single(self): # Copy video to test directory shutil.copy2(os.path.join(os.getcwd(), self.DATASET_PATH, "man_face.mp4"), self.TEST_FILE_PATH) video = VideoItem(filepath = self.TEST_FILE_PATH, metadata=None) original_data = video.npy # Running the face blurrer should overwrite the input file face_blurrer = FaceBlurrer() new_data = face_blurrer.run(video) # Now we check that the video data has changed assert_raises(AssertionError, assert_array_equal, original_data, new_data) """ if __name__ == '__main__': unittest.main()
get_capabilities.py
# -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Vyatta.Vyatta.get_capabilities # --------------------------------------------------------------------- # Copyright (C) 2007-2015 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # NOC modules from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript class Script(BaseScript): name = "Vyatta.Vyatta.get_capabilities"
def has_lldp_cli(self): """ Check box has lldp enabled """ r = self.cli("show lldp neighbors") return "LLDP not configured" not in r
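# --- Illustrative helper (not part of the NOC profile above) ---
# The capability check boils down to a substring test on CLI output; factored
# out like this, the logic is unit-testable without a device session.

def lldp_enabled(cli_output):
    # Vyatta prints "LLDP not configured" when the feature is disabled
    return "LLDP not configured" not in cli_output

assert lldp_enabled("Neighbor table ...")
assert not lldp_enabled("LLDP not configured")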
FormatPluginFontBasicKerningUtil.py
#!/usr/bin/env python3 #**************************************************************************************************************************************************** #* BSD 3-Clause License #* #* Copyright (c) 2015, Mana Battery #* All rights reserved. #* #* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: #* #* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. #* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the #* documentation and/or other materials provided with the distribution. #* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this #* software without specific prior written permission. #* #* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, #* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR #* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, #* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #**************************************************************************************************************************************************** def AddHeader(list, version): # Magic # Version AddUInt32(list, 0x00415442) AddUInt32(list, version) def AddByteToList(list, value): if( value < 0 or value > 255 ): raise Exception("Invalid value") list.append(value & 0xFF) def SetByteInList(list, index, value): if( value < 0 or value > 255 ): raise Exception("Invalid value") list[index] = value & 0xFF def SetUInt32(list, index, value): SetByteInList(list, index, value & 0xFF) SetByteInList(list, index + 1, (value & 0xFF00) >> 8) SetByteInList(list, index + 2, (value & 0xFF0000) >> 16) SetByteInList(list, index + 3, (value & 0xFF000000) >> 24) def
(list, value): AddByteToList(list, value & 0xFF) AddByteToList(list, (value & 0xFF00) >> 8) AddByteToList(list, (value & 0xFF0000) >> 16) AddByteToList(list, (value & 0xFF000000) >> 24) def AddInt32(list, value): AddByteToList(list, value & 0xFF) AddByteToList(list, (value & 0xFF00) >> 8) AddByteToList(list, (value & 0xFF0000) >> 16) AddByteToList(list, (value & 0xFF000000) >> 24) def AddEncodedInt32(list, value): # ZigZag encode signed numbers if value >= 0: return AddEncodedUInt32(list, value << 1); else: return AddEncodedUInt32(list, (value << 1) ^ (~0)) # Encodes a integer into a variable length encoding where the length can be determined from the first byte. # in a way that favors small values. def AddEncodedUInt32(list, value): if (value <= 0x7F): # <=7 bits value AddByteToList(list, value) return 1 elif (value <= 0x3FFF): # <=14 bits value AddByteToList(list, 0x80 | (value & 0x3F)) AddByteToList(list, (value & 0x3FC0) >> 6) return 2 elif (value <= 0x1FFFFF): # <=21 bits value AddByteToList(list, 0xC0 | (value & 0x1F)) AddByteToList(list, (value & 0x001FE0) >> 5) AddByteToList(list, (value & 0x1FE000) >> 13) return 3 elif (value <= 0xFFFFFFF): # <=28 bits value AddByteToList(list, 0xE0 | (value & 0x0F)) AddByteToList(list, (value & 0x00000FF0) >> 4) AddByteToList(list, (value & 0x000FF000) >> 12) AddByteToList(list, (value & 0x0FF00000) >> 20) return 4 else: # >28 bits value AddByteToList(list, 0xF0 | (value & 0x07)) AddByteToList(list, (value & 0x000007F8) >> 3) AddByteToList(list, (value & 0x0007F800) >> 11) AddByteToList(list, (value & 0x07F80000) >> 19) AddByteToList(list, (value & 0xF8000000) >> 27) return 5; def AddString(list, value): value = bytearray(value.encode('utf-8')) AddEncodedUInt32(list, len(value)) for entry in value: AddByteToList(list, entry)
AddUInt32
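# --- Hedged round-trip sketch: ReadEncodedUInt32 is illustrative and not part of
# the original module. It decodes the variable-length format that AddEncodedUInt32
# produces above: the run of leading 1-bits in the first byte selects the 1..5
# byte form, and the remaining bits hold the low bits of the value.

def ReadEncodedUInt32(data, index):
    first = data[index]
    if (first & 0x80) == 0:      # 1-byte form: 7 value bits
        return first, index + 1
    if (first & 0xC0) == 0x80:   # 2-byte form: 6 + 8 value bits
        return (first & 0x3F) | (data[index + 1] << 6), index + 2
    if (first & 0xE0) == 0xC0:   # 3-byte form: 5 + 16 value bits
        return (first & 0x1F) | (data[index + 1] << 5) | (data[index + 2] << 13), index + 3
    if (first & 0xF0) == 0xE0:   # 4-byte form: 4 + 24 value bits
        value = (first & 0x0F) | (data[index + 1] << 4) | (data[index + 2] << 12) | (data[index + 3] << 20)
        return value, index + 4
    # 5-byte form: 3 + 29 value bits (the last byte carries bits 27..31)
    value = ((first & 0x07) | (data[index + 1] << 3) | (data[index + 2] << 11)
             | (data[index + 3] << 19) | (data[index + 4] << 27))
    return value, index + 5

# Round-trip check across all five length classes:
for value in (0, 0x7F, 0x80, 0x3FFF, 0x4000, 0x1FFFFF, 0xFFFFFFF, 0xFFFFFFFF):
    buffer = []
    AddEncodedUInt32(buffer, value)
    decoded, _ = ReadEncodedUInt32(buffer, 0)
    assert decoded == value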
transformation.py
import os
import sys
import math

import numpy as np
import torch

src_dir = os.path.dirname(os.path.realpath(__file__))
while not src_dir.endswith("sfa"):
    src_dir = os.path.dirname(src_dir)
if src_dir not in sys.path:
    sys.path.append(src_dir)

from config import kitti_config as cnf


def angle_in_limit(angle):
    # To limit the angle in -pi/2 - pi/2
    limit_degree = 5
    while angle >= np.pi / 2:
        angle -= np.pi
    while angle < -np.pi / 2:
        angle += np.pi
    if abs(angle + np.pi / 2) < limit_degree / 180 * np.pi:
        angle = np.pi / 2
    return angle


# Camera coordinate system to lidar coordinate system
def camera_to_lidar(x, y, z, V2C=None, R0=None, P2=None):
    p = np.array([x, y, z, 1])
    if V2C is None or R0 is None:
        p = np.matmul(cnf.R0_inv, p)
        p = np.matmul(cnf.Tr_velo_to_cam_inv, p)
    else:
        # Build the 4x4 coordinate transformation matrix
        R0_i = np.zeros((4, 4))
        R0_i[:3, :3] = R0
        R0_i[3, 3] = 1
        p = np.matmul(np.linalg.inv(R0_i), p)  # np.linalg.inv() computes the matrix inverse
        p = np.matmul(inverse_rigid_trans(V2C), p)
    p = p[0:3]
    return tuple(p)


# Lidar coordinate system to camera coordinate system
def lidar_to_camera(x, y, z, V2C=None, R0=None, P2=None):
    p = np.array([x, y, z, 1])  # first turn the point (x, y, z) into homogeneous coordinates
    if V2C is None or R0 is None:
        p = np.matmul(cnf.Tr_velo_to_cam, p)  # transform from the lidar frame into the camera frame
        p = np.matmul(cnf.R0, p)  # rectification: project the Velodyne point into reference camera 0
    else:
        p = np.matmul(V2C, p)
        p = np.matmul(R0, p)
    p = p[0:3]
    return tuple(p)


def camera_to_lidar_point(points):
    # (N, 3) -> (N, 3)
    N = points.shape[0]
    points = np.hstack([points, np.ones((N, 1))]).T  # (N, 4) -> (4, N)
    points = np.matmul(cnf.R0_inv, points)
    points = np.matmul(cnf.Tr_velo_to_cam_inv, points).T  # (4, N) -> (N, 4)
    points = points[:, 0:3]
    return points.reshape(-1, 3)


def lidar_to_camera_point(points, V2C=None, R0=None):
    # (N, 3) -> (N, 3)
    N = points.shape[0]
    points = np.hstack([points, np.ones((N, 1))]).T  # append an (N, 1) column of ones, then transpose
    if V2C is None or R0 is None:
        points = np.matmul(cnf.Tr_velo_to_cam, points)
        points = np.matmul(cnf.R0, points).T
    else:
        points = np.matmul(V2C, points)
        points = np.matmul(R0, points).T
    points = points[:, 0:3]
    return points.reshape(-1, 3)


# Convert x, y, z from the camera frame to the lidar frame, returning the full
# bbox description (x, y, z, h, w, l, rz)
def camera_to_lidar_box(boxes, V2C=None, R0=None, P2=None):
    # (N, 7) -> (N, 7) x, y, z, h, w, l, r
    ret = []
    for box in boxes:
        x, y, z, h, w, l, ry = box
        # convert camera x, y, z into lidar x, y, z and derive rz from ry
        (x, y, z), h, w, l, rz = camera_to_lidar(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -ry - np.pi / 2
        # rz = angle_in_limit(rz)
        ret.append([x, y, z, h, w, l, rz])
    return np.array(ret).reshape(-1, 7)


# Convert x, y, z from the lidar frame to the camera frame, returning the full
# bbox description (x, y, z, h, w, l, ry)
def lidar_to_camera_box(boxes, V2C=None, R0=None, P2=None):
    # (N, 7) -> (N, 7) x, y, z, h, w, l, r
    # In test mode the loaded predictions carry an extra score field
    ret = []
    for box in boxes:
        # x, y, z, h, w, l, rz, score = box
        x, y, z, h, w, l, rz = box
        # convert lidar x, y, z into camera x, y, z
        # (x, y, z), h, w, l, ry, score = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2, score
        (x, y, z), h, w, l, ry = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2
        # ry = angle_in_limit(ry)
        # ret.append([x, y, z, h, w, l, ry, score])
        ret.append([x, y, z, h, w, l, ry])
    # return np.array(ret).reshape(-1, 8)
    return np.array(ret).reshape(-1, 7)


def center_to_corner_box2d(boxes_center, coordinate='lidar'):
    # (N, 5) -> (N, 4, 2)
    N = boxes_center.shape[0]
    boxes3d_center = np.zeros((N, 7))
    boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center
    boxes3d_corner = center_to_corner_box3d(boxes3d_center, coordinate=coordinate)
    return boxes3d_corner[:, 0:4, 0:2]


# Convert the center representation of a 3D bbox into its eight corner points
def center_to_corner_box3d(boxes_center, coordinate='lidar'):
    # (N, 7) -> (N, 8, 3)
    N = boxes_center.shape[0]
    ret = np.zeros((N, 8, 3), dtype=np.float32)  # the eight corner coordinates of each sample's 3D bbox
    if coordinate == 'camera':
        boxes_center = camera_to_lidar_box(boxes_center)  # camera-frame boxes are converted to the lidar frame first
    for i in range(N):  # loop over samples
        box = boxes_center[i]
        translation = box[0:3]  # x, y, z
        size = box[3:6]  # h, w, l
        rotation = [0, 0, box[-1]]  # [0, 0, rz]
        h, w, l = size[0], size[1], size[2]
        # the eight corners of the 3D bbox,
        # in velodyne coordinates around zero point and without orientation yet
        trackletBox = np.array([
            [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
            [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
            [0, 0, 0, 0, h, h, h, h]])
        # re-create 3D bounding box in velodyne coordinate system
        yaw = rotation[2]  # yaw angle around the z-axis
        rotMat = np.array([
            [np.cos(yaw), -np.sin(yaw), 0.0],
            [np.sin(yaw), np.cos(yaw), 0.0],
            [0.0, 0.0, 1.0]])
        # Rotate the box by the heading angle, then add the (x, y, z) center to each of
        # the eight corners to get the final corner coordinates after the rz rotation.
        # np.tile copies the translation 8 times along the first axis before the transpose.
        cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T
        box3d = cornerPosInVelo.transpose()
        ret[i] = box3d
    if coordinate == 'camera':  # convert the corners back from the lidar frame to the camera frame
        for idx in range(len(ret)):
            ret[idx] = lidar_to_camera_point(ret[idx])
    return ret


CORNER2CENTER_AVG = True


# Convert the eight-corner representation of a 3D bbox back to the center representation
def corner_to_center_box3d(boxes_corner, coordinate='camera'):
    # (N, 8, 3) -> (N, 7) x, y, z, h, w, l, ry/z
    if coordinate == 'lidar':  # lidar-frame corners are converted to the camera frame first
        for idx in range(len(boxes_corner)):
            boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx])
    ret = []
    for roi in boxes_corner:
        if CORNER2CENTER_AVG:  # average version
            roi = np.array(roi)
            # in camera coordinates the y-axis is height: the first four corners have y
            # near 0 and the last four near h, so average the four differences
            h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4)
            # Euclidean distance between front/back-adjacent corners, averaged over four
            # edges; [0, 2] selects the x and z coordinates
            w = np.sum(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
            ) / 4
            # Euclidean distance between left/right-adjacent corners, averaged over four edges
            l = np.sum(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
            ) / 4
            x = np.sum(roi[:, 0], axis=0) / 8  # average the x coordinate over all eight corners
            y = np.sum(roi[0:4, 1], axis=0) / 4  # average the y coordinate over the first four corners
            z = np.sum(roi[:, 2], axis=0) / 8  # average the z coordinate over all eight corners
            # average the heading angle
            ry = np.sum(
                math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
                math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
                math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
                math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
                math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
                math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
                math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
                math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
            ) / 8
            if w > l:
                w, l = l, w
                ry = ry - np.pi / 2
            elif l > w:
                l, w = w, l
                ry = ry - np.pi / 2
            ret.append([x, y, z, h, w, l, ry])
        else:  # max version
            # the first four corners sit near 0 and the last four near h; take the
            # maximum of the four differences
            h = max(abs(roi[:4, 1] - roi[4:, 1]))
            w = np.max(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
            )
            l = np.max(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
            )
            x = np.sum(roi[:, 0], axis=0) / 8
            y = np.sum(roi[0:4, 1], axis=0) / 4
            z = np.sum(roi[:, 2], axis=0) / 8
            ry = np.sum(
                math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
                math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
                math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
                math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
                math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
                math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
                math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
                math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
            ) / 8
            if w > l:
                w, l = l, w
                ry = angle_in_limit(ry + np.pi / 2)
            ret.append([x, y, z, h, w, l, ry])
    if coordinate == 'lidar':
        ret = camera_to_lidar_box(np.array(ret))
    return np.array(ret)


def point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):
    # Input:
    #   points: (N, 3)
    #   rx/y/z: in radians
    # Output:
    #   points: (N, 3)
    N = points.shape[0]
    points = np.hstack([points, np.ones((N, 1))])
    # translate the point cloud
    mat1 = np.eye(4)
    mat1[3, 0:3] = tx, ty, tz
    points = np.matmul(points, mat1)
    # rotate the point cloud
    # 4x4 rotation matrix around the x-axis
    if rx != 0:
        mat = np.zeros((4, 4))
        mat[0, 0] = 1
        mat[3, 3] = 1
        mat[1, 1] = np.cos(rx)
        mat[1, 2] = -np.sin(rx)
        mat[2, 1] = np.sin(rx)
        mat[2, 2] = np.cos(rx)
        points = np.matmul(points, mat)
    # 4x4 rotation matrix around the y-axis
    if ry != 0:
        mat = np.zeros((4, 4))
        mat[1, 1] = 1
        mat[3, 3] = 1
        mat[0, 0] = np.cos(ry)
        mat[0, 2] = np.sin(ry)
        mat[2, 0] = -np.sin(ry)
        mat[2, 2] = np.cos(ry)
        points = np.matmul(points, mat)
    # 4x4 rotation matrix around the z-axis
    if rz != 0:
        mat = np.zeros((4, 4))
        mat[2, 2] = 1
        mat[3, 3] = 1
        mat[0, 0] = np.cos(rz)
        mat[0, 1] = -np.sin(rz)
        mat[1, 0] = np.sin(rz)
        mat[1, 1] = np.cos(rz)
        points = np.matmul(points, mat)
    return points[:, 0:3]


# Return the transformed labels: lidar labels when working in lidar coordinates,
# camera labels otherwise
def box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):
    # Input:
    #   boxes: (N, 7) x y z h w l rz/y
    # Output:
    #   boxes: (N, 7) x y z h w l rz/y
    # Convert each label's center + size into the eight corner points of its 3D bbox
    # (this step has to happen in lidar coordinates). If the input labels are in lidar
    # coordinates, lidar corners are returned; for camera-frame labels the corners are
    # converted back to the camera frame.
    boxes_corner = center_to_corner_box3d(boxes, coordinate=coordinate)  # (N, 8, 3)
    for idx in range(len(boxes_corner)):
        if coordinate == 'lidar':
            boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, rz=r)  # in the lidar frame the heading rotates around the z-axis
        else:
            boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, ry=r)  # in the camera frame the heading rotates around the y-axis
    return corner_to_center_box3d(boxes_corner, coordinate=coordinate)


# Rigid-body coordinate transform
def inverse_rigid_trans(Tr):
    ''' Inverse a rigid body transform matrix (3x4 as [R|t]) [R'|-R't; 0|1] '''
    inv_Tr = np.zeros_like(Tr)  # 3x4
    inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
    inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
    return inv_Tr


# Combine several augmentation methods
class Compose(object):
    def __init__(self, transforms, p=1.0):
        self.transforms = transforms
        self.p = p

    def __call__(self, lidar, labels):
        if np.random.random() <= self.p:
            for t in self.transforms:
                lidar, labels = t(lidar, labels)
        return lidar, labels


# Apply a single, randomly chosen augmentation method
class OneOf(object):
    def __init__(self, transforms, p=1.0):
        self.transforms = transforms
        self.p = p

    def __call__(self, lidar, labels):
        if np.random.random() <= self.p:
            choice = np.random.randint(low=0, high=len(self.transforms))
            lidar, labels = self.transforms[choice](lidar, labels)
        return lidar, labels


class Random_Rotation(object):
    def __init__(self, limit_angle=np.pi / 4, p=0.5):
        self.limit_angle = limit_angle
        self.p = p

    def __call__(self, lidar, labels):
        """
        :param labels: # (N', 7) x, y, z, h, w, l, r
        :return:
        """
        if np.random.random() <= self.p:
            # draw a random angle between -limit_angle and limit_angle
            angle = np.random.uniform(-self.limit_angle, self.limit_angle)
            # rotate the point cloud around the z-axis
            lidar[:, 0:3] = point_transform(lidar[:, 0:3], 0, 0, 0, rz=angle)
            # rotate the corresponding labels as well
            labels = box_transform(labels, 0, 0, 0, r=angle, coordinate='lidar')
        return lidar, labels


class Random_Scaling(object):
    def __init__(self, scaling_range=(0.95, 1.05), p=0.5):
        self.scaling_range = scaling_range
        self.p = p

    def __call__(self, lidar, labels):
        """
        :param labels: # (N', 7) x, y, z, h, w, l, r
        :return:
        """
        if np.random.random() <= self.p:
            # scaling factor drawn from [min, max] of the configured range
            # (was uniform(range[0], range[0]), which always returned the lower bound)
            factor = np.random.uniform(self.scaling_range[0], self.scaling_range[1])
            # scale both the lidar points and the labels
            lidar[:, 0:3] = lidar[:, 0:3] * factor
            labels[:, 0:6] = labels[:, 0:6] * factor
        return lidar, labels


class Cutout(object):
    """Randomly mask out one or more patches from an image.
    Args:
        n_holes (int): Number of patches to cut out of each image.
        length (int): The length (in pixels) of each square patch.
    Adapted from: https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
    """

    def __init__(self, n_holes, ratio, fill_value=0., p=1.0):
        self.n_holes = n_holes
        self.ratio = ratio
        assert 0. <= fill_value <= 1., "the fill value must be in the range 0 to 1"
        self.fill_value = fill_value
        self.p = p

    def __call__(self, img, targets):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W).
        Returns:
            Tensor: Image with n_holes of dimension length x length cut out of it.
        """
        if np.random.random() <= self.p:
            h = img.size(1)
            w = img.size(2)
            h_cutout = int(self.ratio * h)
            w_cutout = int(self.ratio * w)
            for n in range(self.n_holes):
                y = np.random.randint(h)
                x = np.random.randint(w)
                y1 = np.clip(y - h_cutout // 2, 0, h)
                y2 = np.clip(y + h_cutout // 2, 0, h)
                x1 = np.clip(x - w_cutout // 2, 0, w)
                x2 = np.clip(x + w_cutout // 2, 0, w)
                img[:, y1: y2, x1: x2] = self.fill_value  # Zero out the selected area
                # Remove targets that are in the selected area
                keep_target = []
                for target_idx, target in enumerate(targets):
                    _, _, target_x, target_y, target_w, target_l, _, _ = target
                    if (x1 <= target_x * w <= x2) and (y1 <= target_y * h <= y2):
                        continue
                    keep_target.append(target_idx)
                targets = targets[keep_target]
        return img, targets
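# --- Usage sketch (illustrative; not part of the original module) ---
# Shapes follow the comments above: `lidar` is (N, 4) [x, y, z, intensity] and
# `labels` is (M, 7) [x, y, z, h, w, l, rz] in lidar coordinates. Compose applies
# every transform in turn; OneOf would instead pick a single one at random.

def demo_augmentation():
    lidar = np.random.rand(100, 4).astype(np.float32)
    labels = np.array([[10.0, 0.0, -1.0, 1.5, 1.6, 3.9, 0.1]], dtype=np.float32)
    augment = Compose([
        Random_Rotation(limit_angle=np.pi / 4, p=0.5),
        Random_Scaling(scaling_range=(0.95, 1.05), p=0.5),
    ], p=1.0)
    return augment(lidar, labels)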
fastRequests.py
# import threading
from pathlib import Path
from multiprocessing.dummy import Pool as ThreadPool
from more_itertools import unique_everseen
import requests, json, datetime
from scripts.byteSize import human_byte_size

# Initialization
Total_Size = 0
Processed_URLs = 0
Progress = 0
Total_URLs = 0
Rate = 0
Report = False
ReportJson = []

""" Main function to gather info about a URL """
def url_info(URL):
    linkStatus = {}
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    if URL not in [' ', '']:  # Ignore any whitespace entries within the list
        File_Size = 0  # Initialize
        Status_Code = None  # Stays None if the request itself fails
        try:
            fileLink = requests.head(URL, stream=True)  # Get the link header info
            fileLink.raise_for_status()  # To catch 404 and 500 series errors early
            Status_Code = fileLink.status_code
            # Why GET is used instead of HEAD (only the headers are read, the body is
            # never downloaded thanks to stream=True), source:
            # https://stackoverflow.com/questions/14270698/get-file-size-using-python-requests-while-only-getting-the-header
            HEAD = requests.get(URL, stream=True).headers
            File_Size = int(HEAD['Content-length'])
            Progress += Rate
            Processed_URLs = Processed_URLs + 1
            Total_Size += File_Size
            print('URLs Done:{0}/{1} File Size:{2} Total Size:{3} Progress:{4:.2f}%'.format(
                Processed_URLs, Total_URLs, human_byte_size(File_Size),
                human_byte_size(Total_Size), Progress))
        except requests.exceptions.HTTPError as errh:
            print("Http Error:", errh)
        except requests.exceptions.ConnectionError as errc:
            print("Error Connecting:", errc)
        except requests.exceptions.Timeout as errt:
            print("Timeout Error:", errt)
        except requests.exceptions.RequestException as err:
            print("Oops: Something Else", err)
        if Report is True:
            linkStatus['link'] = URL
            linkStatus['size'] = human_byte_size(File_Size)
            # Previously this read fileLink.status_code, which raised NameError when
            # the request failed before fileLink was assigned
            linkStatus['status'] = Status_Code
            linkStatus['last-checked'] = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
            ReportJson.append(linkStatus)


def thread_series_creator(List_Of_URLs):
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    # Make the Pool of workers
    pool = ThreadPool(100)
    # Open the urls in their own threads and return the results
    results = pool.map(url_info, List_Of_URLs)
    # close the pool and wait for the work to finish
    pool.close()
    pool.join()


def main(file_path, report=False):
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    # Check that the path exists and points to a file
    file_of_links = Path(file_path)
    if file_of_links.is_file():
        try:
            # Preprocessing
            with open(file_of_links, 'r') as f:
                # Load the URLs into a list for faster access,
                # removing duplicates without changing their order
                List_of_URLs = list(unique_everseen(f.read().splitlines()))
            Total_URLs = len(List_of_URLs)  # Total number of links
            Rate = 100 / Total_URLs if Total_URLs else 0  # Percentage each link contributes
        except IOError:
            print("IO Error : Unable to read from file")
            print("Exiting...")
            return
    else:
        print("Error! Invalid file path!")
        print("Exiting...")
        return
    Report = report
    thread_series_creator(List_of_URLs)
    if Report is True:
        # Creating report
        Date = datetime.date.today().strftime('%d.%b.%Y')
        with open("muchspace.Report." + Date + ".json", "w") as write_file:
            json.dump(ReportJson, write_file, indent=4)
    # Final Console Report
    print("******Final Diagnostic Report******")
    print("Total URLs: {0} Processed URLs: {1} Rate of completion: {2:.2f}%".format(
        Total_URLs, Processed_URLs, Progress))
    print("Total size of {}/{} links is: {}".format(
        Processed_URLs, Total_URLs, human_byte_size(Total_Size)))
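# --- Hedged refactor sketch (not part of the script above) ---
# pool.map already returns one result per URL, so the sizes can be aggregated
# without shared globals (the module-level counters above are mutated from many
# threads at once, which makes them racy). Function names here are illustrative.

def url_size(url, timeout=10):
    try:
        with requests.get(url, stream=True, timeout=timeout) as resp:
            resp.raise_for_status()
            # Only the headers are read; stream=True keeps the body undownloaded
            return int(resp.headers.get('Content-length', 0))
    except requests.exceptions.RequestException:
        return 0

def total_size(urls, workers=100):
    pool = ThreadPool(workers)
    try:
        return sum(pool.map(url_size, urls))
    finally:
        pool.close()
        pool.join()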
end-to-end.test.ts
import assert from 'assert' import expect from 'expect' import got from 'got' import { random, sortBy } from 'lodash' import { describe, test, before, beforeEach, after, afterEach } from 'mocha' import MockDate from 'mockdate' import { gql } from '@sourcegraph/shared/src/graphql/graphql' import { ExternalServiceKind } from '@sourcegraph/shared/src/graphql/schema' import { getConfig } from '@sourcegraph/shared/src/testing/config' import { afterEachRecordCoverage } from '@sourcegraph/shared/src/testing/coverage' import { createDriverForTest, Driver, percySnapshot } from '@sourcegraph/shared/src/testing/driver' import { afterEachSaveScreenshotIfFailed } from '@sourcegraph/shared/src/testing/screenshotReporter' import { retry } from '@sourcegraph/shared/src/testing/utils' import { Settings } from '../schema/settings.schema' const { gitHubToken, sourcegraphBaseUrl } = getConfig('gitHubToken', 'sourcegraphBaseUrl') describe('e2e test suite', () => { let driver: Driver before(async function () { // Cloning the repositories takes ~1 minute, so give initialization 2 // minutes instead of 1 (which would be inherited from // `jest.setTimeout(1 * 60 * 1000)` above). this.timeout(5 * 60 * 1000) // Reset date mocking MockDate.reset() const config = getConfig('headless', 'slowMo', 'testUserPassword') // Start browser driver = await createDriverForTest({ sourcegraphBaseUrl, logBrowserConsole: true, ...config, }) const clonedRepoSlugs = [ 'sourcegraph/java-langserver', 'gorilla/mux', 'gorilla/securecookie', 'sourcegraph/jsonrpc2', 'sourcegraph/go-diff', 'sourcegraph/appdash', 'sourcegraph/sourcegraph-typescript', 'sourcegraph-testing/automation-e2e-test', 'sourcegraph/e2e-test-private-repository', ] const alwaysCloningRepoSlugs = ['sourcegraphtest/AlwaysCloningTest'] await driver.ensureLoggedIn({ username: 'test', password: config.testUserPassword, email: '[email protected]' }) await driver.resetUserSettings() await driver.ensureHasExternalService({ kind: ExternalServiceKind.GITHUB, displayName: 'test-test-github', config: JSON.stringify({ url: 'https://github.com', token: gitHubToken, repos: clonedRepoSlugs.concat(alwaysCloningRepoSlugs), }), ensureRepos: clonedRepoSlugs.map(slug => `github.com/${slug}`), alwaysCloning: alwaysCloningRepoSlugs.map(slug => `github.com/${slug}`), }) }) after('Close browser', () => driver?.close()) afterEachSaveScreenshotIfFailed(() => driver.page) afterEachRecordCoverage(() => driver) beforeEach(async () => { if (driver) { // Clear local storage to reset sidebar selection (files or tabs) for each test await driver.page.evaluate(() => { localStorage.setItem('repo-revision-sidebar-last-tab', 'files') }) await driver.resetUserSettings() } }) describe('Core functionality', () => { test('Check settings are saved and applied', async () => { await driver.page.goto(sourcegraphBaseUrl + '/users/test/settings') await driver.page.waitForSelector('.test-settings-file .monaco-editor') const message = 'A wild notice appears!' 
await driver.replaceText({ selector: '.test-settings-file .monaco-editor', newText: JSON.stringify({ notices: [ { dismissable: false, location: 'top', message, }, ], }), selectMethod: 'keyboard', }) await driver.page.click('.test-settings-file .test-save-toolbar-save') await driver.page.waitForSelector('.test-global-alert .notices .global-alerts__alert', { visible: true }) await driver.page.evaluate((message: string) => { const element = document.querySelector<HTMLElement>('.test-global-alert .notices .global-alerts__alert') if (!element) { throw new Error('No .test-global-alert .notices .global-alerts__alert element found') } if (!element.textContent?.includes(message)) { throw new Error(`Expected "${message}" message, but didn't find it`) } }, message) }) test('Check access tokens work (create, use and delete)', async () => { await driver.page.goto(sourcegraphBaseUrl + '/users/test/settings/tokens/new') await driver.page.waitForSelector('.test-create-access-token-description') const name = `E2E Test ${new Date().toISOString()} ${random(1, 1e7)}` await driver.replaceText({ selector: '.test-create-access-token-description', newText: name, selectMethod: 'keyboard', }) await driver.page.click('.test-create-access-token-submit') const token = (await ( await driver.page.waitForFunction( () => document.querySelector<HTMLInputElement>('.test-access-token input[type=text]')?.value ) ).jsonValue()) as string | null assert(token) const response = await got.post('.api/graphql', { prefixUrl: sourcegraphBaseUrl, headers: { Authorization: 'token ' + token, }, body: JSON.stringify({ query: gql` query { currentUser { username } } `, variables: {}, }), }) const username = JSON.parse(response.body).data.currentUser.username expect(username).toBe('test') await Promise.all([ driver.acceptNextDialog(), ( await driver.page.waitForSelector( `[data-test-access-token-description="${name}"] .test-access-token-delete`, { visible: true } ) ).click(),
]) await driver.page.waitForFunction( (name: string) => !document.querySelector(`[data-test-access-token-description="${name}"]`),
{}, name ) }) test('Check allowed usernames', async () => { await driver.page.goto(sourcegraphBaseUrl + '/users/test/settings/profile') await driver.page.waitForSelector('.test-UserProfileFormFields-username') const name = 'alice.bob-chris-' await driver.replaceText({ selector: '.test-UserProfileFormFields-username', newText: name, selectMethod: 'selectall', }) await driver.page.click('#test-EditUserProfileForm__save') await driver.page.waitForSelector('.test-EditUserProfileForm__success', { visible: true }) await driver.page.goto(sourcegraphBaseUrl + `/users/${name}/settings/profile`) await driver.replaceText({ selector: '.test-UserProfileFormFields-username', newText: 'test', selectMethod: 'selectall', }) await driver.page.click('#test-EditUserProfileForm__save') await driver.page.waitForSelector('.test-EditUserProfileForm__success', { visible: true }) }) }) describe('External services', () => { test('External service add, edit, delete', async () => { const displayName = 'test-github-test-2' await driver.ensureHasExternalService({ kind: ExternalServiceKind.GITHUB, displayName, config: '{"url": "https://github.myenterprise.com", "token": "initial-token", "repositoryQuery": ["none"]}', }) await driver.page.goto(sourcegraphBaseUrl + '/site-admin/external-services') await ( await driver.page.waitForSelector( `[data-test-external-service-name="${displayName}"] .test-edit-external-service-button` ) ).click() // Type in a new external service configuration. await driver.replaceText({ selector: '.test-external-service-editor .monaco-editor', newText: '{"url": "https://github.myenterprise.com", "token": "second-token", "repositoryQuery": ["none"]}', selectMethod: 'selectall', enterTextMethod: 'paste', }) await driver.page.click('.test-update-external-service-button') // Must wait for the operation to complete, or else a "Discard changes?" 
dialog will pop up await driver.page.waitForSelector('.test-update-external-service-button:not([disabled])', { visible: true }) await ( await driver.page.waitForSelector('.list-group-item[href="/site-admin/external-services"]', { visible: true, }) ).click() await Promise.all([ driver.acceptNextDialog(), ( await driver.page.waitForSelector( '[data-test-external-service-name="test-github-test-2"] .test-delete-external-service-button', { visible: true } ) ).click(), ]) await driver.page.waitFor( () => !document.querySelector('[data-test-external-service-name="test-github-test-2"]') ) }) test('External service repositoryPathPattern', async () => { const repo = 'sourcegraph/go-blame' // Tiny repo, fast to clone const repositoryPathPattern = 'foobar/{host}/{nameWithOwner}' const slug = `github.com/${repo}` const pathPatternSlug = `foobar/github.com/${repo}` const config = { kind: ExternalServiceKind.GITHUB, displayName: 'test-test-github-repoPathPattern', config: JSON.stringify({ url: 'https://github.com', token: gitHubToken, repos: [repo], repositoryPathPattern, }), // Make sure repository is named according to path pattern ensureRepos: [pathPatternSlug], } await driver.ensureHasExternalService(config) // Make sure repository slug without path pattern redirects to path pattern await driver.page.goto(sourcegraphBaseUrl + '/' + slug) await driver.assertWindowLocationPrefix('/' + pathPatternSlug) }) const awsAccessKeyID = process.env.AWS_ACCESS_KEY_ID const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY const awsCodeCommitUsername = process.env.AWS_CODE_COMMIT_GIT_USERNAME const awsCodeCommitPassword = process.env.AWS_CODE_COMMIT_GIT_PASSWORD const testIfAwsCredentialsSet = awsSecretAccessKey && awsAccessKeyID && awsCodeCommitUsername && awsCodeCommitPassword ? test : test.skip.bind(test) testIfAwsCredentialsSet('AWS CodeCommit', async () => { await driver.ensureHasExternalService({ kind: ExternalServiceKind.AWSCODECOMMIT, displayName: 'test-aws-code-commit', config: JSON.stringify({ region: 'us-west-1', accessKeyID: awsAccessKeyID, secretAccessKey: awsSecretAccessKey, repositoryPathPattern: 'aws/{name}', gitCredentials: { username: awsCodeCommitUsername, password: awsCodeCommitPassword, }, }), ensureRepos: ['aws/test'], }) await driver.page.goto(sourcegraphBaseUrl + '/aws/test/-/blob/README') const blob = (await ( await driver.page.waitFor(() => document.querySelector<HTMLElement>('.test-repo-blob')?.textContent) ).jsonValue()) as string | null expect(blob).toBe('README\n\nchange') }) const bbsURL = process.env.BITBUCKET_SERVER_URL const bbsToken = process.env.BITBUCKET_SERVER_TOKEN const bbsUsername = process.env.BITBUCKET_SERVER_USERNAME const testIfBBSCredentialsSet = bbsURL && bbsToken && bbsUsername ? 
test : test.skip.bind(test) testIfBBSCredentialsSet('Bitbucket Server', async () => { await driver.ensureHasExternalService({ kind: ExternalServiceKind.BITBUCKETSERVER, displayName: 'test-bitbucket-server', config: JSON.stringify({ url: bbsURL, token: bbsToken, username: bbsUsername, repos: ['SOURCEGRAPH/jsonrpc2'], repositoryPathPattern: 'bbs/{projectKey}/{repositorySlug}', }), ensureRepos: ['bbs/SOURCEGRAPH/jsonrpc2'], }) await driver.page.goto(sourcegraphBaseUrl + '/bbs/SOURCEGRAPH/jsonrpc2/-/blob/.travis.yml') const blob = (await ( await driver.page.waitFor(() => document.querySelector<HTMLElement>('.test-repo-blob')?.textContent) ).jsonValue()) as string | null expect(blob).toBe('language: go\ngo: \n - 1.x\n\nscript:\n - go test -race -v ./...') }) }) describe('Visual tests', () => { test('Repositories list', async () => { await driver.page.goto(sourcegraphBaseUrl + '/site-admin/repositories?query=gorilla%2Fmux') await driver.page.waitForSelector('a[href="/github.com/gorilla/mux"]', { visible: true }) await percySnapshot(driver.page, 'Repositories list') }) test('Search results repo', async () => { await driver.page.goto( sourcegraphBaseUrl + '/search?q=repo:%5Egithub.com/gorilla/mux%24&patternType=regexp' ) await driver.page.waitForSelector('a[href="/github.com/gorilla/mux"]', { visible: true }) // Flaky https://github.com/sourcegraph/sourcegraph/issues/2704 // await percySnapshot(page, 'Search results repo') }) test('Search results file', async () => { await driver.page.goto( sourcegraphBaseUrl + '/search?q=repo:%5Egithub.com/gorilla/mux%24+file:%5Emux.go%24&patternType=regexp' ) await driver.page.waitForSelector('a[href="/github.com/gorilla/mux"]', { visible: true }) // Flaky https://github.com/sourcegraph/sourcegraph/issues/2704 // await percySnapshot(page, 'Search results file') }) test('Search visibility:private|public', async () => { const privateRepos = ['github.com/sourcegraph/e2e-test-private-repository'] await driver.page.goto(sourcegraphBaseUrl + '/search?q=type:repo+visibility:private') await driver.page.waitForFunction(() => document.querySelectorAll('.test-search-result').length >= 1) const privateResults = await driver.page.evaluate(() => [...document.querySelectorAll('.test-search-result-label')].map(label => (label.textContent || '').trim() ) ) expect(privateResults).toEqual(expect.arrayContaining(privateRepos)) await driver.page.goto(sourcegraphBaseUrl + '/search?q=type:repo+visibility:public') await driver.page.waitForFunction(() => document.querySelectorAll('.test-search-result').length > 1) const publicResults = await driver.page.evaluate(() => [...document.querySelectorAll('.test-search-result-label')].map(label => (label.textContent || '').trim() ) ) expect(publicResults).not.toEqual(expect.arrayContaining(privateRepos)) await driver.page.goto(sourcegraphBaseUrl + '/search?q=type:repo+visibility:any') await driver.page.waitForFunction(() => document.querySelectorAll('.test-search-result').length > 1) const anyResults = await driver.page.evaluate(() => [...document.querySelectorAll('.test-search-result-label')].map(label => (label.textContent || '').trim() ) ) expect(anyResults).toEqual(expect.arrayContaining(privateRepos)) }) test('Search results code', async () => { await driver.page.goto( sourcegraphBaseUrl + '/search?q=repo:^github.com/gorilla/mux$&patternType=regexp file:mux.go "func NewRouter"' ) await driver.page.waitForSelector('a[href="/github.com/gorilla/mux"]', { visible: true }) // Flaky https://github.com/sourcegraph/sourcegraph/issues/2704 //
await percySnapshot(page, 'Search results code') }) test('Site admin overview', async () => { await driver.page.goto(sourcegraphBaseUrl + '/site-admin') await driver.page.waitForSelector('.test-site-admin-overview-menu', { visible: true }) await driver.page.waitForSelector('.test-product-certificate', { visible: true }) await percySnapshot(driver.page, 'Site admin overview') }) }) describe('Theme switcher', () => { test('changes the theme', async () => { await driver.page.goto(sourcegraphBaseUrl + '/github.com/gorilla/mux/-/blob/mux.go') await driver.page.waitForSelector('.theme.theme-dark, .theme.theme-light', { visible: true }) const getActiveThemeClasses = (): Promise<string[]> => driver.page.evaluate(() => [...document.querySelector('.theme')!.classList].filter(className => className.startsWith('theme-')) ) expect(await getActiveThemeClasses()).toHaveLength(1) await driver.page.waitForSelector('.test-user-nav-item-toggle') await driver.page.click('.test-user-nav-item-toggle') // Switch to dark await driver.page.select('.test-theme-toggle', 'dark') expect(await getActiveThemeClasses()).toEqual(['theme-dark']) // Switch to light await driver.page.select('.test-theme-toggle', 'light') expect(await getActiveThemeClasses()).toEqual(['theme-light']) }) }) describe('Repository component', () => { const blobTableSelector = '.test-blob > table' const getHoverContents = async (): Promise<string[]> => { // Search for any child of test-tooltip-content: as test-tooltip-content has display: contents, // it will never be detected as visible by waitForSelector(), but its children will. const selector = '.test-tooltip-content *' await driver.page.waitForSelector(selector, { visible: true }) return driver.page.evaluate(() => // You can't reference hoverContentSelector in puppeteer's driver.page.evaluate [...document.querySelectorAll('.test-tooltip-content')].map(content => content.textContent || '') ) } const assertHoverContentContains = async (value: string): Promise<void> => { expect(await getHoverContents()).toEqual(expect.arrayContaining([expect.stringContaining(value)])) } const clickHoverJ2D = async (): Promise<void> => { const selector = '.test-tooltip-go-to-definition' await driver.page.waitForSelector(selector, { visible: true }) await driver.page.click(selector) } const clickHoverFindReferences = async (): Promise<void> => { const selector = '.test-tooltip-find-references' await driver.page.waitForSelector(selector, { visible: true }) await driver.page.click(selector) } describe('file tree', () => { test('does navigation on file click', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d' ) await ( await driver.page.waitForSelector('[data-tree-path="async.go"]', { visible: true, }) ).click() await driver.assertWindowLocation( '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d/-/blob/async.go' ) }) test('expands directory on row click (no navigation)', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d' ) await driver.page.waitForSelector('.tree__row-icon', { visible: true }) await driver.page.click('.tree__row-icon') await driver.page.waitForSelector('.tree__row--selected [data-tree-path="websocket"]', { visible: true, }) await driver.page.waitForSelector('.tree__row--expanded [data-tree-path="websocket"]', { visible: true, }) await driver.assertWindowLocation( 
'/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d' ) }) test('does navigation on directory row click', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d' ) await driver.page.waitForSelector('.tree__row-label', { visible: true }) await driver.page.click('.tree__row-label') await driver.page.waitForSelector('.tree__row--selected [data-tree-path="websocket"]', { visible: true, }) await driver.page.waitForSelector('.tree__row--expanded [data-tree-path="websocket"]', { visible: true, }) await driver.assertWindowLocation( '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d/-/tree/websocket' ) }) test('selects the current file', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d/-/blob/async.go' ) await driver.page.waitForSelector('.tree__row--active [data-tree-path="async.go"]', { visible: true, }) }) test('shows partial tree when opening directory', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d/-/tree/websocket' ) await driver.page.waitForSelector('.tree__row', { visible: true }) expect(await driver.page.evaluate(() => document.querySelectorAll('.tree__row').length)).toEqual(1) }) test('responds to keyboard shortcuts', async () => { const assertNumberRowsExpanded = async (expectedCount: number): Promise<void> => { expect( await driver.page.evaluate(() => document.querySelectorAll('.tree__row--expanded').length) ).toEqual(expectedCount) } await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/.travis.yml' ) await driver.page.waitForSelector('.tree__row', { visible: true }) // waitForSelector for tree to render await driver.page.click('.test-repo-revision-sidebar .tree') await driver.page.keyboard.press('ArrowUp') // arrow up to 'diff' directory await driver.page.waitForSelector('.tree__row--selected [data-tree-path="diff"]', { visible: true }) await driver.page.keyboard.press('ArrowRight') // arrow right (expand 'diff' directory) await driver.page.waitForSelector('.tree__row--selected [data-tree-path="diff"]', { visible: true }) await driver.page.waitForSelector('.tree__row--expanded [data-tree-path="diff"]', { visible: true }) await driver.page.waitForSelector('.tree__row [data-tree-path="diff/testdata"]', { visible: true }) await driver.page.keyboard.press('ArrowRight') // arrow right (move to nested 'diff/testdata' directory) await driver.page.waitForSelector('.tree__row--selected [data-tree-path="diff/testdata"]', { visible: true, }) await assertNumberRowsExpanded(1) // only `diff` directory is expanded, though `diff/testdata` is expanded await driver.page.keyboard.press('ArrowRight') // arrow right (expand 'diff/testdata' directory) await driver.page.waitForSelector('.tree__row--selected [data-tree-path="diff/testdata"]', { visible: true, }) await driver.page.waitForSelector('.tree__row--expanded [data-tree-path="diff/testdata"]', { visible: true, }) await assertNumberRowsExpanded(2) // `diff` and `diff/testdata` directories expanded await driver.page.waitForSelector('.tree__row [data-tree-path="diff/testdata/empty.diff"]', { visible: true, }) // select some file nested under `diff/testdata` await driver.page.keyboard.press('ArrowDown') // arrow down await driver.page.keyboard.press('ArrowDown') // arrow down await 
driver.page.keyboard.press('ArrowDown') // arrow down await driver.page.keyboard.press('ArrowDown') // arrow down await driver.page.waitForSelector( '.tree__row--selected [data-tree-path="diff/testdata/empty_orig.diff"]', { visible: true, } ) await driver.page.keyboard.press('ArrowLeft') // arrow left (navigate immediately up to parent directory `diff/testdata`) await driver.page.waitForSelector('.tree__row--selected [data-tree-path="diff/testdata"]', { visible: true, }) await assertNumberRowsExpanded(2) // `diff` and `diff/testdata` directories expanded await driver.page.keyboard.press('ArrowLeft') // arrow left await driver.page.waitForSelector('.tree__row--selected [data-tree-path="diff/testdata"]', { visible: true, }) // `diff/testdata` still selected await assertNumberRowsExpanded(1) // only `diff` directory expanded }) }) describe('symbol sidebar', () => { const listSymbolsTests = [ { name: 'lists symbols in file for Go', filePath: '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/cmd/go-diff/go-diff.go', symbolNames: ['main', 'stdin', 'diffPath', 'fileIdx', 'main'], symbolTypes: ['package', 'constant', 'variable', 'variable', 'function'], }, { name: 'lists symbols in another file for Go', filePath: '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/diff.go', symbolNames: [ 'diff', 'Stat', 'Stat', 'hunkPrefix', 'hunkHeader', 'diffTimeParseLayout', 'diffTimeFormatLayout', 'add', ], symbolTypes: [ 'package', 'function', 'function', 'variable', 'constant', 'constant', 'constant', 'function', ], }, { name: 'lists symbols in file for Python', filePath: '/github.com/sourcegraph/appdash@ebfcffb1b5c00031ce797183546746715a3cfe87/-/blob/python/appdash/sockcollector.py', symbolNames: [ 'RemoteCollector', 'sock', '_debug', '__init__', '_log', 'connect', 'collect', 'close', ], symbolTypes: ['class', 'variable', 'variable', 'field', 'field', 'field', 'field', 'field'], }, { name: 'lists symbols in file for TypeScript', filePath: '/github.com/sourcegraph/sourcegraph-typescript@a7b7a61e31af76dad3543adec359fa68737a58a1/-/blob/server/src/cancellation.ts', symbolNames: [ 'createAbortError', 'isAbortError', 'throwIfCancelled', 'tryCancel', 'toAxiosCancelToken', 'source', ], symbolTypes: ['constant', 'constant', 'function', 'function', 'function', 'constant'], }, { name: 'lists symbols in file for Java', filePath: '/github.com/sourcegraph/java-langserver@03efbe9558acc532e88f5288b4e6cfa155c6f2dc/-/blob/src/main/java/com/sourcegraph/common/Config.java', symbolNames: [ 'com.sourcegraph.common', 'Config', 'LIGHTSTEP_INCLUDE_SENSITIVE', 'LIGHTSTEP_PROJECT', 'LIGHTSTEP_TOKEN', 'ANDROID_JAR_PATH', 'IGNORE_DEPENDENCY_RESOLUTION_CACHE', 'LSP_TIMEOUT', 'LANGSERVER_ROOT', 'LOCAL_REPOSITORY', 'EXECUTE_GRADLE_ORIGINAL_ROOT_PATHS', 'shouldExecuteGradle', 'PRIVATE_REPO_ID', 'PRIVATE_REPO_URL', 'PRIVATE_REPO_USERNAME', 'PRIVATE_REPO_PASSWORD', 'log', 'checkEnv', 'ConfigException', ], symbolTypes: [ 'package', 'class', 'field', 'field', 'field', 'field', 'field', 'field', 'field', 'field', 'field', 'method', 'field', 'field', 'field', 'field', 'field', 'method', 'class', ], }, ] for (const symbolTest of listSymbolsTests) { test(symbolTest.name, async () => { await driver.page.goto(sourcegraphBaseUrl + symbolTest.filePath) await (await driver.page.waitForSelector('[data-test-tab="symbols"]')).click() await driver.page.waitForSelector('.test-symbol-name', { visible: true }) const symbolNames = await driver.page.evaluate(() => 
[...document.querySelectorAll('.test-symbol-name')].map(name => name.textContent || '') ) const symbolTypes = await driver.page.evaluate(() => [...document.querySelectorAll('.test-symbol-icon')].map( icon => icon.getAttribute('data-tooltip') || '' ) ) expect(sortBy(symbolNames)).toEqual(sortBy(symbolTest.symbolNames)) expect(sortBy(symbolTypes)).toEqual(sortBy(symbolTest.symbolTypes)) }) } const navigateToSymbolTests = [ { name: 'navigates to file on symbol click for Go', repoPath: '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d', filePath: '/tree/cmd', symbolPath: '/blob/cmd/go-diff/go-diff.go#L19:2-19:10', }, { name: 'navigates to file on symbol click for Java', repoPath: '/github.com/sourcegraph/java-langserver@03efbe9558acc532e88f5288b4e6cfa155c6f2dc', filePath: '/tree/src/main/java/com/sourcegraph/common', symbolPath: '/blob/src/main/java/com/sourcegraph/common/Config.java#L14:20-14:26', skip: true, }, { name: 'displays valid symbols at different file depths for Go (./examples/cmd/webapp-opentracing/main.go.go)', repoPath: '/github.com/sourcegraph/appdash@ebfcffb1b5c00031ce797183546746715a3cfe87', filePath: '/tree/examples', symbolPath: '/blob/examples/cmd/webapp-opentracing/main.go#L26:6-26:10', skip: true, }, { name: 'displays valid symbols at different file depths for Go (./sqltrace/sql.go)', repoPath: '/github.com/sourcegraph/appdash@ebfcffb1b5c00031ce797183546746715a3cfe87', filePath: '/tree/sqltrace', symbolPath: '/blob/sqltrace/sql.go#L14:2-14:5', skip: true, }, ] for (const navigationTest of navigateToSymbolTests) { const testFunc = navigationTest.skip ? test.skip : test testFunc(navigationTest.name, async () => { const repoBaseURL = sourcegraphBaseUrl + navigationTest.repoPath + '/-' await driver.page.goto(repoBaseURL + navigationTest.filePath) await (await driver.page.waitForSelector('[data-test-tab="symbols"]')).click() await driver.page.waitForSelector('.test-symbol-name', { visible: true }) await ( await driver.page.waitForSelector(`.test-symbol-link[href*="${navigationTest.symbolPath}"]`, { visible: true, }) ).click() await driver.assertWindowLocation(repoBaseURL + navigationTest.symbolPath, true) }) } const highlightSymbolTests = [ { name: 'highlights correct line for Go', filePath: '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/diff.go', index: 5, line: 65, }, { name: 'highlights correct line for TypeScript', filePath: '/github.com/sourcegraph/sourcegraph-typescript@a7b7a61e31af76dad3543adec359fa68737a58a1/-/blob/server/src/cancellation.ts', index: 2, line: 17, }, ] for (const { name, filePath, index, line } of highlightSymbolTests) { test(name, async () => { await driver.page.goto(sourcegraphBaseUrl + filePath) await driver.page.waitForSelector('[data-test-tab="symbols"]') await driver.page.click('[data-test-tab="symbols"]') await driver.page.waitForSelector('.test-symbol-name', { visible: true }) await driver.page.click(`.filtered-connection__nodes li:nth-child(${index + 1}) a`) await driver.page.waitForSelector('.test-blob .selected .line') const selectedLineNumber = await driver.page.evaluate(() => { const element = document.querySelector<HTMLElement>('.test-blob .selected .line') return element?.dataset.line && parseInt(element.dataset.line, 10) }) expect(selectedLineNumber).toEqual(line) }) } }) describe('directory page', () => { it('shows a row for each file in the directory', async () => { await driver.page.goto( sourcegraphBaseUrl + 
'/github.com/gorilla/securecookie@e59506cc896acb7f7bf732d4fdf5e25f7ccd8983' ) await driver.page.waitForSelector('.test-tree-entries', { visible: true }) await retry(async () => assert.equal( await driver.page.evaluate( () => document.querySelectorAll('.test-tree-entry-directory').length ), 1 ) ) await retry(async () => assert.equal( await driver.page.evaluate(() => document.querySelectorAll('.test-tree-entry-file').length), 7 ) ) }) test('shows commit information on a row', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d', { waitUntil: 'domcontentloaded', } ) await driver.page.waitForSelector('.test-tree-page-no-recent-commits') await driver.page.click('.test-tree-page-show-all-commits') await driver.page.waitForSelector('.git-commit-node__message', { visible: true }) await retry(async () => expect( await driver.page.evaluate( () => document.querySelectorAll('.git-commit-node__message')[3].textContent ) ).toContain('Add support for new/removed binary files.') ) await retry(async () => expect( await driver.page.evaluate(() => document.querySelectorAll('.git-commit-node-byline')[3].textContent!.trim() ) ).toContain('Dmitri Shuralyov') ) await retry(async () => expect( await driver.page.evaluate( () => document.querySelectorAll('.git-commit-node__oid')[3].textContent ) ).toEqual('2083912') ) }) it('navigates when clicking on a row', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d' ) // click on directory await driver.page.waitForSelector('.tree-entry', { visible: true }) await driver.page.click('.tree-entry') await driver.assertWindowLocation( '/github.com/sourcegraph/jsonrpc2@c6c7b9aa99fb76ee5460ccd3912ba35d419d493d/-/tree/websocket' ) }) }) describe('revision resolution', () => { test('shows clone in progress interstitial page', async () => { await driver.page.goto(sourcegraphBaseUrl + '/github.com/sourcegraphtest/AlwaysCloningTest') await driver.page.waitForSelector('.hero-page__subtitle', { visible: true }) await retry(async () => expect( await driver.page.evaluate(() => document.querySelector('.hero-page__subtitle')!.textContent) ).toEqual('Cloning in progress') ) }) test('resolves default branch when unspecified', async () => { await driver.page.goto(sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff/-/blob/diff/diff.go') await driver.page.waitForSelector('#repo-revision-popover', { visible: true }) await retry(async () => { expect( await driver.page.evaluate(() => document.querySelector('.test-revision')!.textContent!.trim()) ).toEqual('master') }) // Verify file contents are loaded. 
await driver.page.waitForSelector(blobTableSelector) }) test('updates revision with switcher', async () => { await driver.page.goto(sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff/-/blob/diff/diff.go') // Open revision switcher await driver.page.waitForSelector('#repo-revision-popover', { visible: true }) await driver.page.click('#repo-revision-popover') // Click "Tags" tab await driver.page.click('.revisions-popover [data-test-tab="tags"]') await driver.page.waitForSelector('a.git-ref-node[href*="0.5.0"]', { visible: true }) await driver.page.click('a.git-ref-node[href*="0.5.0"]') await driver.assertWindowLocation('/github.com/sourcegraph/[email protected]/-/blob/diff/diff.go') }) }) describe('hovers', () => { describe('Blob', () => { test('gets displayed and updates URL when clicking on a token', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/gorilla/mux@15a353a636720571d19e37b34a14499c3afa9991/-/blob/mux.go' ) await driver.page.waitForSelector(blobTableSelector) const selector = 'td[data-line="24"] + td .hl-storage.hl-type.hl-go:not(.hl-keyword)' await driver.page.waitForSelector(selector, { visible: true }) await driver.page.click(selector) await driver.assertWindowLocation( '/github.com/gorilla/mux@15a353a636720571d19e37b34a14499c3afa9991/-/blob/mux.go#L24:19' ) await getHoverContents() // verify there is a hover await percySnapshot(driver.page, 'Code intel hover tooltip') }) test('gets displayed when navigating to a URL with a token position', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/gorilla/mux@15a353a636720571d19e37b34a14499c3afa9991/-/blob/mux.go#L151:23' ) await assertHoverContentContains( 'ErrMethodMismatch is returned when the method in the request does not match' ) }) describe('jump to definition', () => { test('noops when on the definition', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L29:6' ) await clickHoverJ2D() await driver.assertWindowLocation( '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L29:6' ) }) test('does navigation (same repo, same file)', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L25:10' ) await clickHoverJ2D() await driver.assertWindowLocation( '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L29:6' ) }) test('does navigation (same repo, different file)', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/print.go#L13:31' ) await clickHoverJ2D() await driver.assertWindowLocation( '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/diff.pb.go#L38:6' ) // Verify file tree is highlighting the new path. await driver.page.waitForSelector('.tree__row--active [data-tree-path="diff/diff.pb.go"]', { visible: true, }) }) // basic code intel doesn't support cross-repo jump-to-definition yet. // If this test gets re-enabled `sourcegraph/vcsstore` and // `sourcegraph/go-vcs` need to be cloned. 
test.skip('does navigation (external repo)', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/vcsstore@267289226b15e5b03adedc9746317455be96e44c/-/blob/server/diff.go#L27:30' ) await clickHoverJ2D() await driver.assertWindowLocation( '/github.com/sourcegraph/go-vcs@aa7c38442c17a3387b8a21f566788d8555afedd0/-/blob/vcs/repository.go#L103:6' ) }) }) describe('find references', () => { test('opens widget and fetches local references', async function () { this.timeout(120000) await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L29:6' ) await clickHoverFindReferences() await driver.assertWindowLocation( '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L29:6&tab=references' ) await driver.assertNonemptyLocalRefs() // verify the appropriate # of references are fetched await driver.page.waitForSelector('.panel__tabs-content .file-match-children', { visible: true, }) await retry(async () => expect( await driver.page.evaluate( () => document.querySelectorAll('.panel__tabs-content .file-match-children__item') .length ) ).toEqual( // Basic code intel finds 8 references with some overlapping context, resulting in 4 hunks. 4 ) ) // verify all the matches highlight a `MultiFileDiffReader` token await driver.assertAllHighlightedTokens('MultiFileDiffReader') }) // TODO unskip this once basic-code-intel looks for external // references even when local references are found. test.skip('opens widget and fetches external references', async () => { await driver.page.goto( sourcegraphBaseUrl + '/github.com/sourcegraph/go-diff@3f415a150aec0685cb81b73cc201e762e075006d/-/blob/diff/parse.go#L32:16&tab=references' ) // verify some external refs are fetched (we cannot assert how many, but we can check that the matched results // look like they're for the appropriate token) await driver.assertNonemptyExternalRefs() // verify all the matches highlight a `Reader` token await driver.assertAllHighlightedTokens('Reader') }) }) }) }) describe.skip('godoc.org "Uses" links', () => { test('resolves standard library function', async () => { // https://godoc.org/bytes#Compare await driver.page.goto(sourcegraphBaseUrl + '/-/godoc/refs?def=Compare&pkg=bytes&repo=') await driver.assertWindowLocationPrefix('/github.com/golang/go/-/blob/src/bytes/bytes_decl.go') await driver.assertStickyHighlightedToken('Compare') await driver.assertNonemptyLocalRefs() await driver.assertAllHighlightedTokens('Compare') }) test('resolves standard library function (from stdlib repo)', async () => { // https://godoc.org/github.com/golang/go/src/bytes#Compare await driver.page.goto( sourcegraphBaseUrl + '/-/godoc/refs?def=Compare&pkg=github.com%2Fgolang%2Fgo%2Fsrc%2Fbytes&repo=github.com%2Fgolang%2Fgo' ) await driver.assertWindowLocationPrefix('/github.com/golang/go/-/blob/src/bytes/bytes_decl.go') await driver.assertStickyHighlightedToken('Compare') await driver.assertNonemptyLocalRefs() await driver.assertAllHighlightedTokens('Compare') }) test('resolves external package function (from gorilla/mux)', async () => { // https://godoc.org/github.com/gorilla/mux#Router await driver.page.goto( sourcegraphBaseUrl + '/-/godoc/refs?def=Router&pkg=github.com%2Fgorilla%2Fmux&repo=github.com%2Fgorilla%2Fmux' ) await driver.assertWindowLocationPrefix('/github.com/gorilla/mux/-/blob/mux.go') await driver.assertStickyHighlightedToken('Router') await driver.assertNonemptyLocalRefs() await 
driver.assertAllHighlightedTokens('Router') }) }) }) describe('Search component', () => { test('regexp toggle appears and updates patternType query parameter when clicked', async () => { await driver.page.goto(sourcegraphBaseUrl + '/search?q=test&patternType=literal') // Wait for monaco query input to load to avoid race condition with the intermediate input await driver.page.waitForSelector('#monaco-query-input') await driver.page.waitForSelector('.test-regexp-toggle') await driver.page.click('.test-regexp-toggle') await driver.page.goto(sourcegraphBaseUrl + '/search?q=test&patternType=regexp') // Wait for monaco query input to load to avoid race condition with the intermediate input await driver.page.waitForSelector('#monaco-query-input') await driver.page.waitForSelector('.test-regexp-toggle') await driver.page.click('.test-regexp-toggle') await driver.page.goto(sourcegraphBaseUrl + '/search?q=test&patternType=literal') }) }) describe('Search result type tabs', () => { test('Search results type tabs appear', async () => { await driver.page.goto( sourcegraphBaseUrl + '/search?q=repo:%5Egithub.com/gorilla/mux%24&patternType=regexp' ) await driver.page.waitForSelector('.test-search-result-type-tabs', { visible: true }) await driver.page.waitForSelector('.test-search-result-tab--active', { visible: true }) const tabs = await driver.page.evaluate(() => [...document.querySelectorAll('.test-search-result-tab')].map(tab => tab.textContent) ) expect(tabs.length).toEqual(6) expect(tabs).toStrictEqual(['Code', 'Diffs', 'Commits', 'Symbols', 'Repositories', 'Filenames']) const activeTab = await driver.page.evaluate( () => document.querySelectorAll('.test-search-result-tab--active').length ) expect(activeTab).toEqual(1) const label = await driver.page.evaluate( () => document.querySelector('.test-search-result-tab--active')!.textContent || '' ) expect(label).toEqual('Code') }) }) describe('Saved searches', () => { test('Save search from search results page', async () => { await driver.page.goto(sourcegraphBaseUrl + '/search?q=test') await driver.page.waitForSelector('.test-save-search-link', { visible: true }) await driver.page.click('.test-save-search-link') await driver.page.waitForSelector('.test-saved-search-modal') await driver.page.waitForSelector('.test-saved-search-modal-save-button') await driver.page.click('.test-saved-search-modal-save-button') await driver.assertWindowLocation('/users/test/searches/add?query=test&patternType=literal') await driver.page.waitForSelector('.test-saved-search-form-input-description', { visible: true }) await driver.page.click('.test-saved-search-form-input-description') await driver.page.keyboard.type('test query') await driver.page.waitForSelector('.test-saved-search-form-submit-button', { visible: true }) await driver.page.click('.test-saved-search-form-submit-button') await driver.assertWindowLocation('/users/test/searches') const nodes = await driver.page.evaluate( () => document.querySelectorAll('.test-saved-search-list-page-row').length ) expect(nodes).toEqual(1) expect( await driver.page.evaluate( () => document.querySelector('.test-saved-search-list-page-row-title')!.textContent ) ).toEqual('test query') }) test('Delete saved search', async () => { await driver.page.goto(sourcegraphBaseUrl + '/users/test/searches') await driver.page.waitForSelector('.test-delete-saved-search-button', { visible: true }) driver.page.on('dialog', async dialog => { await dialog.accept() }) await driver.page.click('.test-delete-saved-search-button') await 
driver.page.waitFor(() => !document.querySelector('.test-saved-search-list-page-row')) const nodes = await driver.page.evaluate( () => document.querySelectorAll('.test-saved-search-list-page-row').length ) expect(nodes).toEqual(0) }) test('Save search from saved searches page', async () => { await driver.page.goto(sourcegraphBaseUrl + '/users/test/searches') await driver.page.waitForSelector('.test-add-saved-search-button', { visible: true }) await driver.page.click('.test-add-saved-search-button') await driver.assertWindowLocation('/users/test/searches/add') await driver.page.waitForSelector('.test-saved-search-form-input-description', { visible: true }) await driver.page.click('.test-saved-search-form-input-description') await driver.page.keyboard.type('test query 2') await driver.page.waitForSelector('.test-saved-search-form-input-query', { visible: true }) await driver.page.click('.test-saved-search-form-input-query') await driver.page.keyboard.type('test patternType:literal') await driver.page.waitForSelector('.test-saved-search-form-submit-button', { visible: true }) await driver.page.click('.test-saved-search-form-submit-button') await driver.assertWindowLocation('/users/test/searches') const nodes = await driver.page.evaluate( () => document.querySelectorAll('.test-saved-search-list-page-row').length ) expect(nodes).toEqual(1) expect( await driver.page.evaluate( () => document.querySelector('.test-saved-search-list-page-row-title')!.textContent ) ).toEqual('test query 2') }) test('Edit saved search', async () => { await driver.page.goto(sourcegraphBaseUrl + '/users/test/searches') await driver.page.waitForSelector('.test-edit-saved-search-button', { visible: true }) await driver.page.click('.test-edit-saved-search-button') await driver.page.waitForSelector('.test-saved-search-form-input-description', { visible: true }) await driver.page.click('.test-saved-search-form-input-description') await driver.page.keyboard.type(' edited') await driver.page.waitForSelector('.test-saved-search-form-submit-button', { visible: true }) await driver.page.click('.test-saved-search-form-submit-button') await driver.page.goto(sourcegraphBaseUrl + '/users/test/searches') await driver.page.waitForSelector('.test-saved-search-list-page-row-title') expect( await driver.page.evaluate( () => document.querySelector('.test-saved-search-list-page-row-title')!.textContent ) ).toEqual('test query 2 edited') }) }) describe('Search statistics', () => { beforeEach(async () => { await driver.setUserSettings<Settings>({ experimentalFeatures: { searchStats: true } }) }) afterEach(async () => { await driver.resetUserSettings() }) // This is a substring that appears in the sourcegraph/go-diff repository, which is present // in the external service added for the e2e test. It is OK if it starts to appear in other // repositories (such as sourcegraph/sourcegraph now that it's mentioned here); the test // just checks that it is found in at least 1 Go file. const uniqueString = 'Incomplete-' const uniqueStringPostfix = 'Lines' test('button on search results page', async () => { await driver.page.goto(`${sourcegraphBaseUrl}/search?q=${uniqueString}`) await driver.page.waitForSelector(`a[href="/stats?q=${uniqueString}"]`) }) test('page', async () => { await driver.page.goto(`${sourcegraphBaseUrl}/stats?q=${uniqueString}`) // Ensure the global navbar hides the search input (to avoid confusion with the one on // the stats page). 
await driver.page.waitForSelector('.global-navbar a.nav-link[href="/search"]') assert.strictEqual( await driver.page.evaluate(() => document.querySelectorAll('#monaco-query-input').length), 0 ) const queryInputValue = () => driver.page.evaluate(() => { const input = document.querySelector<HTMLInputElement>('.test-stats-query') return input ? input.value : null }) // Check for a Go result (the sample repositories have Go files). await driver.page.waitForSelector(`a[href*="${uniqueString}+lang:go"]`) assert.strictEqual(await queryInputValue(), uniqueString) await percySnapshot(driver.page, 'Search stats') // Update the query and rerun the computation. await driver.page.type('.test-stats-query', uniqueStringPostfix) // the uniqueString is followed by 'Incomplete-Lines' in go-diff const wantQuery = `${uniqueString}${uniqueStringPostfix}` assert.strictEqual(await queryInputValue(), wantQuery) await driver.page.click('.test-stats-query-update') await driver.page.waitForSelector(`a[href*="${wantQuery}+lang:go"]`) assert.ok(driver.page.url().endsWith(`/stats?q=${wantQuery}`)) }) }) })
]) await driver.page.waitFor( (name: string) => !document.querySelector(`[data-test-access-token-description="${name}"]`),
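The Sourcegraph e2e tests above repeatedly wrap DOM assertions in a retry(...) helper so checks tolerate pages that are still settling. A minimal sketch of that pattern, restated in Python purely for illustration (the attempt count, delay, and check_row_count are hypothetical, not taken from the test suite):

import time

def retry(assertion, attempts=5, delay=0.5):
    """Re-run `assertion` until it stops raising AssertionError, or give up."""
    for attempt in range(attempts):
        try:
            return assertion()
        except AssertionError:
            if attempt == attempts - 1:
                raise  # out of attempts: surface the last failure
            time.sleep(delay)  # let the page settle, then try again

# Hypothetical usage mirroring the tests above:
# retry(lambda: check_row_count(expected=7))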
grant.rs
//! Data structure to store a list of userspace applications. use core::marker::PhantomData; use core::mem::{align_of, size_of}; use core::ops::{Deref, DerefMut}; use core::ptr::{write, write_volatile, Unique}; use crate::callback::AppId; use crate::process::{Error, ProcessType}; use crate::sched::Kernel; /// Region of process memory reserved for the kernel. pub struct Grant<T: Default> { crate kernel: &'static Kernel, grant_num: usize, ptr: PhantomData<T>, } pub struct AppliedGrant<T> { appid: AppId, grant: *mut T, _phantom: PhantomData<T>, } impl<T> AppliedGrant<T> { pub fn enter<F, R>(self, fun: F) -> R where F: FnOnce(&mut Owned<T>, &mut Allocator) -> R, R: Copy, { let mut allocator = Allocator { appid: self.appid }; let mut root = unsafe { Owned::new(self.grant, self.appid) }; fun(&mut root, &mut allocator) } } pub struct Allocator { appid: AppId, } pub struct Owned<T: ?Sized> { data: Unique<T>, appid: AppId, } impl<T: ?Sized> Owned<T> { unsafe fn new(data: *mut T, appid: AppId) -> Owned<T> { Owned { data: Unique::new_unchecked(data), appid: appid, } } pub fn appid(&self) -> AppId { self.appid } } impl<T: ?Sized> Drop for Owned<T> { fn drop(&mut self) { unsafe { let data = self.data.as_ptr() as *mut u8; self.appid.kernel.process_map_or((), self.appid, |process| { process.free(data); }); } } } impl<T: ?Sized> Deref for Owned<T> { type Target = T; fn deref(&self) -> &T { unsafe { self.data.as_ref() } } } impl<T: ?Sized> DerefMut for Owned<T> { fn deref_mut(&mut self) -> &mut T { unsafe { self.data.as_mut() } } } impl Allocator { pub fn alloc<T>(&mut self, data: T) -> Result<Owned<T>, Error> { unsafe { self.appid .kernel .process_map_or(Err(Error::NoSuchApp), self.appid, |process| { process.alloc(size_of::<T>(), align_of::<T>()).map_or( Err(Error::OutOfMemory), |arr| { let ptr = arr.as_mut_ptr() as *mut T; // We use `ptr::write` to avoid `Drop`ping the uninitialized memory in // case `T` implements the `Drop` trait. write(ptr, data); Ok(Owned::new(ptr, self.appid)) }, ) }) } } } pub struct Borrowed<'a, T: 'a + ?Sized> { data: &'a mut T, appid: AppId, } impl<T: 'a + ?Sized> Borrowed<'a, T> { pub fn new(data: &'a mut T, appid: AppId) -> Borrowed<'a, T> { Borrowed { data: data, appid: appid, } } pub fn appid(&self) -> AppId { self.appid } } impl<T: 'a + ?Sized> Deref for Borrowed<'a, T> { type Target = T; fn deref(&self) -> &T { self.data } } impl<T: 'a + ?Sized> DerefMut for Borrowed<'a, T> { fn deref_mut(&mut self) -> &mut T { self.data } } impl<T: Default> Grant<T> { crate fn new(kernel: &'static Kernel, grant_index: usize) -> Grant<T> { Grant { kernel: kernel, grant_num: grant_index, ptr: PhantomData, } } pub fn grant(&self, appid: AppId) -> Option<AppliedGrant<T>> { unsafe { appid.kernel.process_map_or(None, appid, |process| { if let Some(grant_ptr_ref) = process.grant_ptr(self.grant_num) { let cntr = *(grant_ptr_ref as *mut *mut T); if cntr.is_null() { None } else { Some(AppliedGrant { appid: appid, grant: cntr, _phantom: PhantomData, }) } } else { None } }) } } pub fn enter<F, R>(&self, appid: AppId, fun: F) -> Result<R, Error> where F: FnOnce(&mut Borrowed<T>, &mut Allocator) -> R, R: Copy, { unsafe { appid .kernel .process_map_or(Err(Error::NoSuchApp), appid, |process| { // Here is an example of how the grants are laid out in a // process's memory: // // Mem. Addr. // 0x0040000 ┌──────────────────── // │ GrantPointer0 [0x003FFC8] // │ GrantPointer1 [0x003FFC0] // │ ... 
// │ GrantPointerN [0x0000000 (NULL)] // 0x003FFE0 ├──────────────────── // │ GrantRegion0 // 0x003FFC8 ├──────────────────── // │ GrantRegion1 // 0x003FFC0 ├──────────────────── // │ // │ --unallocated-- // │ // └──────────────────── // // An array of pointers (one per possible grant region) // point to where the actual grant memory is allocated // inside of the process. The grant memory is not allocated // until the actual grant region is actually used. // // This function provides the app access to the specific // grant memory, and allocates the grant region in the // process memory if needed. // // Get a pointer to where the grant pointer is stored in the // process memory. if let Some(grant_ptr_ref) = process.grant_ptr(self.grant_num) { let ctr_ptr = grant_ptr_ref as *mut *mut T; // If the pointer at that location is NULL then the grant // memory needs to be allocated. let new_grant = if (*ctr_ptr).is_null() { process .alloc(size_of::<T>(), align_of::<T>()) .map(|root_arr| { let root_ptr = root_arr.as_mut_ptr() as *mut T; // Initialize the grant contents using ptr::write, to // ensure that we don't try to drop the contents of // uninitialized memory when T implements Drop. write(root_ptr, Default::default()); // Record the location in the grant pointer. write_volatile(ctr_ptr, root_ptr); root_ptr }) } else { Some(*ctr_ptr) }; // If the grant region already exists or there was enough // memory to allocate it, call the passed in closure with // the borrowed grant region. new_grant.map_or(Err(Error::OutOfMemory), move |root_ptr| { let root_ptr = root_ptr as *mut T; let mut root = Borrowed::new(&mut *root_ptr, appid); let mut allocator = Allocator { appid: appid }; let res = fun(&mut root, &mut allocator); Ok(res) }) } else { Err(Error::InactiveApp) } }) } } pub fn each<F>(&self, fun: F) where F: Fn(&mut Owned<T>), { self.kernel.process_each(|process| unsafe { if let Some(grant_ptr_ref) = process.grant_ptr(self.grant_num) { let root_ptr = *(grant_ptr_ref as *mut *mut T); if !root_ptr.is_n
ess_iter(), } } } pub struct Iter<'a, T: 'a + Default> { grant: &'a Grant<T>, subiter: core::slice::Iter<'a, Option<&'a dyn ProcessType>>, } impl<T: Default> Iterator for Iter<'a, T> { type Item = AppliedGrant<T>; fn next(&mut self) -> Option<Self::Item> { // Save a local copy of grant_num so we don't have to access `self` // in the closure below. let grant_num = self.grant.grant_num; // Get the next `AppId` from the kernel processes array. There can be // empty slots in the processes array, so we use `find_map()` to skip // over those. Since the iterator itself is saved, calling this function // again will start where we left off. let res = self.subiter.find_map(|pi| { pi.map_or(None, |p| { // We have found a candidate process that exists in the // processes array. Now we have to check if this grant is set up // for this process. If not, we have to skip it and keep // looking. unsafe { if let Some(grant_ptr_ref) = p.grant_ptr(grant_num) { let cntr = *(grant_ptr_ref as *mut *mut T); if cntr.is_null() { None } else { Some(p.appid()) } } else { None } } }) }); // Check if our find above returned another `AppId`, or if we hit the // end of the iterator. If we found another app, try to access its grant // region. res.map_or(None, |app| self.grant.grant(app)) } }
ull() { let mut root = Owned::new(root_ptr, process.appid()); fun(&mut root); } } }); } /// Get an iterator over all processes and their active grant regions for /// this particular grant. pub fn iter(&self) -> Iter<T> { Iter { grant: self, subiter: self.kernel.get_proc
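The layout comment in grant.rs describes an array of per-grant pointers that stay NULL until a grant region is first entered, at which point the kernel allocates and initializes it. A toy model of that lazy-allocation idea, restated in Python purely for illustration (ToyProcess and its names are invented; the real kernel operates on raw process memory):

class ToyProcess:
    def __init__(self, num_grants):
        # One slot per possible grant region; nothing is allocated up front.
        self.grant_table = [None] * num_grants

    def enter_grant(self, grant_num, default_factory, fun):
        if self.grant_table[grant_num] is None:
            # First use: allocate and initialize the region now.
            self.grant_table[grant_num] = default_factory()
        return fun(self.grant_table[grant_num])

p = ToyProcess(num_grants=4)
p.enter_grant(0, dict, lambda region: region.setdefault("count", 0))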
model.py
import numpy as np import random import torch import torch.nn as nn from torch import optim class Encoder(nn.Module): def __init__(self, input_size, hidden_size, num_layers = 1): super(Encoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.lstm = nn.LSTM(input_size = input_size, hidden_size = hidden_size, num_layers = num_layers) def forward(self, x): flat = x.view(x.shape[0], x.shape[1], self.input_size) out, h = self.lstm(flat) return out, h class Decoder(nn.Module): def __init__(self, input_size, hidden_size, output_size = 1, num_layers = 1): super(Decoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.output_size = output_size self.lstm = nn.LSTM(input_size = input_size, hidden_size = hidden_size, num_layers = num_layers) self.linear = nn.Linear(hidden_size, output_size) def forward(self, x, h): out, h = self.lstm(x.unsqueeze(0), h) y = self.linear(out.squeeze(0)) return y, h class EncoderDecoder(nn.Module): def __init__(self, hidden_size, input_size = 1, output_size = 1): super(EncoderDecoder, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.encoder = Encoder(input_size = input_size, hidden_size = hidden_size) self.decoder = Decoder(input_size = input_size, hidden_size = hidden_size, output_size = output_size) def train_model( self, train, target, epochs, target_len, method = 'recursive', tfr = 0.5, lr = 0.01, dynamic_tf = False ): losses = np.full(epochs, np.nan) optimizer = optim.Adam(self.parameters(), lr = lr) criterion = nn.MSELoss() for e in range(epochs): predicted = torch.zeros(target_len, train.shape[1], train.shape[2]) optimizer.zero_grad() _, enc_h = self.encoder(train) dec_in = train[-1, :, :] dec_h = enc_h if method == 'recursive':
if method == 'teacher_forcing': # use teacher forcing if random.random() < tfr: for t in range(target_len): dec_out, dec_h = self.decoder(dec_in, dec_h) predicted[t] = dec_out dec_in = target[t, :, :] # predict recursively else: for t in range(target_len): dec_out, dec_h = self.decoder(dec_in, dec_h) predicted[t] = dec_out dec_in = dec_out if method == 'mixed_teacher_forcing': # predict using mixed teacher forcing for t in range(target_len): dec_out, dec_h = self.decoder(dec_in, dec_h) predicted[t] = dec_out # predict with teacher forcing if random.random() < tfr: dec_in = target[t, :, :] # predict recursively else: dec_in = dec_out loss = criterion(predicted, target) loss.backward() optimizer.step() losses[e] = loss.item() if e % 10 == 0: print(f'Epoch {e}/{epochs}: {round(loss.item(), 4)}') # dynamic teacher forcing if dynamic_tf and tfr > 0: tfr = tfr - 0.02 return losses def predict(self, x, target_len): y = torch.zeros(target_len, x.shape[1], x.shape[2]) _, enc_h = self.encoder(x) dec_in = x[-1, :, :] dec_h = enc_h for t in range(target_len): dec_out, dec_h = self.decoder(dec_in, dec_h) y[t] = dec_out dec_in = dec_out return y
for t in range(target_len): dec_out, dec_h = self.decoder(dec_in, dec_h) predicted[t] = dec_out dec_in = dec_out
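Assuming the classes above are in scope and torch is installed, a hypothetical training call looks like this (shapes are inferred from the code, which lays tensors out as [sequence, batch, features]; all sizes here are made up):

import torch

model = EncoderDecoder(hidden_size=16, input_size=1, output_size=1)
train = torch.randn(20, 8, 1)   # 20 input steps, batch of 8, 1 feature
target = torch.randn(5, 8, 1)   # the next 5 steps to predict
losses = model.train_model(train, target, epochs=20, target_len=5,
                           method='mixed_teacher_forcing', tfr=0.5, lr=0.01)
preds = model.predict(train, target_len=5)  # shape (5, 8, 1)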
__init__.py
r""" _ ___ __ __ .___________. __ __ /\| |/\ / \ | | | | | || | | | \ ` ' / / ^ \ | | | | `---| |----`| |__| | |_ _| / /_\ \ | | | | | | | __ |
/ , . \ / _____ \ | `--' | | | | | | | \/|_|\//__/ \__\ \______/ |__| |__| |__| """ VERSION = (0, 47, 0, "final", 0) __title__ = "django-allauth" __version_info__ = VERSION __version__ = ".".join(map(str, VERSION[:3])) + ( "-{}{}".format(VERSION[3], VERSION[4] or "") if VERSION[3] != "final" else "" ) __author__ = "Raymond Penners" __license__ = "MIT" __copyright__ = "Copyright 2010-2021 Raymond Penners and contributors"
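The __version__ expression above drops the release tag for "final" builds and appends it otherwise; a quick standalone illustration of both branches:

def fmt(version):
    # Same formatting rule as __version__ above, parameterized for testing.
    return ".".join(map(str, version[:3])) + (
        "-{}{}".format(version[3], version[4] or "") if version[3] != "final" else ""
    )

assert fmt((0, 47, 0, "final", 0)) == "0.47.0"
assert fmt((0, 48, 0, "beta", 1)) == "0.48.0-beta1"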
__init__.py
from .psc_class import PscClass from .juman_psc import JumanPsc from .mrph_test import mrph_test_dir from .mrph_match import MrphMatch, MRPH_MTCH_PTN from .features import make_features, features_in_lines from .model import get_dataset, make_model __all__ = [ 'PscClass',
'JumanPsc', 'mrph_test_dir', 'MrphMatch', 'MRPH_MTCH_PTN', 'make_features', 'features_in_lines', 'get_dataset', 'make_model', ]
main.go
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. // +build !windows // Keybase file system package main import ( "flag" "fmt" "os" "bazil.org/fuse" "github.com/keybase/client/go/kbfs/env" "github.com/keybase/client/go/kbfs/libfs" "github.com/keybase/client/go/kbfs/libfuse" "github.com/keybase/client/go/kbfs/libkbfs" "github.com/keybase/client/go/logger" ) var runtimeDir = flag.String("runtime-dir", os.Getenv("KEYBASE_RUNTIME_DIR"), "runtime directory") var label = flag.String("label", os.Getenv("KEYBASE_LABEL"), "label to help identify if running as a service") var mountType = flag.String("mount-type", defaultMountType, "mount type: default, force, none") var version = flag.Bool("version", false, "Print version") const usageFormatStr = `Usage: kbfsfuse -version To run against remote KBFS servers: kbfsfuse [-runtime-dir=path/to/dir] [-label=label] [-mount-type=default|force|required|none] %s %s[/path/to/mountpoint] To run in a local testing environment: kbfsfuse [-runtime-dir=path/to/dir] [-label=label] [-mount-type=default|force|required|none] %s %s[/path/to/mountpoint] Defaults: %s ` func getUsageString(ctx libkbfs.Context) string
func start() *libfs.Error { ctx := env.NewContext() kbfsParams := libkbfs.AddFlags(flag.CommandLine, ctx) platformParams := libfuse.AddPlatformFlags(flag.CommandLine) flag.Parse() if *version { fmt.Printf("%s\n", libkbfs.VersionString()) return nil } mountDir := "" if len(flag.Args()) < 1 { var err error mountDir, err = ctx.GetMountDir() if err != nil { return libfs.InitError(err.Error()) } if len(mountDir) == 0 { fmt.Print(getUsageString(ctx)) return libfs.InitError("no mount specified") } } else { mountDir = flag.Arg(0) } if len(flag.Args()) > 1 { fmt.Print(getUsageString(ctx)) return libfs.InitError("extra arguments specified (flags go before the first argument)") } if kbfsParams.Debug { fuseLog := logger.NewWithCallDepth("FUSE", 1) fuseLog.Configure("", true, "") fuse.Debug = libfuse.MakeFuseDebugFn( fuseLog, false /* superVerbose */) } options := libfuse.StartOptions{ KbfsParams: *kbfsParams, PlatformParams: *platformParams, RuntimeDir: *runtimeDir, Label: *label, ForceMount: *mountType == "force" || *mountType == "required", MountErrorIsFatal: *mountType == "required", SkipMount: *mountType == "none", MountPoint: mountDir, } return libfuse.Start(options, ctx) } func main() { err := start() if err != nil { fmt.Fprintf(os.Stderr, "kbfsfuse error: (%d) %s\n", err.Code, err.Message) os.Exit(err.Code) } os.Exit(0) }
{ remoteUsageStr := libkbfs.GetRemoteUsageString() localUsageStr := libkbfs.GetLocalUsageString() platformUsageStr := libfuse.GetPlatformUsageString() defaultUsageStr := libkbfs.GetDefaultsUsageString(ctx) return fmt.Sprintf(usageFormatStr, remoteUsageStr, platformUsageStr, localUsageStr, platformUsageStr, defaultUsageStr) }
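The -mount-type flag above fans out into three of the StartOptions fields. Restated in Python for illustration (field names lower-cased; this is a sketch of the mapping, not a KBFS API):

def mount_options(mount_type):
    return {
        "force_mount": mount_type in ("force", "required"),
        "mount_error_is_fatal": mount_type == "required",
        "skip_mount": mount_type == "none",
    }

assert mount_options("required") == {
    "force_mount": True, "mount_error_is_fatal": True, "skip_mount": False}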
api_experiment.go
package internal import ( "context" "encoding/json" "fmt" "sort" "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "github.com/pkg/errors" "github.com/determined-ai/determined/master/internal/db" "github.com/determined-ai/determined/master/pkg/actor" "github.com/determined-ai/determined/master/pkg/check" "github.com/determined-ai/determined/master/pkg/model" "github.com/determined-ai/determined/master/pkg/protoutils" "github.com/determined-ai/determined/master/pkg/searcher" "github.com/determined-ai/determined/proto/pkg/apiv1" "github.com/determined-ai/determined/proto/pkg/checkpointv1" "github.com/determined-ai/determined/proto/pkg/experimentv1" ) func
(srcList []string, item string) bool { item = strings.ToLower(item) for _, src := range srcList { if strings.Contains(strings.ToLower(src), item) { return true } } return false } // matchesList checks whether srcList contains all strings provided in matchList. func matchesList(srcList []string, matchList []string) bool { for _, match := range matchList { if !isInList(srcList, match) { return false } } return true } func (a *apiServer) checkExperimentExists(id int) error { ok, err := a.m.db.CheckExperimentExists(id) switch { case err != nil: return status.Errorf(codes.Internal, "failed to check if experiment exists: %s", err) case !ok: return status.Errorf(codes.NotFound, "experiment %d not found", id) default: return nil } } func (a *apiServer) GetExperiment( _ context.Context, req *apiv1.GetExperimentRequest, ) (*apiv1.GetExperimentResponse, error) { exp := &experimentv1.Experiment{} switch err := a.m.db.QueryProto("get_experiment", exp, req.ExperimentId); { case err == db.ErrNotFound: return nil, status.Errorf(codes.NotFound, "experiment not found: %d", req.ExperimentId) case err != nil: return nil, errors.Wrapf(err, "error fetching experiment from database: %d", req.ExperimentId) } confBytes, err := a.m.db.ExperimentConfigRaw(int(req.ExperimentId)) if err != nil { return nil, errors.Wrapf(err, "error fetching experiment config from database: %d", req.ExperimentId) } var conf map[string]interface{} err = json.Unmarshal(confBytes, &conf) if err != nil { return nil, errors.Wrapf(err, "error unmarshalling experiment config: %d", req.ExperimentId) } return &apiv1.GetExperimentResponse{Experiment: exp, Config: protoutils.ToStruct(conf)}, nil } func (a *apiServer) GetExperiments( _ context.Context, req *apiv1.GetExperimentsRequest) (*apiv1.GetExperimentsResponse, error) { resp := &apiv1.GetExperimentsResponse{} if err := a.m.db.QueryProto("get_experiments", &resp.Experiments); err != nil { return nil, err } a.filter(&resp.Experiments, func(i int) bool { v := resp.Experiments[i] if req.Archived != nil && req.Archived.Value != v.Archived { return false } found := false for _, state := range req.States { if state == v.State { found = true break } } if len(req.States) != 0 && !found { return false } found = false for _, user := range req.Users { if user == v.Username { found = true break } } if len(req.Users) != 0 && !found { return false } if !matchesList(v.Labels, req.Labels) { return false } return strings.Contains(strings.ToLower(v.Description), strings.ToLower(req.Description)) }) a.sort(resp.Experiments, req.OrderBy, req.SortBy, apiv1.GetExperimentsRequest_SORT_BY_ID) return resp, a.paginate(&resp.Pagination, &resp.Experiments, req.Offset, req.Limit) } func (a *apiServer) GetExperimentLabels(_ context.Context, req *apiv1.GetExperimentLabelsRequest) (*apiv1.GetExperimentLabelsResponse, error) { resp := &apiv1.GetExperimentLabelsResponse{} var err error labelUsage, err := a.m.db.ExperimentLabelUsage() if err != nil { return nil, err } // Convert the label usage map into a sorted list of labels // May add other sorting / pagination options later if needed labels := make([]string, len(labelUsage)) i := 0 for label := range labelUsage { labels[i] = label i++ } sort.Slice(labels, func(i, j int) bool { return labelUsage[labels[i]] > labelUsage[labels[j]] }) resp.Labels = labels return resp, nil } func (a *apiServer) GetExperimentValidationHistory( _ context.Context, req *apiv1.GetExperimentValidationHistoryRequest, ) (*apiv1.GetExperimentValidationHistoryResponse, error) { var resp 
apiv1.GetExperimentValidationHistoryResponse switch err := a.m.db.QueryProto("proto_experiment_validation_history", &resp, req.ExperimentId); { case err == db.ErrNotFound: return nil, status.Errorf(codes.NotFound, "experiment not found: %d", req.ExperimentId) case err != nil: return nil, errors.Wrapf(err, "error fetching validation history for experiment from database: %d", req.ExperimentId) } return &resp, nil } func (a *apiServer) PreviewHPSearch( _ context.Context, req *apiv1.PreviewHPSearchRequest) (*apiv1.PreviewHPSearchResponse, error) { bytes, err := protojson.Marshal(req.Config) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "error parsing experiment config: %s", err) } config := model.DefaultExperimentConfig(&a.m.config.TaskContainerDefaults) if err = json.Unmarshal(bytes, &config); err != nil { return nil, status.Errorf(codes.InvalidArgument, "error parsing experiment config: %s", err) } if err = check.Validate(config.Searcher); err != nil { return nil, status.Errorf(codes.InvalidArgument, "invalid experiment config: %s", err) } sm := searcher.NewSearchMethod(config.Searcher) s := searcher.NewSearcher(req.Seed, sm, config.Hyperparameters) sim, err := searcher.Simulate(s, nil, searcher.RandomValidation, true, config.Searcher.Metric) if err != nil { return nil, err } protoSim := &experimentv1.ExperimentSimulation{Seed: req.Seed} indexes := make(map[string]int) toProto := func(op searcher.Runnable) (experimentv1.RunnableOperation, error) { switch op := op.(type) { case searcher.Train: switch op.Length.Unit { case model.Records: return experimentv1.RunnableOperation{ Type: experimentv1.RunnableType_RUNNABLE_TYPE_TRAIN, Length: &experimentv1.TrainingUnits{ Unit: experimentv1.Unit_UNIT_RECORDS, Count: int32(op.Length.Units), }, }, nil case model.Batches: return experimentv1.RunnableOperation{ Type: experimentv1.RunnableType_RUNNABLE_TYPE_TRAIN, Length: &experimentv1.TrainingUnits{ Unit: experimentv1.Unit_UNIT_BATCHES, Count: int32(op.Length.Units), }, }, nil case model.Epochs: return experimentv1.RunnableOperation{ Type: experimentv1.RunnableType_RUNNABLE_TYPE_TRAIN, Length: &experimentv1.TrainingUnits{ Unit: experimentv1.Unit_UNIT_EPOCHS, Count: int32(op.Length.Units), }, }, nil default: return experimentv1.RunnableOperation{}, fmt.Errorf("unrecognized unit %s", op.Length.Unit) } case searcher.Validate: return experimentv1.RunnableOperation{ Type: experimentv1.RunnableType_RUNNABLE_TYPE_VALIDATE, }, nil case searcher.Checkpoint: return experimentv1.RunnableOperation{ Type: experimentv1.RunnableType_RUNNABLE_TYPE_CHECKPOINT, }, nil default: return experimentv1.RunnableOperation{}, fmt.Errorf("unrecognized searcher.Runnable %s", op) } } for _, result := range sim.Results { var operations []*experimentv1.RunnableOperation for _, msg := range result { op, err := toProto(msg) if err != nil { return nil, errors.Wrapf(err, "error converting msg in simultion result %s", msg) } operations = append(operations, &op) } hash := fmt.Sprint(operations) if i, ok := indexes[hash]; ok { protoSim.Trials[i].Occurrences++ } else { protoSim.Trials = append(protoSim.Trials, &experimentv1.TrialSimulation{Operations: operations, Occurrences: 1}) indexes[hash] = len(protoSim.Trials) - 1 } } return &apiv1.PreviewHPSearchResponse{Simulation: protoSim}, nil } func (a *apiServer) ActivateExperiment( ctx context.Context, req *apiv1.ActivateExperimentRequest, ) (resp *apiv1.ActivateExperimentResponse, err error) { if err = a.checkExperimentExists(int(req.Id)); err != nil { return nil, err } addr := 
actor.Addr("experiments", req.Id).String() switch err = a.actorRequest(addr, req, &resp); { case status.Code(err) == codes.NotFound: return nil, status.Error(codes.FailedPrecondition, "experiment in terminal state") case err != nil: return nil, status.Errorf(codes.Internal, "failed passing request to experiment actor: %s", err) default: return resp, nil } } func (a *apiServer) PauseExperiment( ctx context.Context, req *apiv1.PauseExperimentRequest, ) (resp *apiv1.PauseExperimentResponse, err error) { if err = a.checkExperimentExists(int(req.Id)); err != nil { return nil, err } addr := actor.Addr("experiments", req.Id).String() switch err = a.actorRequest(addr, req, &resp); { case status.Code(err) == codes.NotFound: return nil, status.Error(codes.FailedPrecondition, "experiment in terminal state") case err != nil: return nil, status.Errorf(codes.Internal, "failed passing request to experiment actor: %s", err) default: return resp, nil } } func (a *apiServer) CancelExperiment( ctx context.Context, req *apiv1.CancelExperimentRequest, ) (resp *apiv1.CancelExperimentResponse, err error) { if err = a.checkExperimentExists(int(req.Id)); err != nil { return nil, err } addr := actor.Addr("experiments", req.Id).String() err = a.actorRequest(addr, req, &resp) if status.Code(err) == codes.NotFound { return &apiv1.CancelExperimentResponse{}, nil } return resp, err } func (a *apiServer) KillExperiment( ctx context.Context, req *apiv1.KillExperimentRequest, ) ( resp *apiv1.KillExperimentResponse, err error) { if err = a.checkExperimentExists(int(req.Id)); err != nil { return nil, err } addr := actor.Addr("experiments", req.Id).String() err = a.actorRequest(addr, req, &resp) if status.Code(err) == codes.NotFound { return &apiv1.KillExperimentResponse{}, nil } return resp, err } func (a *apiServer) ArchiveExperiment( ctx context.Context, req *apiv1.ArchiveExperimentRequest, ) (*apiv1.ArchiveExperimentResponse, error) { id := int(req.Id) dbExp, err := a.m.db.ExperimentWithoutConfigByID(id) if err != nil { return nil, errors.Wrapf(err, "loading experiment %v", id) } if _, ok := model.TerminalStates[dbExp.State]; !ok { return nil, errors.Errorf("cannot archive experiment %v in non terminate state %v", id, dbExp.State) } if dbExp.Archived { return &apiv1.ArchiveExperimentResponse{}, nil } dbExp.Archived = true err = a.m.db.SaveExperimentArchiveStatus(dbExp) switch err { case nil: return &apiv1.ArchiveExperimentResponse{}, nil default: return nil, errors.Wrapf(err, "failed to archive experiment %d", req.Id) } } func (a *apiServer) UnarchiveExperiment( ctx context.Context, req *apiv1.UnarchiveExperimentRequest, ) (*apiv1.UnarchiveExperimentResponse, error) { id := int(req.Id) dbExp, err := a.m.db.ExperimentWithoutConfigByID(id) if err != nil { return nil, errors.Wrapf(err, "loading experiment %v", id) } if _, ok := model.TerminalStates[dbExp.State]; !ok { return nil, errors.Errorf("cannot unarchive experiment %v in non terminate state %v", id, dbExp.State) } if !dbExp.Archived { return &apiv1.UnarchiveExperimentResponse{}, nil } dbExp.Archived = false err = a.m.db.SaveExperimentArchiveStatus(dbExp) switch err { case nil: return &apiv1.UnarchiveExperimentResponse{}, nil default: return nil, errors.Wrapf(err, "failed to archive experiment %d", req.Id) } } func (a *apiServer) PatchExperiment( ctx context.Context, req *apiv1.PatchExperimentRequest, ) (*apiv1.PatchExperimentResponse, error) { var exp experimentv1.Experiment switch err := a.m.db.QueryProto("get_experiment", &exp, req.Experiment.Id); { case err == 
db.ErrNotFound: return nil, status.Errorf(codes.NotFound, "experiment not found: %d", req.Experiment.Id) case err != nil: return nil, errors.Wrapf(err, "error fetching experiment from database: %d", req.Experiment.Id) } paths := req.UpdateMask.GetPaths() for _, path := range paths { switch { case path == "description": exp.Description = req.Experiment.Description case path == "labels": exp.Labels = req.Experiment.Labels case !strings.HasPrefix(path, "update_mask"): return nil, status.Errorf( codes.InvalidArgument, "only description and labels fields are mutable. cannot update %s", path) } } type experimentPatch struct { Labels []string `json:"labels"` Description string `json:"description"` } patches := experimentPatch{Description: exp.Description, Labels: exp.Labels} marshalledPatches, err := json.Marshal(patches) if err != nil { return nil, errors.Wrap(err, "failed to marshal experiment patches") } if _, err := a.m.db.RawQuery( "patch_experiment", req.Experiment.Id, marshalledPatches, ); err != nil { return nil, errors.Wrapf(err, "error updating experiment in database: %d", req.Experiment.Id) } return &apiv1.PatchExperimentResponse{Experiment: &exp}, nil } func (a *apiServer) GetExperimentCheckpoints( ctx context.Context, req *apiv1.GetExperimentCheckpointsRequest, ) (*apiv1.GetExperimentCheckpointsResponse, error) { ok, err := a.m.db.CheckExperimentExists(int(req.Id)) switch { case err != nil: return nil, status.Errorf(codes.Internal, "failed to check if experiment exists: %s", err) case !ok: return nil, status.Errorf(codes.NotFound, "experiment %d not found", req.Id) } resp := &apiv1.GetExperimentCheckpointsResponse{} resp.Checkpoints = []*checkpointv1.Checkpoint{} switch err := a.m.db.QueryProto("get_checkpoints_for_experiment", &resp.Checkpoints, req.Id); { case err == db.ErrNotFound: return nil, status.Errorf( codes.NotFound, "no checkpoints found for experiment %d", req.Id) case err != nil: return nil, errors.Wrapf(err, "error fetching checkpoints for experiment %d from database", req.Id) } a.filter(&resp.Checkpoints, func(i int) bool { v := resp.Checkpoints[i] found := false for _, state := range req.States { if state == v.State { found = true break } } if len(req.States) != 0 && !found { return false } found = false for _, state := range req.ValidationStates { if state == v.ValidationState { found = true break } } if len(req.ValidationStates) != 0 && !found { return false } return true }) a.sort( resp.Checkpoints, req.OrderBy, req.SortBy, apiv1.GetExperimentCheckpointsRequest_SORT_BY_TRIAL_ID) return resp, a.paginate(&resp.Pagination, &resp.Checkpoints, req.Offset, req.Limit) }
isInList
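isInList and matchesList above implement case-insensitive substring matching: every requested label must match at least one source label. The same check restated in Python (the label values in the asserts are invented):

def is_in_list(src_list, item):
    item = item.lower()
    return any(item in src.lower() for src in src_list)

def matches_list(src_list, match_list):
    # Vacuously true for an empty match_list, as in the Go version.
    return all(is_in_list(src_list, m) for m in match_list)

assert matches_list(["GPU-Training", "nightly"], ["gpu", "night"])
assert not matches_list(["GPU-Training"], ["cpu"])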
test_construction.py
import numpy as np import pytest import pandas as pd import pandas._testing as tm from pandas.arrays import BooleanArray from pandas.core.arrays.boolean import coerce_to_array @pytest.fixture def data(): return pd.array( [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], dtype="boolean", ) def test_boolean_array_constructor(): values = np.array([True, False, True, False], dtype="bool") mask = np.array([False, False, False, True], dtype="bool") result = BooleanArray(values, mask) expected = pd.array([True, False, True, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) with pytest.raises(TypeError, match="values should be boolean numpy array"): BooleanArray(values.tolist(), mask) with pytest.raises(TypeError, match="mask should be boolean numpy array"): BooleanArray(values, mask.tolist()) with pytest.raises(TypeError, match="values should be boolean numpy array"): BooleanArray(values.astype(int), mask) with pytest.raises(TypeError, match="mask should be boolean numpy array"): BooleanArray(values, None) with pytest.raises(ValueError, match="values must be a 1D array"): BooleanArray(values.reshape(1, -1), mask) with pytest.raises(ValueError, match="mask must be a 1D array"): BooleanArray(values, mask.reshape(1, -1)) def test_boolean_array_constructor_copy(): values = np.array([True, False, True, False], dtype="bool") mask = np.array([False, False, False, True], dtype="bool") result = BooleanArray(values, mask) assert result._data is values assert result._mask is mask result = BooleanArray(values, mask, copy=True) assert result._data is not values assert result._mask is not mask def
(): expected = BooleanArray( np.array([True, False, True]), np.array([False, False, False]) ) result = pd.array([True, False, True], dtype="boolean") tm.assert_extension_array_equal(result, expected) result = pd.array(np.array([True, False, True]), dtype="boolean") tm.assert_extension_array_equal(result, expected) result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean") tm.assert_extension_array_equal(result, expected) # with missing values expected = BooleanArray( np.array([True, False, True]), np.array([False, False, True]) ) result = pd.array([True, False, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean") tm.assert_extension_array_equal(result, expected) def test_to_boolean_array_all_none(): expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True])) result = pd.array([None, None, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean") tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize( "a, b", [ ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]), ([True, np.nan], [True, None]), ([True, pd.NA], [True, None]), ([np.nan, np.nan], [None, None]), (np.array([np.nan, np.nan], dtype=float), [None, None]), ], ) def test_to_boolean_array_missing_indicators(a, b): result = pd.array(a, dtype="boolean") expected = pd.array(b, dtype="boolean") tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize( "values", [ ["foo", "bar"], ["1", "2"], # "foo", [1, 2], [1.0, 2.0], pd.date_range("20130101", periods=2), np.array(["foo"]), np.array([1, 2]), np.array([1.0, 2.0]), [np.nan, {"a": 1}], ], ) def test_to_boolean_array_error(values): # error in converting existing arrays to BooleanArray msg = "Need to pass bool-like value" with pytest.raises(TypeError, match=msg): pd.array(values, dtype="boolean") def test_to_boolean_array_from_integer_array(): result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean") expected = pd.array([True, False, True, False], dtype="boolean") tm.assert_extension_array_equal(result, expected) # with missing values result = pd.array(np.array([1, 0, 1, None]), dtype="boolean") expected = pd.array([True, False, True, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) def test_to_boolean_array_from_float_array(): result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean") expected = pd.array([True, False, True, False], dtype="boolean") tm.assert_extension_array_equal(result, expected) # with missing values result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean") expected = pd.array([True, False, True, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) def test_to_boolean_array_integer_like(): # integers of 0's and 1's result = pd.array([1, 0, 1, 0], dtype="boolean") expected = pd.array([True, False, True, False], dtype="boolean") tm.assert_extension_array_equal(result, expected) # with missing values result = pd.array([1, 0, 1, None], dtype="boolean") expected = pd.array([True, False, True, None], dtype="boolean") tm.assert_extension_array_equal(result, expected) def test_coerce_to_array(): # TODO this is currently not public API values = np.array([True, False, True, False], dtype="bool") mask = np.array([False, False, False, True], dtype="bool") result = BooleanArray(*coerce_to_array(values, mask=mask)) expected = 
BooleanArray(values, mask) tm.assert_extension_array_equal(result, expected) assert result._data is values assert result._mask is mask result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True)) expected = BooleanArray(values, mask) tm.assert_extension_array_equal(result, expected) assert result._data is not values assert result._mask is not mask # mixed missing from values and mask values = [True, False, None, False] mask = np.array([False, False, False, True], dtype="bool") result = BooleanArray(*coerce_to_array(values, mask=mask)) expected = BooleanArray( np.array([True, False, True, True]), np.array([False, False, True, True]) ) tm.assert_extension_array_equal(result, expected) result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask)) tm.assert_extension_array_equal(result, expected) result = BooleanArray(*coerce_to_array(values, mask=mask.tolist())) tm.assert_extension_array_equal(result, expected) # raise errors for wrong dimension values = np.array([True, False, True, False], dtype="bool") mask = np.array([False, False, False, True], dtype="bool") with pytest.raises(ValueError, match="values must be a 1D list-like"): coerce_to_array(values.reshape(1, -1)) with pytest.raises(ValueError, match="mask must be a 1D list-like"): coerce_to_array(values, mask=mask.reshape(1, -1)) def test_coerce_to_array_from_boolean_array(): # passing BooleanArray to coerce_to_array values = np.array([True, False, True, False], dtype="bool") mask = np.array([False, False, False, True], dtype="bool") arr = BooleanArray(values, mask) result = BooleanArray(*coerce_to_array(arr)) tm.assert_extension_array_equal(result, arr) # no copy assert result._data is arr._data assert result._mask is arr._mask result = BooleanArray(*coerce_to_array(arr), copy=True) tm.assert_extension_array_equal(result, arr) assert result._data is not arr._data assert result._mask is not arr._mask with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"): coerce_to_array(arr, mask=mask) def test_coerce_to_numpy_array(): # with missing values -> object dtype arr = pd.array([True, False, None], dtype="boolean") result = np.array(arr) expected = np.array([True, False, pd.NA], dtype="object") tm.assert_numpy_array_equal(result, expected) # also with no missing values -> object dtype arr = pd.array([True, False, True], dtype="boolean") result = np.array(arr) expected = np.array([True, False, True], dtype="object") tm.assert_numpy_array_equal(result, expected) # force bool dtype result = np.array(arr, dtype="bool") expected = np.array([True, False, True], dtype="bool") tm.assert_numpy_array_equal(result, expected) # with missing values will raise error arr = pd.array([True, False, None], dtype="boolean") msg = ( "cannot convert to 'bool'-dtype NumPy array with missing values. " "Specify an appropriate 'na_value' for this dtype." 
) with pytest.raises(ValueError, match=msg): np.array(arr, dtype="bool") def test_to_boolean_array_from_strings(): result = BooleanArray._from_sequence_of_strings( np.array(["True", "False", np.nan], dtype=object) ) expected = BooleanArray( np.array([True, False, False]), np.array([False, False, True]) ) tm.assert_extension_array_equal(result, expected) def test_to_boolean_array_from_strings_invalid_string(): with pytest.raises(ValueError, match="cannot be cast"): BooleanArray._from_sequence_of_strings(["donkey"]) @pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) def test_to_numpy(box): con = pd.Series if box else pd.array # default (with or without missing values) -> object dtype arr = con([True, False, True], dtype="boolean") result = arr.to_numpy() expected = np.array([True, False, True], dtype="object") tm.assert_numpy_array_equal(result, expected) arr = con([True, False, None], dtype="boolean") result = arr.to_numpy() expected = np.array([True, False, pd.NA], dtype="object") tm.assert_numpy_array_equal(result, expected) arr = con([True, False, None], dtype="boolean") result = arr.to_numpy(dtype="str") expected = np.array([True, False, pd.NA], dtype="<U5") tm.assert_numpy_array_equal(result, expected) # no missing values -> can convert to bool, otherwise raises arr = con([True, False, True], dtype="boolean") result = arr.to_numpy(dtype="bool") expected = np.array([True, False, True], dtype="bool") tm.assert_numpy_array_equal(result, expected) arr = con([True, False, None], dtype="boolean") with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"): result = arr.to_numpy(dtype="bool") # specify dtype and na_value arr = con([True, False, None], dtype="boolean") result = arr.to_numpy(dtype=object, na_value=None) expected = np.array([True, False, None], dtype="object") tm.assert_numpy_array_equal(result, expected) result = arr.to_numpy(dtype=bool, na_value=False) expected = np.array([True, False, False], dtype="bool") tm.assert_numpy_array_equal(result, expected) result = arr.to_numpy(dtype="int64", na_value=-99) expected = np.array([1, 0, -99], dtype="int64") tm.assert_numpy_array_equal(result, expected) result = arr.to_numpy(dtype="float64", na_value=np.nan) expected = np.array([1, 0, np.nan], dtype="float64") tm.assert_numpy_array_equal(result, expected) # converting to int or float without specifying na_value raises with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"): arr.to_numpy(dtype="int64") with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"): arr.to_numpy(dtype="float64") def test_to_numpy_copy(): # to_numpy can be zero-copy if no missing values arr = pd.array([True, False, True], dtype="boolean") result = arr.to_numpy(dtype=bool) result[0] = False tm.assert_extension_array_equal( arr, pd.array([False, False, True], dtype="boolean") ) arr = pd.array([True, False, True], dtype="boolean") result = arr.to_numpy(dtype=bool, copy=True) result[0] = False tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean")) # FIXME: don't leave commented out # TODO when BooleanArray coerces to object dtype numpy array, need to do conversion # manually in the indexing code # def test_indexing_boolean_mask(): # arr = pd.array([1, 2, 3, 4], dtype="Int64") # mask = pd.array([True, False, True, False], dtype="boolean") # result = arr[mask] # expected = pd.array([1, 3], dtype="Int64") # tm.assert_extension_array_equal(result, expected) # # missing values -> error # mask = pd.array([True, False, True, 
None], dtype="boolean") # with pytest.raises(IndexError): # result = arr[mask]
test_to_boolean_array
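The tests above all hinge on BooleanArray's two-array representation: a bool payload plus a bool mask that flags missing slots. A minimal demo (assumes a pandas version with the nullable "boolean" dtype; _mask is private API, touched here only to peek at the representation):

import pandas as pd

arr = pd.array([True, False, None], dtype="boolean")
assert arr._mask.tolist() == [False, False, True]  # True marks the missing slot
assert arr.to_numpy()[2] is pd.NA                  # missing surfaces as pd.NA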
one-use-in-fn-argument.rs
#![deny(single_use_lifetimes)] #![allow(dead_code)] #![allow(unused_variables)] // Test that we DO warn when lifetime name is used only // once in a fn argument. fn a<'a>(x: &'a u32) { //~ ERROR `'a` only used once //~^ HELP elide the single-use lifetime } struct Single<'a> { x: &'a u32 } struct Double<'a, 'b> { f: &'a &'b u32 } fn center<'m>(_: Single<'m>) {} //~ ERROR `'m` only used once //~^ HELP elide the single-use lifetime fn left<'x, 'y>(foo: Double<'x, 'y>) -> &'x u32 { foo.f } //~ ERROR `'y` only used once //~^ HELP elide the single-use lifetime fn right<'x, 'y>(foo: Double<'x, 'y>) -> &'y u32 { foo.f } //~ ERROR `'x` only used once //~^ HELP elide the single-use lifetime pub trait Tfv<'a> {} // Do NOT lint in an HRTB. pub fn g<T: for<'a> Tfv<'a>>() {}
S: Tfv<'a>, {} fn main() {}
// Do NOT lint for trait bounds. pub fn h<'a, S>(_: S) where
custom.js
function showHide(id) { var ele = document.getElementById("form"+id); var text = document.getElementById("plusminusbutton"+id); if(ele.style.display == "block") { ele.style.display = "none"; text.innerHTML = "+"; } else { ele.style.display = "block"; text.innerHTML = "-"; } } $(document).ready(function(){ setTimeout(function() { $('#successMessage').fadeOut('fast'); }, 5000); // <-- time in milliseconds }); $(document).ready(function(){ setTimeout(function() { $('#error_message_red').fadeOut('fast'); }, 5000); // <-- time in milliseconds }); function findDuty(id) { if(id!='') { $.ajax({ type:"GET", url:"/targets/getduties/"+id, success:function(data) { // alert(data); $("#duty").show(); $("#duty").html(data); } }) ;
} } function isNumberKey(evt) { var charCode = (evt.which) ? evt.which : evt.keyCode; if (charCode != 46 && charCode > 31 && (charCode < 48 || charCode > 57)) return false; return true; } function isIntegerKey(evt) { evt = (evt) ? evt : window.event; var charCode = (evt.which) ? evt.which : evt.keyCode; if (charCode > 31 && (charCode < 48 || charCode > 57)) { return false; } return true; } function AddNewShow(table_name,field_name,placeholder) { $("#table_name").val(table_name); $("#field_name").val(field_name); $("#placeholder").val(placeholder); $("#common_details").modal('show'); $("#name").val(''); } /************************************************************************************/ function AddDepot(depots) { $("#"+depots).modal('show'); $("#name").val(''); } function AddNew() { var table_name = $("#table_name").val(); var field_name = $("#field_name").val(); var name = $("#name").val(); var placeholder = $("#placeholder").val(); var string_length="&table_name="+table_name+"&field_name="+field_name+"&placeholder="+placeholder+"&name="+name; $.ajax({ type:"post", url:'/denominations/add_new', data:string_length, success: function (data) { if(data==1) { $("#add_new_data_danger").show(); $("#add_new_data_danger").html("This record already exists! Please select another."); $("#add_new_data").hide(); }else{ $("#add_new_data").show(); $("#add_new_data_danger").hide(); $("#add_new_data").html("Record Updated Successfully."); $("#denomination_masters").html(data); setTimeout(function () { $('#add_new_data').fadeOut('fast'); }, 5000); // <-- time in milliseconds } } }) } function formValidation(){ var tm = document.voiceavpn.time.value; return validateTime(tm); } function validateTime(tm){ var newreg = /^(([0-1][0-9])|(2[0-3])):[0-5][0-9]$/; if(!newreg.test(tm)){ alert("Invalid time format\n The valid format is hh:mm\n"); return false; } return true; }
}else { $("#duty").hide();
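The validateTime regex above accepts 24-hour "hh:mm" times (00:00 through 23:59). The same check restated in Python for reference:

import re

HHMM = re.compile(r"^(([0-1][0-9])|(2[0-3])):[0-5][0-9]$")

def validate_time(tm):
    # Hours 00-19 via [0-1][0-9], 20-23 via 2[0-3]; minutes 00-59.
    return bool(HHMM.match(tm))

assert validate_time("23:59") and validate_time("09:05")
assert not validate_time("24:00") and not validate_time("9:5")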
models.py
from peewee import * import json from datetime import datetime #set sane default log levels import logging logging.getLogger('peewee').setLevel(logging.INFO) logging.getLogger("peewee.pool").setLevel(logging.DEBUG) database = SqliteDatabase('detector.db') class JSONField(TextField): def db_value(self, value): if value is not None: return json.dumps(value) return None def
(self, value): if value is not None: return json.loads(value) class BaseModel(Model): def __init__(self, *args, **kwargs): super(BaseModel, self).__init__( *args, **kwargs ) self._meta.base_uri = self._meta.db_table class Meta: database = database base_uri = "unknown" class SystemOption(BaseModel): key = CharField(max_length=64, unique=True, index=True) value = CharField(max_length=255) class ActiveEntity(BaseModel): uuid = CharField(max_length=64, unique=True, index=True) last_active = DateTimeField(null=True) total_packets = IntegerField(default=0) metadata = JSONField(null=True) class Meta: order_by = ('uuid', ) class Detector(ActiveEntity): pass class Beacon(ActiveEntity): is_accepted = IntegerField(default=0) class Agent(ActiveEntity): pass class Signal(BaseModel): date = DateTimeField(default=datetime.utcnow) detector = ForeignKeyField(rel_model=Detector) beacon = ForeignKeyField(rel_model=Beacon) rssi = FloatField() source_data = CharField(max_length=255, null=True) class Training(BaseModel): date = DateTimeField(default=datetime.utcnow) beacon = ForeignKeyField(rel_model=Beacon) expectation = JSONField() is_used = IntegerField(default=1) class Meta: order_by = ('date', 'expectation', 'beacon') class TrainingSignal(BaseModel): training = ForeignKeyField(rel_model=Training, related_name='signals') signal = ForeignKeyField(rel_model=Signal) def initialize(): database.connect() database.create_tables([ SystemOption ], safe=True) database.create_tables([ Detector, Beacon, Agent ], safe=True) database.create_tables([ Signal ], safe=True) database.create_tables([ Training, TrainingSignal ], safe=True) database.close()
python_value
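JSONField above stores Python objects as JSON text: db_value serializes on write, and python_value (supplied as this record's middle) deserializes on read. A minimal round-trip sketch, assuming peewee is installed and using an in-memory database instead of detector.db; the Blob model is illustrative, not from the source:

import json

from peewee import Model, SqliteDatabase, TextField

db = SqliteDatabase(":memory:")

class JSONField(TextField):
    def db_value(self, value):
        # Serialize on the way into the database.
        return json.dumps(value) if value is not None else None

    def python_value(self, value):
        # Deserialize on the way out.
        return json.loads(value) if value is not None else None

class Blob(Model):
    payload = JSONField(null=True)

    class Meta:
        database = db

db.connect()
db.create_tables([Blob], safe=True)
Blob.create(payload={"rssi": -42.5})
assert Blob.select().first().payload["rssi"] == -42.5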
all_roles_responses.go
// Code generated by go-swagger; DO NOT EDIT. package role // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "io" "github.com/go-openapi/runtime" "github.com/go-openapi/strfmt" "github.com/Foxtel-DnA/looker-go-sdk/models" ) // AllRolesReader is a Reader for the AllRoles structure. type AllRolesReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the received o. func (o *AllRolesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewAllRolesOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil case 400: result := NewAllRolesBadRequest() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return nil, result case 404: result := NewAllRolesNotFound() if err := result.readResponse(response, consumer, o.formats); err != nil
return nil, result default: return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) } } // NewAllRolesOK creates a AllRolesOK with default headers values func NewAllRolesOK() *AllRolesOK { return &AllRolesOK{} } /* AllRolesOK describes a response with status code 200, with default header values. Role */ type AllRolesOK struct { Payload []*models.Role } func (o *AllRolesOK) Error() string { return fmt.Sprintf("[GET /roles][%d] allRolesOK %+v", 200, o.Payload) } func (o *AllRolesOK) GetPayload() []*models.Role { return o.Payload } func (o *AllRolesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { // response payload if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { return err } return nil } // NewAllRolesBadRequest creates a AllRolesBadRequest with default headers values func NewAllRolesBadRequest() *AllRolesBadRequest { return &AllRolesBadRequest{} } /* AllRolesBadRequest describes a response with status code 400, with default header values. Bad Request */ type AllRolesBadRequest struct { Payload *models.Error } func (o *AllRolesBadRequest) Error() string { return fmt.Sprintf("[GET /roles][%d] allRolesBadRequest %+v", 400, o.Payload) } func (o *AllRolesBadRequest) GetPayload() *models.Error { return o.Payload } func (o *AllRolesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { o.Payload = new(models.Error) // response payload if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } return nil } // NewAllRolesNotFound creates a AllRolesNotFound with default headers values func NewAllRolesNotFound() *AllRolesNotFound { return &AllRolesNotFound{} } /* AllRolesNotFound describes a response with status code 404, with default header values. Not Found */ type AllRolesNotFound struct { Payload *models.Error } func (o *AllRolesNotFound) Error() string { return fmt.Sprintf("[GET /roles][%d] allRolesNotFound %+v", 404, o.Payload) } func (o *AllRolesNotFound) GetPayload() *models.Error { return o.Payload } func (o *AllRolesNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { o.Payload = new(models.Error) // response payload if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } return nil }
{ return nil, err }
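ReadResponse above is a plain status-code switch: 200 yields the payload, 400 and 404 come back as typed errors, and anything else becomes the generic API error. The same dispatch shape as a Python sketch (names are illustrative, not part of the generated client):

class BadRequest(Exception):
    pass

class NotFound(Exception):
    pass

def read_response(status, body):
    # Mirrors AllRolesReader.ReadResponse: success returns a value,
    # the documented failure codes raise typed errors, and unknown
    # codes raise a generic error.
    if status == 200:
        return body
    if status == 400:
        raise BadRequest(body)
    if status == 404:
        raise NotFound(body)
    raise RuntimeError(
        "response status code does not match any response statuses "
        "defined for this endpoint: %d" % status)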
graph.rs
use parking_lot::Mutex; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef}; use rustc_data_structures::sharded::{self, Sharded}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::steal::Steal; use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering}; use rustc_index::vec::IndexVec; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; use smallvec::{smallvec, SmallVec}; use std::assert_matches::assert_matches; use std::collections::hash_map::Entry; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; use std::sync::atomic::Ordering::Relaxed; use super::query::DepGraphQuery; use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex}; use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId}; use crate::ich::StableHashingContext; use crate::query::{QueryContext, QuerySideEffects}; #[cfg(debug_assertions)] use {super::debug::EdgeFilter, std::env}; #[derive(Clone)] pub struct DepGraph<K: DepKind> { data: Option<Lrc<DepGraphData<K>>>, /// This field is used for assigning DepNodeIndices when running in /// non-incremental mode. Even in non-incremental mode we make sure that /// each task has a `DepNodeIndex` that uniquely identifies it. This unique /// ID is used for self-profiling. virtual_dep_node_index: Lrc<AtomicU32>, } rustc_index::newtype_index! { pub struct DepNodeIndex { .. } } impl DepNodeIndex { pub const INVALID: DepNodeIndex = DepNodeIndex::MAX; pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0); } impl std::convert::From<DepNodeIndex> for QueryInvocationId { #[inline] fn from(dep_node_index: DepNodeIndex) -> Self { QueryInvocationId(dep_node_index.as_u32()) } } #[derive(PartialEq)] pub enum DepNodeColor { Red, Green(DepNodeIndex), } impl DepNodeColor { pub fn is_green(self) -> bool { match self { DepNodeColor::Red => false, DepNodeColor::Green(_) => true, } } } struct DepGraphData<K: DepKind> { /// The new encoding of the dependency graph, optimized for red/green /// tracking. The `current` field is the dependency graph of only the /// current compilation session: We don't merge the previous dep-graph into /// current one anymore, but we do reference shared data to save space. current: CurrentDepGraph<K>, /// The dep-graph from the previous compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. previous: SerializedDepGraph<K>, colors: DepNodeColorMap, processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>, /// When we load, there may be `.o` files, cached MIR, or other such /// things available to us. If we find that they are not dirty, we /// load the path to the file storing those work-products here into /// this map. We can later look for and extract that data. 
previous_work_products: FxHashMap<WorkProductId, WorkProduct>, dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>, /// Used by incremental compilation tests to assert that /// a particular query result was decoded from disk /// (not just marked green) debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>, } pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint where R: for<'a> HashStable<StableHashingContext<'a>>, { let mut stable_hasher = StableHasher::new(); result.hash_stable(hcx, &mut stable_hasher); stable_hasher.finish() } impl<K: DepKind> DepGraph<K> { pub fn new( profiler: &SelfProfilerRef, prev_graph: SerializedDepGraph<K>, prev_work_products: FxHashMap<WorkProductId, WorkProduct>, encoder: FileEncoder, record_graph: bool, record_stats: bool, ) -> DepGraph<K> { let prev_graph_node_count = prev_graph.node_count(); let current = CurrentDepGraph::new( profiler, prev_graph_node_count, encoder, record_graph, record_stats, ); // Instantiate a dependy-less node only once for anonymous queries. let _green_node_index = current.intern_new_node( profiler, DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() }, smallvec![], Fingerprint::ZERO, ); debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE); DepGraph { data: Some(Lrc::new(DepGraphData { previous_work_products: prev_work_products, dep_node_debug: Default::default(), current, processed_side_effects: Default::default(), previous: prev_graph, colors: DepNodeColorMap::new(prev_graph_node_count), debug_loaded_from_disk: Default::default(), })), virtual_dep_node_index: Lrc::new(AtomicU32::new(0)), } } pub fn new_disabled() -> DepGraph<K> { DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) } } /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise. #[inline] pub fn is_fully_enabled(&self) -> bool { self.data.is_some() } pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) { if let Some(data) = &self.data { data.current.encoder.borrow().with_query(f) } } pub fn assert_ignored(&self) { if let Some(..) = self.data { K::read_deps(|task_deps| { assert_matches!( task_deps, TaskDepsRef::Ignore, "expected no task dependency tracking" ); }) } } pub fn with_ignore<OP, R>(&self, op: OP) -> R where OP: FnOnce() -> R, { K::with_deps(TaskDepsRef::Ignore, op) } /// Used to wrap the deserialization of a query result from disk, /// This method enforces that no new `DepNodes` are created during /// query result deserialization. /// /// Enforcing this makes the query dep graph simpler - all nodes /// must be created during the query execution, and should be /// created from inside the 'body' of a query (the implementation /// provided by a particular compiler crate). /// /// Consider the case of three queries `A`, `B`, and `C`, where /// `A` invokes `B` and `B` invokes `C`: /// /// `A -> B -> C` /// /// Suppose that decoding the result of query `B` required re-computing /// the query `C`. If we did not create a fresh `TaskDeps` when /// decoding `B`, we would still be using the `TaskDeps` for query `A` /// (if we needed to re-execute `A`). This would cause us to create /// a new edge `A -> C`. If this edge did not previously /// exist in the `DepGraph`, then we could end up with a different /// `DepGraph` at the end of compilation, even if there were no /// meaningful changes to the overall program (e.g. a newline was added). 
/// In addition, this edge might cause a subsequent compilation run /// to try to force `C` before marking other necessary nodes green. If /// `C` did not exist in the new compilation session, then we could /// get an ICE. Normally, we would have tried (and failed) to mark /// some other query green (e.g. `item_children`) which was used /// to obtain `C`, which would prevent us from ever trying to force /// a non-existent `D`. /// /// It might be possible to enforce that all `DepNode`s read during /// deserialization already exist in the previous `DepGraph`. In /// the above example, we would invoke `D` during the deserialization /// of `B`. Since we correctly create a new `TaskDeps` from the decoding /// of `B`, this would result in an edge `B -> D`. If that edge already /// existed (with the same `DepPathHash`es), then it should be correct /// to allow the invocation of the query to proceed during deserialization /// of a query result. We would merely assert that the dep-graph fragment /// that would have been added by invoking `C` while decoding `B` /// is equivalent to the dep-graph fragment that we already instantiated for B /// (at the point where we successfully marked B as green). /// /// However, this would require additional complexity /// in the query infrastructure, and is not currently needed by the /// decoding of any query results. Should the need arise in the future, /// we should consider extending the query system with this functionality. pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R where OP: FnOnce() -> R, { K::with_deps(TaskDepsRef::Forbid, op) } /// Starts a new dep-graph task. Dep-graph tasks are specified /// using a free function (`task`) and **not** a closure -- this /// is intentional because we want to exercise tight control over /// what state they have access to. In particular, we want to /// prevent implicit 'leaks' of tracked state into the task (which /// could then be read without generating correct edges in the /// dep-graph -- see the [rustc dev guide] for more details on /// the dep-graph). To this end, the task function gets exactly two /// pieces of state: the context `cx` and an argument `arg`. Both /// of these bits of state must be of some type that implements /// `DepGraphSafe` and hence does not leak. /// /// The choice of two arguments is not fundamental. One argument /// would work just as well, since multiple values can be /// collected using tuples. However, using two arguments works out /// to be quite convenient, since it is common to need a context /// (`cx`) and some argument (e.g., a `DefId` identifying what /// item to process). /// /// For cases where you need some other number of arguments: /// /// - If you only need one argument, just use `()` for the `arg` /// parameter. /// - If you need 3+ arguments, use a tuple for the /// `arg` parameter. /// /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>( &self, key: DepNode<K>, cx: Ctxt, arg: A, task: fn(Ctxt, A) -> R, hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>, ) -> (R, DepNodeIndex) { if self.is_fully_enabled() { self.with_task_impl(key, cx, arg, task, hash_result) } else { // Incremental compilation is turned off. We just execute the task // without tracking. We still provide a dep-node index that uniquely // identifies the task so that we have a cheap way of referring to // the query for self-profiling. 
(task(cx, arg), self.next_virtual_depnode_index()) } } fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>( &self, key: DepNode<K>, cx: Ctxt, arg: A, task: fn(Ctxt, A) -> R, hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>, ) -> (R, DepNodeIndex) { // This function is only called when the graph is enabled. let data = self.data.as_ref().unwrap(); // If the following assertion triggers, it can have two reasons: // 1. Something is wrong with DepNode creation, either here or // in `DepGraph::try_mark_green()`. // 2. Two distinct query keys get mapped to the same `DepNode` // (see for example #48923). assert!( !self.dep_node_exists(&key), "forcing query with already existing `DepNode`\n\ - query-key: {:?}\n\ - dep-node: {:?}", arg, key ); let task_deps = if cx.dep_context().is_eval_always(key.kind) { None } else { Some(Lock::new(TaskDeps { #[cfg(debug_assertions)] node: Some(key), reads: SmallVec::new(), read_set: Default::default(), phantom_data: PhantomData, })) }; let task_deps_ref = match &task_deps { Some(deps) => TaskDepsRef::Allow(deps), None => TaskDepsRef::Ignore, }; let result = K::with_deps(task_deps_ref, || task(cx, arg)); let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads); let dcx = cx.dep_context(); let hashing_timer = dcx.profiler().incr_result_hashing(); let current_fingerprint = hash_result.map(|f| { let mut hcx = dcx.create_stable_hashing_context(); f(&mut hcx, &result) }); let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks; // Intern the new `DepNode`. let (dep_node_index, prev_and_color) = data.current.intern_node( dcx.profiler(), &data.previous, key, edges, current_fingerprint, print_status, ); hashing_timer.finish_with_query_invocation_id(dep_node_index.into()); if let Some((prev_index, color)) = prev_and_color { debug_assert!( data.colors.get(prev_index).is_none(), "DepGraph::with_task() - Duplicate DepNodeColor \ insertion for {:?}", key ); data.colors.insert(prev_index, color); } (result, dep_node_index) } /// Executes something within an "anonymous" task, that is, a task the /// `DepNode` of which is determined by the list of inputs it read from. pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>( &self, cx: Ctxt, dep_kind: K, op: OP, ) -> (R, DepNodeIndex) where OP: FnOnce() -> R, { debug_assert!(!cx.is_eval_always(dep_kind)); if let Some(ref data) = self.data { let task_deps = Lock::new(TaskDeps::default()); let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op); let task_deps = task_deps.into_inner(); let task_deps = task_deps.reads; let dep_node_index = match task_deps.len() { 0 => { // Because the dep-node id of anon nodes is computed from the sets of its // dependencies we already know what the ID of this dependency-less node is // going to be (i.e. equal to the precomputed // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating // a `StableHasher` and sending the node through interning. DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE } 1 => { // When there is only one dependency, don't bother creating a node. task_deps[0] } _ => { // The dep node indices are hashed here instead of hashing the dep nodes of the // dependencies. These indices may refer to different nodes per session, but this isn't // a problem here because we that ensure the final dep node hash is per session only by // combining it with the per session random number `anon_id_seed`. 
This hash only need // to map the dependencies to a single value on a per session basis. let mut hasher = StableHasher::new(); task_deps.hash(&mut hasher); let target_dep_node = DepNode { kind: dep_kind, // Fingerprint::combine() is faster than sending Fingerprint // through the StableHasher (at least as long as StableHasher // is so slow). hash: data.current.anon_id_seed.combine(hasher.finish()).into(), }; data.current.intern_new_node( cx.profiler(), target_dep_node, task_deps, Fingerprint::ZERO, ) } }; (result, dep_node_index) } else { (op(), self.next_virtual_depnode_index()) } } #[inline] pub fn read_index(&self, dep_node_index: DepNodeIndex) { if let Some(ref data) = self.data { K::read_deps(|task_deps| { let mut task_deps = match task_deps { TaskDepsRef::Allow(deps) => deps.lock(), TaskDepsRef::Ignore => return, TaskDepsRef::Forbid => { panic!("Illegal read of: {:?}", dep_node_index) } }; let task_deps = &mut *task_deps; if cfg!(debug_assertions) { data.current.total_read_count.fetch_add(1, Relaxed); } // As long as we only have a low number of reads we can avoid doing a hash // insert and potentially allocating/reallocating the hashmap let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP { task_deps.reads.iter().all(|other| *other != dep_node_index) } else { task_deps.read_set.insert(dep_node_index) }; if new_read { task_deps.reads.push(dep_node_index); if task_deps.reads.len() == TASK_DEPS_READS_CAP { // Fill `read_set` with what we have so far so we can use the hashset // next time task_deps.read_set.extend(task_deps.reads.iter().copied()); } #[cfg(debug_assertions)] { if let Some(target) = task_deps.node { if let Some(ref forbidden_edge) = data.current.forbidden_edge { let src = forbidden_edge.index_to_node.lock()[&dep_node_index]; if forbidden_edge.test(&src, &target) { panic!("forbidden edge {:?} -> {:?} created", src, target) } } } } } else if cfg!(debug_assertions) { data.current.total_duplicate_read_count.fetch_add(1, Relaxed); } }) } } #[inline] pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex { self.dep_node_index_of_opt(dep_node).unwrap() } #[inline] pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> { let data = self.data.as_ref().unwrap(); let current = &data.current; if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { current.prev_index_to_index.lock()[prev_index] } else { current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied() } } #[inline] pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool { self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some() } pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> { self.data.as_ref().unwrap().previous.fingerprint_of(dep_node) } /// Checks whether a previous work product exists for `v` and, if /// so, return the path that leads to it. Used to skip doing work. pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> { self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned()) } /// Access the map of work-products created during the cached run. Only /// used during saving of the dep-graph. 
pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> { &self.data.as_ref().unwrap().previous_work_products } pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) { self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node); } pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool { self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node) } #[inline(always)] pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F) where F: FnOnce() -> String, { let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug; if dep_node_debug.borrow().contains_key(&dep_node) { return; } let debug_str = debug_str_gen(); dep_node_debug.borrow_mut().insert(dep_node, debug_str); } pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> { self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned() } fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> { if let Some(ref data) = self.data { if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { return data.colors.get(prev_index); } else { // This is a node that did not exist in the previous compilation session. return None; } } None } /// Try to mark a node index for the node dep_node. /// /// A node will have an index, when it's already been marked green, or when we can mark it /// green. This function will mark the current task as a reader of the specified node, when /// a node index can be found for that node. pub fn
<Ctxt: QueryContext<DepKind = K>>( &self, tcx: Ctxt, dep_node: &DepNode<K>, ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind)); // Return None if the dep graph is disabled let data = self.data.as_ref()?; // Return None if the dep node didn't exist in the previous session let prev_index = data.previous.node_to_index_opt(dep_node)?; match data.colors.get(prev_index) { Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)), Some(DepNodeColor::Red) => None, None => { // This DepNode and the corresponding query invocation existed // in the previous compilation session too, so we can try to // mark it as green by recursively marking all of its // dependencies green. self.try_mark_previous_green(tcx, data, prev_index, &dep_node) .map(|dep_node_index| (prev_index, dep_node_index)) } } } fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>( &self, tcx: Ctxt, data: &DepGraphData<K>, parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode<K>, ) -> Option<()> { let dep_dep_node_color = data.colors.get(parent_dep_node_index); let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green(_)) => { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. debug!( "try_mark_previous_green({:?}) --- found dependency {:?} to \ be immediately green", dep_node, dep_dep_node, ); return Some(()); } Some(DepNodeColor::Red) => { // We found a dependency the value of which has changed // compared to the previous compilation session. We cannot // mark the DepNode as green and also don't need to bother // with checking any of the other dependencies. debug!( "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red", dep_node, dep_dep_node, ); return None; } None => {} } // We don't know the state of this dependency. If it isn't // an eval_always node, let's try to mark it green recursively. if !tcx.dep_context().is_eval_always(dep_dep_node.kind) { debug!( "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \ is unknown, trying to mark it green", dep_node, dep_dep_node, dep_dep_node.hash, ); let node_index = self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node); if node_index.is_some() { debug!( "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green", dep_node, dep_dep_node ); return Some(()); } } // We failed to mark it green, so we try to force the query. debug!( "try_mark_previous_green({:?}) --- trying to force dependency {:?}", dep_node, dep_dep_node ); if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) { // The DepNode could not be forced. 
debug!( "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced", dep_node, dep_dep_node ); return None; } let dep_dep_node_color = data.colors.get(parent_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green(_)) => { debug!( "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green", dep_node, dep_dep_node ); return Some(()); } Some(DepNodeColor::Red) => { debug!( "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing", dep_node, dep_dep_node ); return None; } None => {} } if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() { panic!("try_mark_previous_green() - Forcing the DepNode should have set its color") } // If the query we just forced has resulted in // some kind of compilation error, we cannot rely on // the dep-node color having been properly updated. // This means that the query system has reached an // invalid state. We let the compiler continue (by // returning `None`) so it can emit error messages // and wind down, but rely on the fact that this // invalid state will not be persisted to the // incremental compilation cache because of // compilation errors being present. debug!( "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error", dep_node, dep_dep_node ); return None; } /// Try to mark a dep-node which existed in the previous compilation session as green. fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>( &self, tcx: Ctxt, data: &DepGraphData<K>, prev_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode<K>, ) -> Option<DepNodeIndex> { debug!("try_mark_previous_green({:?}) - BEGIN", dep_node); #[cfg(not(parallel_compiler))] { debug_assert!(!self.dep_node_exists(dep_node)); debug_assert!(data.colors.get(prev_dep_node_index).is_none()); } // We never try to mark eval_always nodes as green debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind)); debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); let prev_deps = data.previous.edge_targets_from(prev_dep_node_index); for &dep_dep_node_index in prev_deps { self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? } // If we got here without hitting a `return` that means that all // dependencies of this DepNode could be marked as green. Therefore we // can also mark this DepNode as green. // There may be multiple threads trying to mark the same dep node green concurrently // We allocating an entry for the node in the current dependency graph and // adding all the appropriate edges imported from the previous graph let dep_node_index = data.current.promote_node_and_deps_to_current( tcx.dep_context().profiler(), &data.previous, prev_dep_node_index, ); // ... emitting any stored diagnostic ... // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere // Maybe store a list on disk and encode this fact in the DepNodeState let side_effects = tcx.load_side_effects(prev_dep_node_index); #[cfg(not(parallel_compiler))] debug_assert!( data.colors.get(prev_dep_node_index).is_none(), "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ insertion for {:?}", dep_node ); if unlikely!(!side_effects.is_empty()) { self.emit_side_effects(tcx, data, dep_node_index, side_effects); } // ... and finally storing a "Green" entry in the color map. 
// Multiple threads can all write the same color here data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node); Some(dep_node_index) } /// Atomically emits some loaded diagnostics. /// This may be called concurrently on multiple threads for the same dep node. #[cold] #[inline(never)] fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>( &self, tcx: Ctxt, data: &DepGraphData<K>, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects, ) { let mut processed = data.processed_side_effects.lock(); if processed.insert(dep_node_index) { // We were the first to insert the node in the set so this thread // must process side effects // Promote the previous diagnostics to the current session. tcx.store_side_effects(dep_node_index, side_effects.clone()); let handle = tcx.dep_context().sess().diagnostic(); for mut diagnostic in side_effects.diagnostics { handle.emit_diagnostic(&mut diagnostic); } } } // Returns true if the given node has been marked as red during the // current compilation session. Used in various assertions pub fn is_red(&self, dep_node: &DepNode<K>) -> bool { self.node_color(dep_node) == Some(DepNodeColor::Red) } // Returns true if the given node has been marked as green during the // current compilation session. Used in various assertions pub fn is_green(&self, dep_node: &DepNode<K>) -> bool { self.node_color(dep_node).map_or(false, |c| c.is_green()) } // This method loads all on-disk cacheable query results into memory, so // they can be written out to the new cache file again. Most query results // will already be in memory but in the case where we marked something as // green but then did not need the value, that value will never have been // loaded from disk. // // This method will only load queries that will end up in the disk cache. // Other queries will not be executed. pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) { let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let data = self.data.as_ref().unwrap(); for prev_index in data.colors.values.indices() { match data.colors.get(prev_index) { Some(DepNodeColor::Green(_)) => { let dep_node = data.previous.index_to_node(prev_index); tcx.try_load_from_on_disk_cache(dep_node); } None | Some(DepNodeColor::Red) => { // We can skip red nodes because a node can only be marked // as red if the query result was recomputed and thus is // already in memory. } } } } pub fn print_incremental_info(&self) { if let Some(data) = &self.data { data.current.encoder.borrow().print_incremental_info( data.current.total_read_count.load(Relaxed), data.current.total_duplicate_read_count.load(Relaxed), ) } } pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult { if let Some(data) = &self.data { data.current.encoder.steal().finish(profiler) } else { Ok(()) } } pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex { let index = self.virtual_dep_node_index.fetch_add(1, Relaxed); DepNodeIndex::from_u32(index) } } /// A "work product" is an intermediate result that we save into the /// incremental directory for later re-use. The primary example are /// the object files that we save for each partition at code /// generation time. /// /// Each work product is associated with a dep-node, representing the /// process that produced the work-product. If that dep-node is found /// to be dirty when we load up, then we will delete the work-product /// at load time. 
If the work-product is found to be clean, then we /// will keep a record in the `previous_work_products` list. /// /// In addition, work products have an associated hash. This hash is /// an extra hash that can be used to decide if the work-product from /// a previous compilation can be re-used (in addition to the dirty /// edges check). /// /// As the primary example, consider the object files we generate for /// each partition. In the first run, we create partitions based on /// the symbols that need to be compiled. For each partition P, we /// hash the symbols in P and create a `WorkProduct` record associated /// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols /// in P. /// /// The next time we compile, if the `DepNode::CodegenUnit(P)` is /// judged to be clean (which means none of the things we read to /// generate the partition were found to be dirty), it will be loaded /// into previous work products. We will then regenerate the set of /// symbols in the partition P and hash them (note that new symbols /// may be added -- for example, new monomorphizations -- even if /// nothing in P changed!). We will compare that hash against the /// previous hash. If it matches up, we can reuse the object file. #[derive(Clone, Debug, Encodable, Decodable)] pub struct WorkProduct { pub cgu_name: String, /// Saved file associated with this CGU. pub saved_file: Option<String>, } // Index type for `DepNodeData`'s edges. rustc_index::newtype_index! { struct EdgeIndex { .. } } /// `CurrentDepGraph` stores the dependency graph for the current session. It /// will be populated as we run queries or tasks. We never remove nodes from the /// graph: they are only added. /// /// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes /// in memory. This is important, because these graph structures are some of the /// largest in the compiler. /// /// For this reason, we avoid storing `DepNode`s more than once as map /// keys. The `new_node_to_index` map only contains nodes not in the previous /// graph, and we map nodes in the previous graph to indices via a two-step /// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`, /// and the `prev_index_to_index` vector (which is more compact and faster than /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`. /// /// This struct uses three locks internally. The `data`, `new_node_to_index`, /// and `prev_index_to_index` fields are locked separately. Operations that take /// a `DepNodeIndex` typically just access the `data` field. /// /// We only need to manipulate at most two locks simultaneously: /// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When /// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index` /// first, and `data` second. pub(super) struct CurrentDepGraph<K: DepKind> { encoder: Steal<GraphEncoder<K>>, new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>, prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>, /// Used to trap when a specific edge is added to the graph. /// This is used for debug purposes and is only active with `debug_assertions`. #[cfg(debug_assertions)] forbidden_edge: Option<EdgeFilter<K>>, /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of /// their edges. This has the beneficial side-effect that multiple anonymous /// nodes can be coalesced into one without changing the semantics of the /// dependency graph. 
However, the merging of nodes can lead to a subtle /// problem during red-green marking: The color of an anonymous node from /// the current session might "shadow" the color of the node with the same /// ID from the previous session. In order to side-step this problem, we make /// sure that anonymous `NodeId`s allocated in different sessions don't overlap. /// This is implemented by mixing a session-key into the ID fingerprint of /// each anon node. The session-key is just a random number generated when /// the `DepGraph` is created. anon_id_seed: Fingerprint, /// These are simple counters that are for profiling and /// debugging and only active with `debug_assertions`. total_read_count: AtomicU64, total_duplicate_read_count: AtomicU64, /// The cached event id for profiling node interning. This saves us /// from having to look up the event id every time we intern a node /// which may incur too much overhead. /// This will be None if self-profiling is disabled. node_intern_event_id: Option<EventId>, } impl<K: DepKind> CurrentDepGraph<K> { fn new( profiler: &SelfProfilerRef, prev_graph_node_count: usize, encoder: FileEncoder, record_graph: bool, record_stats: bool, ) -> CurrentDepGraph<K> { use std::time::{SystemTime, UNIX_EPOCH}; let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64; let mut stable_hasher = StableHasher::new(); nanos.hash(&mut stable_hasher); #[cfg(debug_assertions)] let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { Ok(s) => match EdgeFilter::new(&s) { Ok(f) => Some(f), Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), }, Err(_) => None, }; // We store a large collection of these in `prev_index_to_index` during // non-full incremental builds, and want to ensure that the element size // doesn't inadvertently increase. static_assert_size!(Option<DepNodeIndex>, 4); let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200; let node_intern_event_id = profiler .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node") .map(EventId::from_label); CurrentDepGraph { encoder: Steal::new(GraphEncoder::new( encoder, prev_graph_node_count, record_graph, record_stats, )), new_node_to_index: Sharded::new(|| { FxHashMap::with_capacity_and_hasher( new_node_count_estimate / sharded::SHARDS, Default::default(), ) }), prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)), anon_id_seed: stable_hasher.finish(), #[cfg(debug_assertions)] forbidden_edge, total_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0), node_intern_event_id, } } #[cfg(debug_assertions)] fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) { if let Some(forbidden_edge) = &self.forbidden_edge { forbidden_edge.index_to_node.lock().insert(dep_node_index, key); } } /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it. /// Assumes that this is a node that has no equivalent in the previous dep-graph. 
fn intern_new_node( &self, profiler: &SelfProfilerRef, key: DepNode<K>, edges: EdgesVec, current_fingerprint: Fingerprint, ) -> DepNodeIndex { match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => { let dep_node_index = self.encoder.borrow().send(profiler, key, current_fingerprint, edges); entry.insert(dep_node_index); #[cfg(debug_assertions)] self.record_edge(dep_node_index, key); dep_node_index } } } fn intern_node( &self, profiler: &SelfProfilerRef, prev_graph: &SerializedDepGraph<K>, key: DepNode<K>, edges: EdgesVec, fingerprint: Option<Fingerprint>, print_status: bool, ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) { let print_status = cfg!(debug_assertions) && print_status; // Get timer for profiling `DepNode` interning let _node_intern_timer = self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid)); if let Some(prev_index) = prev_graph.node_to_index_opt(&key) { // Determine the color and index of the new `DepNode`. if let Some(fingerprint) = fingerprint { if fingerprint == prev_graph.fingerprint_by_index(prev_index) { if print_status { eprintln!("[task::green] {:?}", key); } // This is a green node: it existed in the previous compilation, // its query was re-executed, and it has the same result as before. let mut prev_index_to_index = self.prev_index_to_index.lock(); let dep_node_index = match prev_index_to_index[prev_index] { Some(dep_node_index) => dep_node_index, None => { let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges); prev_index_to_index[prev_index] = Some(dep_node_index); dep_node_index } }; #[cfg(debug_assertions)] self.record_edge(dep_node_index, key); (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index)))) } else { if print_status { eprintln!("[task::red] {:?}", key); } // This is a red node: it existed in the previous compilation, its query // was re-executed, but it has a different result from before. let mut prev_index_to_index = self.prev_index_to_index.lock(); let dep_node_index = match prev_index_to_index[prev_index] { Some(dep_node_index) => dep_node_index, None => { let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges); prev_index_to_index[prev_index] = Some(dep_node_index); dep_node_index } }; #[cfg(debug_assertions)] self.record_edge(dep_node_index, key); (dep_node_index, Some((prev_index, DepNodeColor::Red))) } } else { if print_status { eprintln!("[task::unknown] {:?}", key); } // This is a red node, effectively: it existed in the previous compilation // session, its query was re-executed, but it doesn't compute a result hash // (i.e. it represents a `no_hash` query), so we have no way of determining // whether or not the result was the same as before. let mut prev_index_to_index = self.prev_index_to_index.lock(); let dep_node_index = match prev_index_to_index[prev_index] { Some(dep_node_index) => dep_node_index, None => { let dep_node_index = self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges); prev_index_to_index[prev_index] = Some(dep_node_index); dep_node_index } }; #[cfg(debug_assertions)] self.record_edge(dep_node_index, key); (dep_node_index, Some((prev_index, DepNodeColor::Red))) } } else { if print_status { eprintln!("[task::new] {:?}", key); } let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO); // This is a new node: it didn't exist in the previous compilation session. 
let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint); (dep_node_index, None) } } fn promote_node_and_deps_to_current( &self, profiler: &SelfProfilerRef, prev_graph: &SerializedDepGraph<K>, prev_index: SerializedDepNodeIndex, ) -> DepNodeIndex { self.debug_assert_not_in_new_nodes(prev_graph, prev_index); let mut prev_index_to_index = self.prev_index_to_index.lock(); match prev_index_to_index[prev_index] { Some(dep_node_index) => dep_node_index, None => { let key = prev_graph.index_to_node(prev_index); let dep_node_index = self.encoder.borrow().send( profiler, key, prev_graph.fingerprint_by_index(prev_index), prev_graph .edge_targets_from(prev_index) .iter() .map(|i| prev_index_to_index[*i].unwrap()) .collect(), ); prev_index_to_index[prev_index] = Some(dep_node_index); #[cfg(debug_assertions)] self.record_edge(dep_node_index, key); dep_node_index } } } #[inline] fn debug_assert_not_in_new_nodes( &self, prev_graph: &SerializedDepGraph<K>, prev_index: SerializedDepNodeIndex, ) { let node = &prev_graph.index_to_node(prev_index); debug_assert!( !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node), "node from previous graph present in new node collection" ); } } /// The capacity of the `reads` field `SmallVec` const TASK_DEPS_READS_CAP: usize = 8; type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>; #[derive(Debug, Clone, Copy)] pub enum TaskDepsRef<'a, K: DepKind> { /// New dependencies can be added to the /// `TaskDeps`. This is used when executing a 'normal' query /// (no `eval_always` modifier) Allow(&'a Lock<TaskDeps<K>>), /// New dependencies are ignored. This is used when /// executing an `eval_always` query, since there's no /// need to track dependencies for a query that's always /// re-executed. This is also used for `dep_graph.with_ignore` Ignore, /// Any attempt to add new dependencies will cause a panic. /// This is used when decoding a query result from disk, /// to ensure that the decoding process doesn't itself /// require the execution of any queries. Forbid, } #[derive(Debug)] pub struct TaskDeps<K: DepKind> { #[cfg(debug_assertions)] node: Option<DepNode<K>>, reads: EdgesVec, read_set: FxHashSet<DepNodeIndex>, phantom_data: PhantomData<DepNode<K>>, } impl<K: DepKind> Default for TaskDeps<K> { fn default() -> Self { Self { #[cfg(debug_assertions)] node: None, reads: EdgesVec::new(), read_set: FxHashSet::default(), phantom_data: PhantomData, } } } // A data structure that stores Option<DepNodeColor> values as a contiguous // array, using one u32 per entry. struct DepNodeColorMap { values: IndexVec<SerializedDepNodeIndex, AtomicU32>, } const COMPRESSED_NONE: u32 = 0; const COMPRESSED_RED: u32 = 1; const COMPRESSED_FIRST_GREEN: u32 = 2; impl DepNodeColorMap { fn new(size: usize) -> DepNodeColorMap { DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() } } #[inline] fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> { match self.values[index].load(Ordering::Acquire) { COMPRESSED_NONE => None, COMPRESSED_RED => Some(DepNodeColor::Red), value => { Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN))) } } } fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) { self.values[index].store( match color { DepNodeColor::Red => COMPRESSED_RED, DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN, }, Ordering::Release, ) } }
try_mark_green
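The DepNodeColorMap at the end of the graph.rs suffix packs Option<DepNodeColor> into one u32 per node: 0 means None, 1 means Red, and any value >= 2 is Green carrying the node index offset by 2. A Python sketch of that encoding (the constants are copied from the source; representing colors as None, "red", or a ("green", index) tuple is an assumption for illustration):

COMPRESSED_NONE = 0
COMPRESSED_RED = 1
COMPRESSED_FIRST_GREEN = 2

def encode_color(color):
    # color is None, "red", or ("green", dep_node_index)
    if color is None:
        return COMPRESSED_NONE
    if color == "red":
        return COMPRESSED_RED
    _, index = color
    return index + COMPRESSED_FIRST_GREEN

def decode_color(value):
    if value == COMPRESSED_NONE:
        return None
    if value == COMPRESSED_RED:
        return "red"
    return ("green", value - COMPRESSED_FIRST_GREEN)

assert decode_color(encode_color(("green", 7))) == ("green", 7)
assert decode_color(COMPRESSED_RED) == "red"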
chart-data.ts
import { types, Instance } from "mobx-state-tree"; import { ChartDataSetModel, ChartDataSetModelType, ChartColors } from "./chart-data-set"; import { ChartAnnotationModel, ChartAnnotationType } from "./chart-annotation"; export const ChartDataModel = types .model("ChartData", { name: types.string, dataSets: types.array(ChartDataSetModel), labels: types.array(types.array(types.string)), annotations: types.array(ChartAnnotationModel), viewHeight: 400, }) .views(self => ({ get visibleDataSets() { return self.dataSets.filter(d => d.display); } })) .views(self => ({ get chartLabels() { if (self.labels && self.labels.length > 0) { return self.labels; } else return []; }, // labels for a data point - essential for a bar graph, optional for a line get dataLabels() { if (self.visibleDataSets && self.visibleDataSets.length > 0) { return self.visibleDataSets[0].dataLabels; } else return []; }, get dataLabelRotation() { if (self.visibleDataSets && self.visibleDataSets.length > 0) { return self.visibleDataSets[0].fixedLabelRotation; } else return; }, get minMaxAll() { const maxA1Values: number[] = []; const maxA2Values: number[] = []; const minA1Values: number[] = []; const minA2Values: number[] = []; self.visibleDataSets.forEach((d) => { maxA1Values.push(d.maxA1 || 100); maxA2Values.push(d.maxA2 || 100); minA1Values.push(d.minA1 || 0); minA2Values.push(d.minA2 || 0); }); return { maxA1: Math.max(...maxA1Values), maxA2: Math.max(...maxA2Values), minA1: Math.min(...minA1Values), minA2: Math.min(...minA2Values), }; }, get nextDataSeriesColor() { return ChartColors[self.dataSets.length]; }, get maxPoints() { return self.visibleDataSets[0].maxPoints; }, get pointCount() { return self.visibleDataSets[0].dataPoints.length; }, get subsetIdx() { return self.visibleDataSets[0].dataStartIdx; }, get axisLabelA1() { return self.visibleDataSets[0].axisLabelA1; }, get axisLabelA2() { return self.visibleDataSets[0].axisLabelA2; }, get formattedAnnotations() { return self.annotations.map(a => a.formatted); } })) .extend(self => { // actions function addDataSet(dataSet: ChartDataSetModelType) { self.dataSets.push(dataSet); } // If we want to scrub back and forth along a timeline of data points, but still need // to limit our data point quantity for performance, pass a start index and // the number of required points to filter the data function setDataSetSubset(idx: number, maxPoints: number) { self.dataSets.forEach(d => { d.subsetPoints(idx); if (self.maxPoints !== maxPoints) { d.setMaxDataPoints(maxPoints); } }); } // To fetch all data from all datasets, remove any subset index points and set the max number of points to -1 // to ensure all data is returned unfiltered function allData() { self.dataSets.forEach(d => { d.subsetPoints(-1); d.setMaxDataPoints(-1); }); } function addAnnotation(annotation: ChartAnnotationType) { annotation.setViewHeight(self.viewHeight); self.annotations.push(annotation); } function
(annotation: ChartAnnotationType) { self.annotations.remove(annotation); } function clearAnnotations() { self.annotations.clear(); } function setViewHeight(height: number) { self.viewHeight = height; // inform existing annotations of max height: self.annotations.forEach(a => a.setViewHeight(height)); } return { actions: { allData, addDataSet, setDataSetSubset, addAnnotation, removeAnnotation, clearAnnotations, setViewHeight, } }; }); export type ChartDataModelType = Instance<typeof ChartDataModel>;
removeAnnotation
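minMaxAll in the chart-data.ts prefix folds per-dataset extremes with fallbacks (100 for an unset maximum, 0 for an unset minimum). The same aggregation as a Python sketch over plain dicts (field names mirror the MST model; note that `or 0` treats an explicit 0 the same as unset, exactly like the original's `d.minA1 || 0`):

def min_max_all(datasets):
    return {
        "maxA1": max((d.get("maxA1") or 100) for d in datasets),
        "maxA2": max((d.get("maxA2") or 100) for d in datasets),
        "minA1": min((d.get("minA1") or 0) for d in datasets),
        "minA2": min((d.get("minA2") or 0) for d in datasets),
    }

print(min_max_all([{"maxA1": 250, "minA1": -5}, {}]))
# {'maxA1': 250, 'maxA2': 100, 'minA1': -5, 'minA2': 0}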
encode.test.ts
import { assertEquals, } from "https://deno.land/[email protected]/testing/asserts.ts"; import { encode } from "./../../mod.ts"; Deno.test("Encoding Utility", () => { assertEquals( encode.hex("676f645f63727970746f20726f636b7321").toString(), "god_crypto rocks!", ); assertEquals( encode.string("god_crypto rocks!").hex(), "676f645f63727970746f20726f636b7321", ); assertEquals( encode.base64("SGVsbG8gZ29kX2NyeXB0bw==").toString(), "Hello god_crypto", ); assertEquals( encode.string("Hello god_crypto").base64(), "SGVsbG8gZ29kX2NyeXB0bw==", );
}); Deno.test("Encoding Base32", () => { assertEquals( encode.base32("GZ7QQLQXMAQB6NK3KZOF4MC2PBHQS3Z2DAGDY3LKHFIU4UCCEZJA").hex(), "367f082e1760201f355b565c5e305a784f096f3a180c3c6d6a39514e50422652", ); assertEquals( encode.hex( "367f082e1760201f355b565c5e305a784f096f3a180c3c6d6a39514e50422652", ).base32(), "GZ7QQLQXMAQB6NK3KZOF4MC2PBHQS3Z2DAGDY3LKHFIU4UCCEZJA", ); assertEquals( encode.hex("187f234b").base32(), "DB7SGSY", ); assertEquals( encode.base32("DB7SGSY"), [24, 127, 35, 75], ); });
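The hex/base32 pairs in the base32 test are plain RFC 4648 values; they can be reproduced with the Python standard library (god_crypto's output just drops the '=' padding):

import base64
import binascii

raw = binascii.unhexlify("187f234b")
assert list(raw) == [24, 127, 35, 75]
assert base64.b32encode(raw).decode().rstrip("=") == "DB7SGSY"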
p4.go
package main import "fmt" func foo1() { for i := 0; i <= 3; i++ { defer fmt.Println(i) } // 3 2 1 0 } func
() { for i := 0; i <= 3; i++ { defer func(n int) { fmt.Println(n) }(i) } // 3 2 1 0 } func foo3() { for i := 0; i <= 3; i++ { defer func() { fmt.Println(i) }() } // 4 4 4 4 } func main() { fmt.Println("foo1 result:") foo1() fmt.Println("\nfoo2 result:") foo2() fmt.Println("\nfoo3 result:") foo3() }
foo2
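foo3 above prints 4 4 4 4 (under the pre-1.22 Go loop semantics the source comments assume) because every deferred closure shares the loop variable, which has already reached 4 when the deferred calls run; foo2 avoids that by passing i as an argument. Python closures late-bind the same way, sketched here (the final value is 3 since range(4) stops at 3, and there is no LIFO defer ordering):

def late_bound():
    fns = [lambda: i for i in range(4)]
    return [f() for f in fns]   # every lambda sees the final i

def value_bound():
    fns = [lambda n=i: n for i in range(4)]
    return [f() for f in fns]   # default argument pins the value, like foo2's (i)

assert late_bound() == [3, 3, 3, 3]
assert value_bound() == [0, 1, 2, 3]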
stickyTable.js
// This plugin changes a table's CSS and HTML // so that the table's header is sticky on scroll // It is derivative of the below jsFiddle by D-Pixie // https://jsfiddle.net/dPixie/byB9d/3/light/ // But unlike the fiddle this allows you to keep your // HTML clean and abstracts the details by applying the DOM changes and styling // dynamically after the page loads. // Only works on modern browsers, so no IE <= 9 (function ($) { $.fn.stickyTable = function( options ) { // optional arguments options = options || {}; var height = options.height || null; // Class constants var fakeThClass = 'fake-table-header'; var scrollWrapperClass = 'scroll-wrapper'; // Temporarily hold whatever was here so we can // add it back in case another plugin already defined stealStyles var stealStylesPlaceholder = $.fn.stealStyles; // Helper function for moving styles from one element to another $.fn.stealStyles = function ( $victim, styles ) { // Bind self because js var self = this; $.each(styles, function ( i, style ) { self.css( style, $victim.css( style ) ); }); return this; }; // Helper function for measuring the browser's scroll bar width var getScrollBarWidth = function () { var $measure; var measureClass = "measure-class"; var scrollBarWidth; $("<div>") .addClass( measureClass ) .css( "position", "absolute" ) .css( "top", -99999 ) .css( 'overflow', 'scroll' ) .width( 100 ) .height( 100 ) .appendTo( "body" ); $measure = $("." + measureClass); scrollBarWidth = $measure[0].offsetWidth - $measure[0].clientWidth; $measure.remove(); return scrollBarWidth; }; var marginOffset; // We need to account for the scroll bar var scrollBarWidth; var $fakeThs; var $thead = this.find( "thead" ); var $thColumns = $thead.find( "th" ); var $fixedWrapper = $( "<div>") .css( "position", "relative" ) .css( "display", "inline-block"); // The fake header makes it so that the color from the header // doesn't affect the table in the case where the tds do not have a // background color var $fakeheader = $( "<div>" ) .stealStyles( $thead, ['background-color', 'height'] ); var $scrollWrapper = $( "<div>" ) .addClass( scrollWrapperClass ) .css( "overflow-y", "auto" ) // Make the wrapper shrink to fit the containing div .css( "display", "inline-block" ) .css( "height", height ) // Display inline will get the width of the containing div without // accounting for the scrollbar, so we add the scroll width as padding here .css("padding-right", getScrollBarWidth()); $( this ).wrap( $fixedWrapper ); $( this ).before( $fakeheader ); $( this ).wrap( $scrollWrapper ); $thColumns.each(function ( i, thColumn ) { var $thColumn = $( thColumn ); $( "<div>" ) .text( $thColumn.text() ) .addClass( fakeThClass ) .stealStyles( $thColumn, ['color', // Get each padding individually, IE // returns "" if we do a straight call for 'padding' 'padding-top', 'padding-left', 'padding-right', 'padding-bottom', 'height', 'line-height'] ) .appendTo( $thColumn ); }); $fakeThs = $("." + fakeThClass); marginOffset = parseInt( $fakeThs.css( "padding-left" ), 10 ); $fakeThs .css( 'position', 'absolute' ) .css('margin-left', -marginOffset ) .css( 'top', 0 ); $thead.css( 'background', 'transparent' ); $thColumns .css( 'background-color', 'transparent' ) .css( 'height', 0 ) .css( 'line-height', 0 ) .css( 'padding-top', 0 ) .css( 'padding-bottom', 0 ) .css( 'border-color', 'transparent' );
return this; }; })( jQuery );
// Put whatever was previously called stealStyles back in place, in case it matters $.fn.stealStyles = stealStylesPlaceholder;