file_name | prefix | suffix | middle
---|---|---|---|
postcodesearch.go | package api
import (
"encoding/json"
"net/http"
"strconv"
"strings"
errs "github.com/ONSdigital/dp-census-search-prototypes/apierrors"
"github.com/ONSdigital/dp-census-search-prototypes/helpers"
"github.com/ONSdigital/dp-census-search-prototypes/models"
"github.com/ONSdigital/log.go/log"
"github.com/gorilla/mux"
)
const (
defaultLimit = 50
defaultOffset = 0
defaultSegments = 30
defaultRelation = "within"
postcodeNotFound = "postcode not found"
internalError = "internal server error"
exceedsDefaultMaximum = "the maximum offset has been reached, the offset cannot be more than"
invalidDistanceParam = "invalid distance value"
invalidRelationParam = "incorrect relation value"
)
func (api *SearchAPI) getPostcodeSearch(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vars := mux.Vars(r)
setAccessControl(w, http.MethodGet)
if r.Method == http.MethodOptions {
w.WriteHeader(http.StatusNoContent)
return
}
var err error
postcode := vars["postcode"]
p := strings.ReplaceAll(postcode, " ", "")
lcPostcode := strings.ToLower(p)
distance := r.FormValue("distance")
requestedLimit := r.FormValue("limit")
requestedOffset := r.FormValue("offset")
requestedRelation := r.FormValue("relation")
logData := log.Data{
"postcode": lcPostcode,
"postcode_raw": postcode,
"distance": distance,
"requested_limit": requestedLimit,
"requested_offset": requestedOffset,
"requested_relation": requestedRelation,
}
log.Event(ctx, "getPostcodeSearch endpoint: incoming request", log.INFO, logData)
limit := defaultLimit
if requestedLimit != "" {
limit, err = strconv.Atoi(requestedLimit)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: request limit parameter error", log.ERROR, log.Error(err), logData)
setErrorCode(w, errs.ErrParsingQueryParameters)
return
}
}
offset := defaultOffset
if requestedOffset != "" {
offset, err = strconv.Atoi(requestedOffset)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: request offset parameter error", log.ERROR, log.Error(err), logData)
setErrorCode(w, errs.ErrParsingQueryParameters)
return
}
}
relation := defaultRelation
if requestedRelation != "" {
relation, err = models.ValidateRelation(requestedRelation)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: request relation parameter error", log.ERROR, log.Error(err), logData)
setErrorCode(w, err)
return
}
}
page := &models.PageVariables{
DefaultMaxResults: api.defaultMaxResults,
Limit: limit,
Offset: offset,
}
distObj, err := models.ValidateDistance(distance)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: validate query param, distance", log.ERROR, log.Error(err), logData)
setErrorCode(w, err)
return
}
if err = page.Validate(); err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: validate pagination", log.ERROR, log.Error(err), logData)
setErrorCode(w, err)
return
}
logData["limit"] = page.Limit
logData["offset"] = page.Offset
log.Event(ctx, "getPostcodeSearch endpoint: just before querying search index", log.INFO, logData)
// lookup postcode
postcodeResponse, _, err := api.elasticsearch.GetPostcodes(ctx, api.postcodeIndex, lcPostcode)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: failed to search for postcode", log.ERROR, log.Error(err), logData)
setErrorCode(w, err)
return
}
if len(postcodeResponse.Hits.Hits) < 1 {
log.Event(ctx, "getPostcodeSearch endpoint: failed to find postcode", log.ERROR, log.Error(errs.ErrPostcodeNotFound), logData)
setErrorCode(w, errs.ErrPostcodeNotFound)
return
}
// calculate distance (in metres) based on distObj
dist := distObj.CalculateDistanceInMetres(ctx)
pcCoordinate := helpers.Coordinate{
Lat: postcodeResponse.Hits.Hits[0].Source.Pin.Location.Lat,
Lon: postcodeResponse.Hits.Hits[0].Source.Pin.Location.Lon,
}
// build polygon from circle using the long/lat of the postcode and the distance
polygonShape, err := helpers.CircleToPolygon(pcCoordinate, dist, defaultSegments)
if err != nil {
setErrorCode(w, err)
return
}
var coordinates [][][]float64
geoLocation := &models.GeoLocation{
Type: "polygon", // TODO make constant variable?
Coordinates: append(coordinates, polygonShape.Coordinates),
}
// query dataset index with polygon search (intersect)
response, _, err := api.elasticsearch.QueryGeoLocation(ctx, api.datasetIndex, geoLocation, page.Limit, page.Offset, relation)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: failed to query elastic search index", log.ERROR, log.Error(err), logData)
setErrorCode(w, err)
return
}
searchResults := &models.SearchResults{
TotalCount: response.Hits.Total,
Limit: page.Limit,
Offset: page.Offset,
}
for _, result := range response.Hits.HitList {
doc := result.Source
searchResults.Items = append(searchResults.Items, doc)
}
searchResults.Count = len(searchResults.Items)
b, err := json.Marshal(searchResults)
if err != nil {
log.Event(ctx, "getPostcodeSearch endpoint: failed to marshal search resource into bytes", log.ERROR, log.Error(err), logData)
setErrorCode(w, errs.ErrInternalServer)
return
}
_, err = w.Write(b)
if err != nil {
log.Event(ctx, "error writing response", log.ERROR, log.Error(err), logData)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
log.Event(ctx, "getPostcodeSearch endpoint: successfully searched index", log.INFO, logData)
}
func | (w http.ResponseWriter, err error) {
switch {
case errs.NotFoundMap[err]:
http.Error(w, err.Error(), http.StatusNotFound)
case errs.BadRequestMap[err]:
http.Error(w, err.Error(), http.StatusBadRequest)
case strings.Contains(err.Error(), exceedsDefaultMaximum):
http.Error(w, err.Error(), http.StatusBadRequest)
case strings.Contains(err.Error(), invalidDistanceParam):
http.Error(w, err.Error(), http.StatusBadRequest)
case strings.Contains(err.Error(), invalidRelationParam):
http.Error(w, err.Error(), http.StatusBadRequest)
default:
http.Error(w, internalError, http.StatusInternalServerError)
}
}
| setErrorCode |
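The handler above builds a polygon from the postcode's coordinate and the requested distance before running the geo query. The internals of helpers.CircleToPolygon are not shown in this file; the sketch below illustrates the usual technique under a spherical-Earth assumption (the function name and signature are illustrative, not the library's):

import math

def circle_to_polygon(lat, lon, radius_m, segments=30):
    """Approximate a circle on the Earth's surface as a closed GeoJSON ring.

    Assumes a spherical Earth; the equirectangular approximation is fine
    for postcode-scale radii. Returns [lon, lat] pairs (GeoJSON order).
    """
    earth_radius_m = 6371000.0
    ring = []
    for i in range(segments):
        theta = 2.0 * math.pi * i / segments
        # Angular offsets (radians) north and east of the centre point.
        d_lat = (radius_m / earth_radius_m) * math.cos(theta)
        d_lon = (radius_m / earth_radius_m) * math.sin(theta) / math.cos(math.radians(lat))
        ring.append([lon + math.degrees(d_lon), lat + math.degrees(d_lat)])
    ring.append(ring[0])  # GeoJSON polygons must close on themselves
    return ring

With defaultSegments = 30 the circle becomes a 30-sided ring, which is ample precision at postcode scale.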
issue-6801.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Creating a stack closure which references a box and then
// transferring ownership of the box before invoking the stack
// closure results in a crash.
#![feature(box_syntax)]
fn twice(x: Box<usize>) -> usize {
*x * 2
}
fn invoke<F>(f: F) where F: FnOnce() -> usize {
f();
}
fn | () {
let x : Box<usize> = box 9;
let sq = || { *x * *x };
twice(x); //~ ERROR: cannot move out of
invoke(sq);
}
| main |
eventcontent.go | /* Copyright 2016-2017 Vector Creations Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gomatrixserverlib
import (
"encoding/json"
"strconv"
"strings"
)
// CreateContent is the JSON content of a m.room.create event along with
// the top level keys needed for auth.
// See https://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-create for descriptions of the fields.
type CreateContent struct {
// We need the domain of the create event when checking federatability.
senderDomain string
// We need the roomID to check that events are in the same room as the create event.
roomID string
// We need the eventID to check the first join event in the room.
eventID string
// The "m.federate" flag tells us whether the room can be federated to other servers.
Federate *bool `json:"m.federate,omitempty"`
// The creator of the room tells us what the default power levels are.
Creator string `json:"creator"`
// The version of the room. Should be treated as "1" when the key doesn't exist.
RoomVersion *RoomVersion `json:"room_version,omitempty"`
// The predecessor of the room.
Predecessor PreviousRoom `json:"predecessor,omitempty"`
}
// PreviousRoom is the "Previous Room" structure defined at https://matrix.org/docs/spec/client_server/r0.5.0#m-room-create
type PreviousRoom struct {
RoomID string `json:"room_id"`
EventID string `json:"event_id"`
}
// NewCreateContentFromAuthEvents loads the create event content from the create event in the
// auth events.
func NewCreateContentFromAuthEvents(authEvents AuthEventProvider) (c CreateContent, err error) {
var createEvent *Event
if createEvent, err = authEvents.Create(); err != nil {
return
}
if createEvent == nil |
if err = json.Unmarshal(createEvent.Content(), &c); err != nil {
err = errorf("unparsable create event content: %s", err.Error())
return
}
c.roomID = createEvent.RoomID()
c.eventID = createEvent.EventID()
if c.senderDomain, err = domainFromID(createEvent.Sender()); err != nil {
return
}
return
}
// DomainAllowed checks whether the domain is allowed in the room by the
// "m.federate" flag.
func (c *CreateContent) DomainAllowed(domain string) error {
if domain == c.senderDomain {
// If the domain matches the domain of the create event then the event
// is always allowed regardless of the value of the "m.federate" flag.
return nil
}
if c.Federate == nil || *c.Federate {
// The m.federate field defaults to true.
// If the domains are different then event is only allowed if the
// "m.federate" flag is absent or true.
return nil
}
return errorf("room is unfederatable")
}
// UserIDAllowed checks whether the domain part of the user ID is allowed in
// the room by the "m.federate" flag.
func (c *CreateContent) UserIDAllowed(id string) error {
domain, err := domainFromID(id)
if err != nil {
return err
}
return c.DomainAllowed(domain)
}
// domainFromID returns everything after the first ":" character to extract
// the domain part of a matrix ID.
func domainFromID(id string) (string, error) {
// IDs have the format: SIGIL LOCALPART ":" DOMAIN
// Split on the first ":" character since the domain can contain ":"
// characters.
parts := strings.SplitN(id, ":", 2)
if len(parts) != 2 {
// The ID must have a ":" character.
return "", errorf("invalid ID: %q", id)
}
// Return everything after the first ":" character.
return parts[1], nil
}
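// For example (illustrative): domainFromID("@alice:example.com") returns
// "example.com", while domainFromID("@alice:example.com:8448") returns
// "example.com:8448", because only the first ":" separates the localpart.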
// MemberContent is the JSON content of a m.room.member event needed for auth checks.
// See https://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-member for descriptions of the fields.
type MemberContent struct {
// We use the membership key in order to check if the user is in the room.
Membership string `json:"membership"`
DisplayName string `json:"displayname,omitempty"`
AvatarURL string `json:"avatar_url,omitempty"`
Reason string `json:"reason,omitempty"`
IsDirect bool `json:"is_direct,omitempty"`
// We use the third_party_invite key to special case thirdparty invites.
ThirdPartyInvite *MemberThirdPartyInvite `json:"third_party_invite,omitempty"`
}
// MemberThirdPartyInvite is the "Invite" structure defined at http://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-member
type MemberThirdPartyInvite struct {
DisplayName string `json:"display_name"`
Signed MemberThirdPartyInviteSigned `json:"signed"`
}
// MemberThirdPartyInviteSigned is the "signed" structure defined at http://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-member
type MemberThirdPartyInviteSigned struct {
MXID string `json:"mxid"`
Signatures map[string]map[string]string `json:"signatures"`
Token string `json:"token"`
}
// NewMemberContentFromAuthEvents loads the member content from the member event for the user ID in the auth events.
// Returns an error if there was an error loading the member event or parsing the event content.
func NewMemberContentFromAuthEvents(authEvents AuthEventProvider, userID string) (c MemberContent, err error) {
var memberEvent *Event
if memberEvent, err = authEvents.Member(userID); err != nil {
return
}
if memberEvent == nil {
// If there isn't a member event then the membership for the user
// defaults to leave.
c.Membership = Leave
return
}
return NewMemberContentFromEvent(*memberEvent)
}
// NewMemberContentFromEvent parses the member content from an event.
// Returns an error if the content couldn't be parsed.
func NewMemberContentFromEvent(event Event) (c MemberContent, err error) {
if err = json.Unmarshal(event.Content(), &c); err != nil {
err = errorf("unparsable member event content: %s", err.Error())
return
}
return
}
// ThirdPartyInviteContent is the JSON content of a m.room.third_party_invite event needed for auth checks.
// See https://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-third-party-invite for descriptions of the fields.
type ThirdPartyInviteContent struct {
DisplayName string `json:"display_name"`
KeyValidityURL string `json:"key_validity_url"`
PublicKey string `json:"public_key"`
// Public keys are used to verify the signature of a m.room.member event that
// came from a m.room.third_party_invite event
PublicKeys []PublicKey `json:"public_keys"`
}
// PublicKey is the "PublicKeys" structure defined at https://matrix.org/docs/spec/client_server/r0.5.0#m-room-third-party-invite
type PublicKey struct {
PublicKey Base64Bytes `json:"public_key"`
KeyValidityURL string `json:"key_validity_url"`
}
// NewThirdPartyInviteContentFromAuthEvents loads the third party invite content from the third party invite event for the state key (token) in the auth events.
// Returns an error if there was an error loading the third party invite event or parsing the event content.
func NewThirdPartyInviteContentFromAuthEvents(authEvents AuthEventProvider, token string) (t ThirdPartyInviteContent, err error) {
var thirdPartyInviteEvent *Event
if thirdPartyInviteEvent, err = authEvents.ThirdPartyInvite(token); err != nil {
return
}
if thirdPartyInviteEvent == nil {
// If there isn't a third_party_invite event, then we return with an error
err = errorf("Couldn't find third party invite event")
return
}
if err = json.Unmarshal(thirdPartyInviteEvent.Content(), &t); err != nil {
err = errorf("unparsable third party invite event content: %s", err.Error())
}
return
}
// HistoryVisibilityContent is the JSON content of a m.room.history_visibility event.
// See https://matrix.org/docs/spec/client_server/r0.6.0#room-history-visibility for descriptions of the fields.
type HistoryVisibilityContent struct {
HistoryVisibility string `json:"history_visibility"`
}
// JoinRuleContent is the JSON content of a m.room.join_rules event needed for auth checks.
// See https://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-join-rules for descriptions of the fields.
type JoinRuleContent struct {
// We use the join_rule key to check whether join m.room.member events are allowed.
JoinRule string `json:"join_rule"`
}
// NewJoinRuleContentFromAuthEvents loads the join rule content from the join rules event in the auth event.
// Returns an error if there was an error loading the join rule event or parsing the content.
func NewJoinRuleContentFromAuthEvents(authEvents AuthEventProvider) (c JoinRuleContent, err error) {
var joinRulesEvent *Event
if joinRulesEvent, err = authEvents.JoinRules(); err != nil {
return
}
if joinRulesEvent == nil {
// Default to "invite"
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L368
c.JoinRule = Invite
return
}
if err = json.Unmarshal(joinRulesEvent.Content(), &c); err != nil {
err = errorf("unparsable join_rules event content: %s", err.Error())
return
}
return
}
// PowerLevelContent is the JSON content of a m.room.power_levels event needed for auth checks.
// Typically the user calls NewPowerLevelContentFromAuthEvents instead of
// unmarshalling the content directly from JSON so defaults can be applied.
// However, the JSON key names are still preserved so it's possible to marshal
// the struct into JSON easily.
// See https://matrix.org/docs/spec/client_server/r0.2.0.html#m-room-power-levels for descriptions of the fields.
type PowerLevelContent struct {
Ban int64 `json:"ban"`
Invite int64 `json:"invite"`
Kick int64 `json:"kick"`
Redact int64 `json:"redact"`
Users map[string]int64 `json:"users"`
UsersDefault int64 `json:"users_default"`
Events map[string]int64 `json:"events"`
EventsDefault int64 `json:"events_default"`
StateDefault int64 `json:"state_default"`
Notifications map[string]int64 `json:"notifications"`
}
// UserLevel returns the power level a user has in the room.
func (c *PowerLevelContent) UserLevel(userID string) int64 {
level, ok := c.Users[userID]
if ok {
return level
}
return c.UsersDefault
}
// EventLevel returns the power level needed to send an event in the room.
func (c *PowerLevelContent) EventLevel(eventType string, isState bool) int64 {
if eventType == MRoomThirdPartyInvite {
// Special case third_party_invite events to have the same level as
// m.room.member invite events.
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L182
return c.Invite
}
level, ok := c.Events[eventType]
if ok {
return level
}
if isState {
return c.StateDefault
}
return c.EventsDefault
}
// NotificationLevel returns the power level needed to trigger a notification of the given type in the room.
func (c *PowerLevelContent) NotificationLevel(notification string) int64 {
level, ok := c.Notifications[notification]
if ok {
return level
}
// https://matrix.org/docs/spec/client_server/r0.6.1#m-room-power-levels
// room integer The level required to trigger an @room notification. Defaults to 50 if unspecified.
return 50
}
// NewPowerLevelContentFromAuthEvents loads the power level content from the
// power level event in the auth events or returns the default values if there
// is no power level event.
func NewPowerLevelContentFromAuthEvents(authEvents AuthEventProvider, creatorUserID string) (c PowerLevelContent, err error) {
powerLevelsEvent, err := authEvents.PowerLevels()
if err != nil {
return
}
if powerLevelsEvent != nil {
return NewPowerLevelContentFromEvent(*powerLevelsEvent)
}
// If there are no power levels then fall back to defaults.
c.Defaults()
// If there is no power level event then the creator gets level 100
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L569
c.Users = map[string]int64{creatorUserID: 100}
// If there is no power level event then the state_default is level 0
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L997
c.StateDefault = 0
return
}
// Defaults sets the power levels to their default values.
func (c *PowerLevelContent) Defaults() {
// Default invite level is 0.
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L426
c.Invite = 0
// Default ban, kick and redacts levels are 50
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L376
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L456
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L1041
c.Ban = 50
c.Kick = 50
c.Redact = 50
// Default user level is 0
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L558
c.UsersDefault = 0
// Default event level is 0, Default state level is 50
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L987
// https://github.com/matrix-org/synapse/blob/v0.18.5/synapse/api/auth.py#L991
c.EventsDefault = 0
c.StateDefault = 50
}
// NewPowerLevelContentFromEvent loads the power level content from an event.
func NewPowerLevelContentFromEvent(event Event) (c PowerLevelContent, err error) {
// Set the levels to their default values.
c.Defaults()
// We can't extract the JSON directly to the powerLevelContent because we
// need to convert string values to int values.
var content struct {
InviteLevel levelJSONValue `json:"invite"`
BanLevel levelJSONValue `json:"ban"`
KickLevel levelJSONValue `json:"kick"`
RedactLevel levelJSONValue `json:"redact"`
UserLevels map[string]levelJSONValue `json:"users"`
UsersDefaultLevel levelJSONValue `json:"users_default"`
EventLevels map[string]levelJSONValue `json:"events"`
StateDefaultLevel levelJSONValue `json:"state_default"`
EventDefaultLevel levelJSONValue `json:"events_default"`
}
if err = json.Unmarshal(event.Content(), &content); err != nil {
err = errorf("unparsable power_levels event content: %s", err.Error())
return
}
// Update the levels with the values that are present in the event content.
content.InviteLevel.assignIfExists(&c.Invite)
content.BanLevel.assignIfExists(&c.Ban)
content.KickLevel.assignIfExists(&c.Kick)
content.RedactLevel.assignIfExists(&c.Redact)
content.UsersDefaultLevel.assignIfExists(&c.UsersDefault)
content.StateDefaultLevel.assignIfExists(&c.StateDefault)
content.EventDefaultLevel.assignIfExists(&c.EventsDefault)
for k, v := range content.UserLevels {
if c.Users == nil {
c.Users = make(map[string]int64)
}
c.Users[k] = v.value
}
for k, v := range content.EventLevels {
if c.Events == nil {
c.Events = make(map[string]int64)
}
c.Events[k] = v.value
}
return
}
// A levelJSONValue is used for unmarshalling power levels from JSON.
// It is intended to replicate the effects of x = int(content["key"]) in python.
type levelJSONValue struct {
// Was a value loaded from the JSON?
exists bool
// The integer value of the power level.
value int64
}
func (v *levelJSONValue) UnmarshalJSON(data []byte) error {
var stringValue string
var int64Value int64
var floatValue float64
var err error
// First try to unmarshal as an int64.
if err = json.Unmarshal(data, &int64Value); err != nil {
// If unmarshalling as an int64 fails try as a string.
if err = json.Unmarshal(data, &stringValue); err != nil {
// If unmarshalling as a string fails try as a float.
if err = json.Unmarshal(data, &floatValue); err != nil {
return err
}
int64Value = int64(floatValue)
} else {
// If we managed to get a string, try parsing the string as an int.
int64Value, err = strconv.ParseInt(stringValue, 10, 64)
if err != nil {
return err
}
}
}
v.exists = true
v.value = int64Value
return nil
}
// assign the power level if a value was present in the JSON.
func (v *levelJSONValue) assignIfExists(to *int64) {
if v.exists {
*to = v.value
}
}
// Check if the user ID is a valid user ID.
func isValidUserID(userID string) bool {
// TODO: Do we want to add any more checks beyond checking the sigil and that it has a domain part?
return len(userID) > 0 && userID[0] == '@' && strings.IndexByte(userID, ':') != -1
}
| {
err = errorf("missing create event")
return
} |
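The levelJSONValue type above replicates Python's int(content["key"]) coercion, as its comment notes, which is effectively what Synapse does when reading power levels. For reference, this is the Python behaviour being mirrored, where ints, numeric strings, and floats are all accepted and floats truncate toward zero:

for raw in (50, "50", 50.9):
    print(int(raw))  # 50 in every case: strings are parsed, floats truncate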
format-routing.rs | #![feature(proc_macro_hygiene)]
#[macro_use] extern crate rocket;
use rocket::config::{Environment, Config, LoggingLevel};
#[get("/", format = "application/json")]
fn get() -> &'static str { "get" }
#[post("/", format = "application/json")]
fn post() -> &'static str |
fn rocket() -> rocket::Rocket {
let config = Config::build(Environment::Production).log_level(LoggingLevel::Off);
rocket::custom(config.unwrap()).mount("/", routes![get, post])
}
mod benches {
extern crate test;
use super::rocket;
use self::test::Bencher;
use rocket::local::Client;
use rocket::http::{Accept, ContentType};
#[bench]
fn accept_format(b: &mut Bencher) {
let client = Client::new(rocket()).unwrap();
let mut request = client.get("/").header(Accept::JSON);
b.iter(|| { request.mut_dispatch(); });
}
#[bench]
fn wrong_accept_format(b: &mut Bencher) {
let client = Client::new(rocket()).unwrap();
let mut request = client.get("/").header(Accept::HTML);
b.iter(|| { request.mut_dispatch(); });
}
#[bench]
fn content_type_format(b: &mut Bencher) {
let client = Client::new(rocket()).unwrap();
let mut request = client.post("/").header(ContentType::JSON);
b.iter(|| { request.mut_dispatch(); });
}
#[bench]
fn wrong_content_type_format(b: &mut Bencher) {
let client = Client::new(rocket()).unwrap();
let mut request = client.post("/").header(ContentType::Plain);
b.iter(|| { request.mut_dispatch(); });
}
}
| { "post" } |
arrSlice.js | /**
* array-slice <https://github.com/jonschlinkert/array-slice>
*/
function slice(arr, start, end) {
let len = arr.length;
let range = [];
start = idx(len, start);
end = idx(len, end, len); | }
return range;
}
function idx(len, pos, end) {
if (pos == null) {
pos = end || 0;
} else if (pos < 0) {
pos = Math.max(len + pos, 0);
} else {
pos = Math.min(pos, len);
}
return pos;
} |
while (start < end) {
range.push(arr[start++]); |
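The idx helper clamps negative and out-of-range positions the same way Python slicing does, so slice(arr, start, end) mirrors Python's arr[start:end]. For comparison:

arr = [1, 2, 3, 4, 5]
print(arr[-3:10])  # [3, 4, 5]: negative start counts from the end, end clamps to len(arr)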
oop.py | class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def | (self):
return "{" + self.name + " " + str(self.age) + "}"
p1 = Person("John", 36)
print(p1) | __str__ |
lib.rs | pub mod file;
pub mod shell;
pub mod string;
pub mod logging;
pub mod crypt;
pub use serde_yaml;
pub use serde;
pub use regex;
pub use log;
pub use simple_logger;
pub use itertools;
pub use clap;
pub use aes_gcm;
pub use rand;
pub use rand_chacha; | #[cfg(test)]
mod tests {
#[test]
fn it_works() {
let result = 2 + 2;
assert_eq!(result, 4);
}
} | pub use hex;
|
balances.go | package bitfinex
type BalancesService struct {
client *Client
}
type WalletBalance struct {
Type string
Currency string
Amount string
Available string
}
// GET balances
func (b *BalancesService) All() ([]WalletBalance, error) {
req, err := b.client.newAuthenticatedRequest("GET", "balances", nil)
if err != nil {
return nil, err
}
| balances := make([]WalletBalance, 3)
_, err = b.client.do(req, &balances)
if err != nil {
return nil, err
}
return balances, nil
} | |
setup.py | #!/usr/bin/env python
# coding=utf-8
from distutils.core import setup
version = '0.3'
setup(
name='jefferson',
version=version,
description='JFFS2 filesystem extraction tool originally released by Stefan Viehböck. Python3 update by Jaan Klouman',
author='Stefan Viehböck & Jaan Klouman',
url='https://github.com/sviehb/jefferson',
license='MIT',
requires=['cstruct'],
packages=['jefferson'], | scripts=['src/scripts/jefferson'],
) | package_dir={'jefferson': 'src/jefferson'}, |
assetfile.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementations for asset files
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "[email protected]"
import os
import logging
import tpDcc as tp
from tpDcc.libs.python import osplatform, path as path_utils
import artellapipe
from artellapipe.core import defines, file
LOGGER = logging.getLogger('artellapipe')
class ArtellaAssetFile(file.ArtellaFile, object):
def __init__(self, file_asset=None, file_path=None, file_name=None):
self._asset = file_asset
file_name = file_name or self._asset.get_name() if self._asset else None
super(ArtellaAssetFile, self).__init__(file_name=file_name, file_path=file_path)
@property
def asset(self):
"""
Returns asset linked to this file type
:return: ArtellaAssset
"""
return self._asset
def has_valid_object(self):
"""
Implements base ArtellaFile has_valid_object function
Returns whether valid object is attached to this file
:return: bool
"""
return bool(self._asset)
def get_template_dict(self, **kwargs):
"""
Returns dictionary with the template data for this file
:param extension: str
:return: dict
"""
template_dict = {
'project_id': self._project.id,
'project_id_number': self._project.id_number,
'asset_name': self._asset.get_name(),
'asset_type': self._asset.get_category(),
'file_extension': kwargs.get('extension', self.FILE_EXTENSIONS[0])
}
return template_dict
def get_project(self):
"""
Implements base ArtellaFile get_project function
Returns project where this asset file belongs to
:return: ArtellaProject
"""
return self._asset.project
def get_file(
self, status=defines.ArtellaFileStatus.WORKING, extension=None, fix_path=False, version=None, **kwargs):
"""
Implements base ArtellaFile get_file function
Returns file of the attached object
:param file_type: str
:param status: str
:param extension: str
:param fix_path: bool
:param version: str
:return: str
"""
template_dict = self.get_template_dict()
return self._asset.get_file(
file_type=self.FILE_TYPE, status=status, extension=extension, fix_path=fix_path,
version=version, extra_dict=template_dict)
def get_path(self):
"""
Implements base ArtellaFile get_path function
Returns path of the attached object
:return: str
"""
return self._asset.get_path()
def get_name(self):
"""
Returns name of the attached object
:return: str
"""
return self._asset.get_name()
def get_extension(self):
"""
Returns the extension of the asset file
:return: str
"""
return self.get_project().assets_library_file_types.get()
def get_latest_published_versions(self):
"""
Implements base ArtellaFile get_path function
Returns latest published version of file
:return: str
"""
file_path = self.get_path()
return artellapipe.AssetsMgr().get_latest_published_versions(file_path, file_type=self.FILE_TYPE)
def get_file_paths(self, return_first=False, fix_path=True, **kwargs):
if self.FILE_TYPE not in self._asset.FILES:
LOGGER.warning(
'FileType "{}" is not a valid file for Assets of type "{}"'.format(
self.FILE_TYPE, self._asset.FILE_TYPE))
return list()
file_paths = super(
ArtellaAssetFile, self).get_file_paths(return_first=return_first, fix_path=fix_path, **kwargs)
if file_paths:
return file_paths
status = kwargs.get('status', defines.ArtellaFileStatus.PUBLISHED)
if status == defines.ArtellaFileStatus.WORKING:
file_path = self.get_working_path()
else:
file_path = self.get_latest_local_published_path()
if not file_path:
return None if return_first else file_paths
if fix_path:
file_path = artellapipe.FilesMgr().fix_path(file_path)
if return_first:
|
else:
return [file_path]
def _open_file(self, file_path):
if file_path and os.path.isfile(file_path):
if path_utils.clean_path(tp.Dcc.scene_name()) == path_utils.clean_path(file_path):
return True
tp.Dcc.open_file(file_path)
return True
elif file_path and os.path.isdir(file_path):
osplatform.open_file(file_path)
return True
else:
if file_path:
folder_path = os.path.dirname(file_path)
if os.path.isdir(folder_path):
osplatform.open_file(folder_path)
return True
LOGGER.warning('Impossible to open file: "{}"'.format(file_path))
return False
| return file_path |
lib.rs | #![cfg_attr(not(test), no_std)]
#![feature(nll)]
#![deny(non_snake_case)]
// import macros from log
use log::*;
extern crate alloc;
mod addr;
pub mod cow;
pub mod memory_set;
pub mod no_mmu;
pub mod paging;
//pub mod swap;
pub use crate::addr::*;
pub enum | {
InvalidPtr,
}
pub type VMResult<T> = Result<T, VMError>;
| VMError |
extract.py | import argparse
import numpy as np
import os
import sys
import numpy, scipy, sklearn
from model.trainer import Trainer
from misc.utils import Params
from dataset.kaldi_io import FeatureReader, open_or_fd, read_mat_ark, write_vec_flt
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=int, default=-1, help="The GPU id. GPU disabled if -1.")
parser.add_argument("-m", "--min-chunk-size", type=int, default=25, help="The minimum length of the segments. Any segment shorted than this value will be ignored.")
parser.add_argument("-s", "--chunk-size", type=int, default=10000, help="The length of the segments used to extract the embeddings. "
"Segments longer than this value will be splited before extraction. "
"Then the splited embeddings will be averaged to get the final embedding. "
"L2 normalizaion will be applied before the averaging if specified.")
parser.add_argument("-n", "--normalize", action="store_true", help="Normalize the embedding before averaging and output.")
parser.add_argument("--node", type=str, default="", help="The node to output the embeddings.")
parser.add_argument("model_dir", type=str, help="The model directory.")
parser.add_argument("rspecifier", type=str, help="Kaldi feature rspecifier (or ark file).")
parser.add_argument("wspecifier", type=str, help="Kaldi output wspecifier (or ark file).")
args = parser.parse_args()
if args.gpu == -1:
# Disable GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# In the GPU situation, it is difficult to know how to specify the GPU id.
# If the program is launched locally, you can set CUDA_VISIBLE_DEVICES to the id.
# However, if SGE is used, we cannot simply set CUDA_VISIBLE_DEVICES.
# So it is better to specify the GPU id outside the program.
# Giving an arbitrary number (except -1) to --gpu enables it. Leave it blank if you want to disable the GPU.
import tensorflow as tf
if __name__ == '__main__':
tf.reset_default_graph()
tf.logging.set_verbosity(tf.logging.INFO)
nnet_dir = os.path.join(args.model_dir, "nnet")
config_json = os.path.join(args.model_dir, "nnet/config.json")
if not os.path.isfile(config_json):
sys.exit("Cannot find params.json in %s" % config_json)
params = Params(config_json)
# Change the output node if necessary
if len(args.node) != 0:
params.embedding_node = args.node | dim = int(f.readline().strip())
#trainer = Trainer(params, args.model_dir, dim, single_cpu=True)
trainer = Trainer(params, args.model_dir, dim)
trainer.build("predict")
if args.rspecifier.rsplit(".", 1)[1] == "scp":
# The rspecifier cannot be scp
sys.exit("The rspecifier must be ark or input pipe")
fp_out = open_or_fd(args.wspecifier, "wb")
# import pdb;pdb.set_trace()
# args.rspecifier=args.rspecifier.replace('JOB', '1')
for index, (key, feature) in enumerate(read_mat_ark(args.rspecifier)):
if feature.shape[0] < args.min_chunk_size:
tf.logging.info("[INFO] Key %s length too short, %d < %d, skip." % (key, feature.shape[0], args.min_chunk_size))
continue
if feature.shape[0] > args.chunk_size:
feature_array = []
feature_length = []
num_chunks = int(np.ceil(float(feature.shape[0] - args.chunk_size) / (args.chunk_size / 2))) + 1
tf.logging.info("[INFO] Key %s length %d > %d, split to %d segments." % (key, feature.shape[0], args.chunk_size, num_chunks))
for i in range(num_chunks):
start = int(i * (args.chunk_size / 2))
this_chunk_size = args.chunk_size if feature.shape[0] - start > args.chunk_size else feature.shape[0] - start
feature_length.append(this_chunk_size)
feature_array.append(feature[start:start+this_chunk_size])
feature_length = np.expand_dims(np.array(feature_length), axis=1)
# Except for the last feature, the length of other features should be the same (=chunk_size)
embeddings = trainer.predict(np.array(feature_array[:-1], dtype=np.float32))
embedding_last = trainer.predict(feature_array[-1])
embeddings = np.concatenate([embeddings, np.expand_dims(embedding_last, axis=0)], axis=0)
if args.normalize:
embeddings /= np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))
embedding = np.sum(embeddings * feature_length, axis=0) / np.sum(feature_length)
else:
tf.logging.info("[INFO] Key %s length %d." % (key, feature.shape[0]))
embedding = trainer.predict(feature)
tf.logging.info("[INFO] Key %s finished predicting" % (key))
if args.normalize:
embedding /= np.sqrt(np.sum(np.square(embedding)))
write_vec_flt(fp_out, embedding, key=key)
tf.logging.info("[INFO] Key %s finished writing" % (key))
fp_out.close()
trainer.close() | tf.logging.info("Extract embedding from %s" % params.embedding_node)
with open(os.path.join(nnet_dir, "feature_dim"), "r") as f: |
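When an utterance is longer than --chunk-size, the script splits it into roughly 50%-overlapping chunks, extracts one embedding per chunk, and combines them with a length-weighted average. A standalone sketch of that weighting step (the numbers are made up):

import numpy as np

embeddings = np.array([[1.0, 0.0], [0.0, 1.0]])  # one embedding per chunk
lengths = np.array([[300], [100]])               # frames in each chunk
avg = np.sum(embeddings * lengths, axis=0) / np.sum(lengths)
print(avg)  # [0.75 0.25] -- longer chunks contribute proportionally more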
time_test.go | package ablyutil_test
import (
"testing"
"time"
"github.com/ably/ably-go/ably/internal/ablyutil"
"golang.org/x/net/context"
)
func TestAfterOK(t *testing.T) {
const wait = 10 * time.Millisecond
ctx, cancel := context.WithTimeout(context.Background(), wait*2)
defer cancel()
now := time.Now()
select {
case firedAt := <-ablyutil.After(ctx, wait):
if elapsed := firedAt.Sub(now); !isCloseTo(elapsed, wait) {
t.Errorf("expected timer to fire after ~%v; got %v", wait, elapsed)
}
case <-ctx.Done():
t.Error("expected timer to be done before the context is canceled")
}
}
func TestAfterCanceled(t *testing.T) |
func isCloseTo(d time.Duration, to time.Duration) bool {
const leeway = 1 * time.Millisecond
return d > to-leeway && d < to+leeway
}
| {
const wait = 10 * time.Millisecond
testCtx, cancel := context.WithTimeout(context.Background(), wait*2)
defer cancel()
ctx, cancel := context.WithCancel(testCtx)
defer cancel()
var canceledAt time.Time
go func() {
time.Sleep(wait / 2)
canceledAt = time.Now()
cancel()
}()
select {
case _, ok := <-ablyutil.After(ctx, wait):
if ok {
t.Error("expected timer channel to be closed on cancel")
}
if sinceCancel := time.Since(canceledAt); !isCloseTo(sinceCancel, 0) {
t.Errorf("expected timer to fire immediately after cancel; got %v", sinceCancel)
}
case <-testCtx.Done():
t.Error("expected timer to be done before the context is canceled")
}
} |
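The implementation of ablyutil.After is not part of this file, but the two tests pin down its contract: the returned channel yields the fire time once the delay elapses, and is closed immediately if the context is cancelled first. A rough Python analogue of that contract, with a threading.Event standing in for the context (illustrative only):

import threading
import time

def after(delay, cancelled):
    """Return the fire time after `delay`, or None if `cancelled` is set first."""
    if cancelled.wait(timeout=delay):  # True means cancelled before the timeout
        return None
    return time.monotonic()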
assessmentFromDelius.js | const { stubFor } = require('./wiremock')
const stubGetAssessmentFromDelius = () => {
stubFor({
request: {
method: 'GET',
urlPattern: '/assessment-from-delius',
},
response: {
headers: {
'Content-Type': 'application/json;charset=UTF-8',
}, | status: 200,
jsonBody: {
assessmentUuid: '7dd4628f-51ed-491b-95c7-7c197ba434b8',
subject: {
name: 'Garry Hart',
dateOfBirth: '1987-03-14',
pnc: '2012/123450000F',
crn: 'J081276',
subjectUuid: '5a8843cf-af3e-4f5d-8137-95462863bc7f',
},
},
},
})
}
const stubPostAssessmentFromDelius = () => {
stubFor({
request: {
method: 'POST',
urlPattern: '/assessment-from-delius',
},
response: {
headers: {
'Content-Type': 'application/json;charset=UTF-8',
},
status: 302,
jsonBody: {
assessmentUuid: '7dd4628f-51ed-491b-95c7-7c197ba434b8',
subject: {
name: 'Garry Hart',
dateOfBirth: '1987-03-14',
pnc: '2012/123450000F',
crn: 'J081276',
subjectUuid: '5a8843cf-af3e-4f5d-8137-95462863bc7f',
},
},
},
})
}
module.exports = {
stubGetAssessmentFromDelius,
stubPostAssessmentFromDelius,
} | |
20200505204654_create_table_farm.ts |
export const up = (knex: Knex) => {
return knex.schema.createTable('farm', table => {
table.increments('id').primary();
table.integer('idUser').unsigned().notNullable();
table.string('name').notNullable();
table
.timestamp('created_at')
.notNullable()
.defaultTo(knex.raw('CURRENT_TIMESTAMP'));
table
.timestamp('updated_at')
.notNullable()
.defaultTo(knex.raw('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'));
table.foreign('idUser').references('id').inTable('user');
});
};
export const down = (knex: Knex) => {
return knex.schema.dropTable('farm');
}; | import Knex from 'knex'; |
|
evaluate_v1_0.py | from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
import os
'''Official evaluation script for KorQuAD v1.0.'''
'''This script is based on the SQuAD v1.1 evaluation script at https://rajpurkar.github.io/SQuAD-explorer/.'''
def normalize_answer(s):
def remove_(text):
''' Remove unnecessary symbols '''
text = re.sub("'", " ", text)
text = re.sub('"', " ", text)
text = re.sub('《', " ", text)
text = re.sub('》', " ", text)
text = re.sub('<', " ", text)
text = re.sub('>', " ", text)
text = re.sub('〈', " ", text)
text = re.sub('〉', " ", text)
text = re.sub("\(", " ", text)
text = re.sub("\)", " ", text)
text = re.sub("‘", " ", text)
text = re.sub("’", " ", text)
return text
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.pun | oin(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_punc(lower(remove_(s))))
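# For example (illustrative): normalize_answer('"서울" (Seoul)') -> '서울 seoul'
# -- quotes and brackets become spaces, text is lower-cased, ASCII punctuation
# is stripped, and the remaining whitespace is collapsed.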
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
# F1 by character
prediction_Char = []
for tok in prediction_tokens:
now = [a for a in tok]
prediction_Char.extend(now)
ground_truth_Char = []
for tok in ground_truth_tokens:
now = [a for a in tok]
ground_truth_Char.extend(now)
common = Counter(prediction_Char) & Counter(ground_truth_Char)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_Char)
recall = 1.0 * num_same / len(ground_truth_Char)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'official_exact_match': exact_match, 'official_f1': f1}
def eval_during_train(args, step):
expected_version = 'KorQuAD_v1.0'
dataset_file = os.path.join(args.data_dir, args.task, args.predict_file)
prediction_file = os.path.join(args.output_dir, 'predictions_{}.json'.format(step))
with open(dataset_file) as dataset_f:
dataset_json = json.load(dataset_f)
read_version = "_".join(dataset_json['version'].split("_")[:-1])
if (read_version != expected_version):
print('Evaluation expects ' + expected_version +
', but got dataset with ' + read_version,
file=sys.stderr)
dataset = dataset_json['data']
with open(prediction_file) as prediction_f:
predictions = json.load(prediction_f)
return evaluate(dataset, predictions)
if __name__ == '__main__':
expected_version = 'KorQuAD_v1.0'
parser = argparse.ArgumentParser(
description='Evaluation for KorQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
read_version = "_".join(dataset_json['version'].split("_")[:-1])
if (read_version != expected_version):
print('Evaluation expects ' + expected_version +
', but got dataset with ' + read_version,
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| ctuation)
return ''.j |
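f1_score computes a character-level F1: both strings are normalised, flattened into characters, and the Counter intersection counts the characters they share. A worked example with made-up strings:

from collections import Counter

pred, truth = list("seoul"), list("seouls")
common = Counter(pred) & Counter(truth)   # multiset intersection
num_same = sum(common.values())           # 5 shared characters
precision = num_same / len(pred)          # 5/5 = 1.0
recall = num_same / len(truth)            # 5/6 ~= 0.833
f1 = 2 * precision * recall / (precision + recall)  # ~= 0.909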
mixins.py | from braces.views import UserFormKwargsMixin
from .models import Proposal
from .forms import ProposalForm
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class ProposalMixin(UserFormKwargsMixin):
model = Proposal
form_class = ProposalForm
success_message = _('Studie %(title)s bewerkt')
def get_next_url(self):
"""If the Proposal has a Wmo model attached, go to update, else, go to create""" | return reverse('proposals:wmo_create', args=(proposal.pk,))
class ProposalContextMixin:
def get_context_data(self, **kwargs):
context = super(ProposalContextMixin, self).get_context_data(**kwargs)
context['is_supervisor'] = self.object.supervisor == self.request.user
context['is_practice'] = self.object.is_practice()
return context | proposal = self.object
if hasattr(proposal, 'wmo'):
return reverse('proposals:wmo_update', args=(proposal.pk,))
else: |
Multiplier.tsx | import React from 'react'
import styled from 'styled-components'
import { HelpIcon } from '@pancakeswap-libs/uikit'
import useI18n from 'hooks/useI18n'
import Tooltip from '../Tooltip/Tooltip'
export interface MultiplierProps {
multiplier: string
}
const MultiplierWrapper = styled.div`
color: ${({ theme }) => theme.colors.text};
width: 36px;
text-align: right;
${({ theme }) => theme.mediaQueries.sm} {
text-align: left;
}
`
const Container = styled.div`
display: flex;
align-items: center;
svg {
margin-left: 14px;
}
${({ theme }) => theme.mediaQueries.sm} {
svg {
margin-left: 0;
}
}
`
const Multiplier: React.FunctionComponent<MultiplierProps> = ({ multiplier }) => {
const displayMultiplier = multiplier ? multiplier.toLowerCase() : '-'
const TranslateString = useI18n()
return (
<Container>
<MultiplierWrapper>{displayMultiplier}</MultiplierWrapper>
<Tooltip
content={
<div>
{TranslateString(999, 'The multiplier represents the amount of CAKE rewards each farm gets.')}
<br />
<br />
{TranslateString( | )}
</div>
}
>
<HelpIcon color="textSubtle" />
</Tooltip>
</Container>
)
}
export default Multiplier | 999,
'For example, if a 1x farm was getting 1 CAKE per block, a 40x farm would be getting 40 CAKE per block.', |
arraypad.py | """
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['pad']
###############################################################################
# Private utility functions.
def _arange_ndarray(arr, shape, axis, reverse=False):
"""
Create an ndarray of `shape` with increments along specified `axis`
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
shape : tuple of ints
Shape of desired array. Should be equivalent to `arr.shape` except
`shape[axis]` which may have any positive value.
axis : int
Axis to increment along.
reverse : bool
If False, increment in a positive fashion from 1 to `shape[axis]`,
inclusive. If True, the bounds are the same but the order reversed.
Returns
-------
padarr : ndarray
Output array sized to pad `arr` along `axis`, with linear range from
1 to `shape[axis]` along specified `axis`.
Notes
-----
The range is deliberately 1-indexed for this specific use case. Think of
this algorithm as broadcasting `np.arange` to a single `axis` of an
arbitrarily shaped ndarray.
"""
initshape = tuple(1 if i != axis else shape[axis]
for (i, x) in enumerate(arr.shape))
if not reverse:
padarr = np.arange(1, shape[axis] + 1)
else:
padarr = np.arange(shape[axis], 0, -1)
padarr = padarr.reshape(initshape)
for i, dim in enumerate(shape):
if padarr.shape[i] != dim:
padarr = padarr.repeat(dim, axis=i)
return padarr
def _round_ifneeded(arr, dtype):
"""
Rounds arr inplace if destination dtype is integer.
Parameters
----------
arr : ndarray
Input array.
dtype : dtype
The dtype of the destination array.
"""
if np.issubdtype(dtype, np.integer):
arr.round(out=arr)
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
val : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` constant `val` prepended along `axis`.
"""
if pad_amt == 0:
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
if val == 0:
return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
axis=axis)
else:
return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
arr), axis=axis)
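# For example (illustrative):
#   _prepend_const(np.array([1, 2, 3]), 2, 9, axis=0) -> array([9, 9, 1, 2, 3])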
def _append_const(arr, pad_amt, val, axis=-1):
"""
Append constant `val` along `axis` of `arr`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
val : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` constant `val` appended along `axis`.
"""
if pad_amt == 0:
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
if val == 0:
return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
axis=axis)
else:
return np.concatenate(
(arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
def _prepend_edge(arr, pad_amt, axis=-1):
"""
Prepend `pad_amt` to `arr` along `axis` by extending edge values.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, extended by `pad_amt` edge values prepended along `axis`.
"""
if pad_amt == 0:
return arr
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
edge_arr = arr[edge_slice].reshape(pad_singleton)
return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_edge(arr, pad_amt, axis=-1):
"""
Append `pad_amt` to `arr` along `axis` by extending edge values.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, extended by `pad_amt` edge values appended along
`axis`.
"""
if pad_amt == 0:
return arr
edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
edge_arr = arr[edge_slice].reshape(pad_singleton)
return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
axis=axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
"""
Prepend linear ramp along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
end : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region ramps linearly from the edge value to `end`.
"""
if pad_amt == 0:
return arr
# Generate shape for final concatenated array
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
# Generate an n-dimensional array incrementing along `axis`
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract edge, reshape to original rank, and extend along `axis`
edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
ramp_arr = ramp_arr * slope
ramp_arr += edge_pad
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
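# For example (illustrative):
#   _prepend_ramp(np.array([4, 5, 6]), 4, end=0, axis=0)
#   ramps from `end` up to the edge value -> array([0, 1, 2, 3, 4, 5, 6])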
def _append_ramp(arr, pad_amt, end, axis=-1):
"""
Append linear ramp along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
end : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region ramps linearly from the edge value to `end`.
"""
if pad_amt == 0:
return arr
# Generate shape for final concatenated array
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
# Generate an n-dimensional array incrementing along `axis`
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
edge_slice = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract edge, reshape to original rank, and extend along `axis`
edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
ramp_arr = ramp_arr * slope
ramp_arr += edge_pad
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
def _prepend_max(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` maximum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate maximum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the maximum of the first `num` values along
`axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
max_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate max, reshape to add singleton dimension back
max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_max(arr, pad_amt, num, axis=-1):
"""
Pad one `axis` of `arr` with the maximum of the last `num` elements.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate maximum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the maximum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
max_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
max_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate max, reshape to add singleton dimension back
max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` mean values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate mean.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the mean of the first `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
mean_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate mean, reshape to add singleton dimension back
mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
arr), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` mean values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate mean.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
        appended region is the mean of the final `num` values along `axis`.
"""
    if pad_amt == 0:
        return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
mean_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
mean_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate mean, reshape to add singleton dimension back
mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
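# Illustrative sketch (not part of the original module; values made up): for
# integer dtypes, _round_ifneeded rounds the mean and the result is cast back
# with astype, so the padded values stay in the array's own dtype.
#
#     >>> a = np.array([1, 2, 6])
#     >>> _append_mean(a, pad_amt=2, num=None, axis=0)
#     array([1, 2, 6, 3, 3])    # mean is exactly 3.0, cast back to int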
def _prepend_med(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate median.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the median of the first `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
med_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate median, reshape to add singleton dimension back
med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate median.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the median of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
med_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
med_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate median, reshape to add singleton dimension back
med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
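# Illustrative sketch (not part of the original module; values made up):
# median padding goes through np.median, a free function, rather than an
# ndarray method like the .max()/.mean() calls above.
#
#     >>> a = np.array([1, 7, 3, 5])
#     >>> _prepend_med(a, pad_amt=1, num=3, axis=0)
#     array([3, 1, 7, 3, 5])    # median of the first three values [1, 7, 3]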
def _prepend_min(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` minimum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the minimum of the first `num` values along
`axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
min_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
"""
    Append `pad_amt` minimum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the minimum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
min_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
min_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
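# Illustrative sketch (not part of the original module; values made up): the
# min helpers mirror the max helpers exactly, swapping .max() for .min().
#
#     >>> a = np.array([4, 2, 9])
#     >>> _append_min(a, pad_amt=2, num=2, axis=0)
#     array([4, 2, 9, 2, 2])    # min of the last two values [2, 9] is 2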
def _pad_ref(arr, pad_amt, method, axis=-1):
"""
Pad `axis` of `arr` by reflection.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
method : str
Controls method of reflection; options are 'even' or 'odd'.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
values appended along `axis`. Both regions are padded with reflected
values from the original array.
Notes
-----
This algorithm does not pad with repetition, i.e. the edges are not
repeated in the reflection. For that behavior, use `mode='symmetric'`.
    The modes 'reflect', 'symmetric', and 'wrap' must each be handled by a
    single function applied iteratively; otherwise, padding by a non-integer
    multiple of the original shape would break the repetition pattern in the
    final iteration.
"""
    # Nothing to do if no padding is requested along this axis
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
for (i, x) in enumerate(arr.shape))
ref_chunk1 = arr[ref_slice]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
ref_chunk1 = ref_chunk1.reshape(pad_singleton)
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
edge_slice1 = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice1].reshape(pad_singleton)
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
ref_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
for (i, x) in enumerate(arr.shape))
ref_chunk2 = arr[ref_slice][rev_idx]
if pad_amt[1] == 1:
ref_chunk2 = ref_chunk2.reshape(pad_singleton)
if 'odd' in method:
edge_slice2 = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice2].reshape(pad_singleton)
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
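# Illustrative sketch (not part of the original module; values made up):
# 'even' reflection mirrors around the edge value without repeating it, while
# 'odd' additionally flips the mirrored values through the edge (2*edge - x).
#
#     >>> a = np.array([1, 2, 3, 4])
#     >>> _pad_ref(a, (2, 2), 'even', axis=0)
#     array([3, 2, 1, 2, 3, 4, 3, 2])
#     >>> _pad_ref(a, (2, 2), 'odd', axis=0)
#     array([-1,  0,  1,  2,  3,  4,  5,  6])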
def _pad_sym(arr, pad_amt, method, axis=-1):
"""
Pad `axis` of `arr` by symmetry.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
method : str
Controls method of symmetry; options are 'even' or 'odd'.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
values appended along `axis`. Both regions are padded with symmetric
values from the original array.
Notes
-----
This algorithm DOES pad with repetition, i.e. the edges are repeated.
For padding without repeated edges, use `mode='reflect'`.
    The modes 'reflect', 'symmetric', and 'wrap' must each be handled by a
    single function applied iteratively; otherwise, padding by a non-integer
    multiple of the original shape would break the repetition pattern in the
    final iteration.
"""
    # Nothing to do if no padding is requested along this axis
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
for (i, x) in enumerate(arr.shape))
rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
for (i, x) in enumerate(arr.shape))
sym_chunk1 = arr[sym_slice][rev_idx]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
sym_chunk1 = sym_chunk1.reshape(pad_singleton)
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
edge_slice1 = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice1].reshape(pad_singleton)
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1]
end = arr.shape[axis]
sym_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
sym_chunk2 = arr[sym_slice][rev_idx]
if pad_amt[1] == 1:
sym_chunk2 = sym_chunk2.reshape(pad_singleton)
if 'odd' in method:
edge_slice2 = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice2].reshape(pad_singleton)
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
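# Illustrative sketch (not part of the original module; values made up):
# symmetric padding repeats the edge values, unlike _pad_ref above.
#
#     >>> a = np.array([1, 2, 3, 4])
#     >>> _pad_sym(a, (2, 2), 'even', axis=0)
#     array([2, 1, 1, 2, 3, 4, 4, 3])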
def _pad_wrap(arr, pad_amt, axis=-1):
"""
Pad `axis` of `arr` via wrapping.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
        values appended along `axis`. Both regions are padded with wrapped
        values from the opposite end of `axis`.
Notes
-----
This method of padding is also known as 'tile' or 'tiling'.
    The modes 'reflect', 'symmetric', and 'wrap' must each be handled by a
    single function applied iteratively; otherwise, padding by a non-integer
    multiple of the original shape would break the repetition pattern in the
    final iteration.
"""
    # Nothing to do if no padding is requested along this axis
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
start = arr.shape[axis] - pad_amt[0]
end = arr.shape[axis]
wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
wrap_chunk1 = arr[wrap_slice]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
for (i, x) in enumerate(arr.shape))
wrap_chunk2 = arr[wrap_slice]
if pad_amt[1] == 1:
wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
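# Illustrative sketch (not part of the original module; values made up):
# wrapping tiles the array, so values from the far end pad the near end and
# vice versa.
#
#     >>> a = np.array([1, 2, 3, 4])
#     >>> _pad_wrap(a, (2, 3), axis=0)
#     array([3, 4, 1, 2, 3, 4, 1, 2, 3])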
def _normalize_shape(ndarray, shape, cast_to_int=True):
"""
Private function which does some checks and normalizes the possibly
much simpler representations of 'pad_width', 'stat_length',
'constant_values', 'end_values'.
Parameters
----------
narray : ndarray
Input ndarray
shape : {sequence, array_like, float, int}, optional
The width of padding (pad_width), the number of elements on the
edge of the narray used for statistics (stat_length), the constant
value(s) to use when filling padded regions (constant_values), or the
endpoint target(s) for linear ramps (end_values).
((before_1, after_1), ... (before_N, after_N)) unique number of
elements for each axis where `N` is rank of `narray`.
((before, after),) yields same before and after constants for each
axis.
(constant,) or val is a shortcut for before = after = constant for
all axes.
cast_to_int : bool, optional
Controls if values in ``shape`` will be rounded and cast to int
before being returned.
Returns
-------
normalized_shape : tuple of tuples
val => ((val, val), (val, val), ...)
[[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)
((val1, val2), (val3, val4), ...) => no change
[[val1, val2], ] => ((val1, val2), (val1, val2), ...)
((val1, val2), ) => ((val1, val2), (val1, val2), ...)
[[val , ], ] => ((val, val), (val, val), ...)
((val , ), ) => ((val, val), (val, val), ...)
"""
ndims = ndarray.ndim
# Shortcut shape=None
if shape is None:
return ((None, None), ) * ndims
# Convert any input `info` to a NumPy array
arr = np.asarray(shape)
# Switch based on what input looks like
if arr.ndim <= 1:
if arr.shape == () or arr.shape == (1,):
# Single scalar input
# Create new array of ones, multiply by the scalar
arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr
elif arr.shape == (2,):
# Apply padding (before, after) each axis
# Create new axis 0, repeat along it for every axis
arr = arr[np.newaxis, :].repeat(ndims, axis=0)
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
elif arr.ndim == 2:
if arr.shape[1] == 1 and arr.shape[0] == ndims:
# Padded before and after by the same amount
arr = arr.repeat(2, axis=1)
        elif arr.shape[0] == ndims:
            # Input correctly formatted; keep the ndarray created by
            # np.asarray above so `arr.tolist()` below always works
            pass
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
# Cast if necessary
if cast_to_int is True:
arr = np.round(arr).astype(int)
# Convert list of lists to tuple of tuples
return tuple(tuple(axis) for axis in arr.tolist())
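# Illustrative sketch (not part of the original module; values made up): how
# the shorthand forms expand for a rank-2 input array.
#
#     >>> a2d = np.zeros((2, 3))
#     >>> _normalize_shape(a2d, 3)
#     ((3, 3), (3, 3))      # scalar -> same (before, after) for every axis
#     >>> _normalize_shape(a2d, (1, 2))
#     ((1, 2), (1, 2))      # one (before, after) pair -> repeated per axis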
def _validate_lengths(narray, number_elements):
"""
Private function which does some checks and reformats pad_width and
stat_length using _normalize_shape.
Parameters
----------
narray : ndarray
Input ndarray
number_elements : {sequence, int}, optional
The width of padding (pad_width) or the number of elements on the edge
of the narray used for statistics (stat_length).
((before_1, after_1), ... (before_N, after_N)) unique number of
elements for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or int is a shortcut for before = after = constant for all
axes.
Returns
-------
_validate_lengths : tuple of tuples
int => ((int, int), (int, int), ...)
[[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
((int1, int2), (int3, int4), ...) => no change
[[int1, int2], ] => ((int1, int2), (int1, int2), ...)
((int1, int2), ) => ((int1, int2), (int1, int2), ...)
[[int , ], ] => ((int, int), (int, int), ...)
((int , ), ) => ((int, int), (int, int), ...)
"""
normshp = _normalize_shape(narray, number_elements)
for i in normshp:
chk = [1 if x is None else x for x in i]
chk = [1 if x >= 0 else -1 for x in chk]
if (chk[0] < 0) or (chk[1] < 0):
fmt = "%s cannot contain negative values."
raise ValueError(fmt % (number_elements,))
return normshp
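# Illustrative sketch (not part of the original module; values made up):
# _validate_lengths passes well-formed per-axis pairs through unchanged and
# rejects negative entries (None is allowed and means "entire axis").
#
#     >>> _validate_lengths(np.zeros((2, 3)), ((1, 2), (0, 4)))
#     ((1, 2), (0, 4))
#     >>> _validate_lengths(np.zeros((2, 3)), -1)
#     Traceback (most recent call last):
#         ...
#     ValueError: -1 cannot contain negative values.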
###############################################################################
# Public functions
def pad(array, pad_width, mode=None, **kwargs):
"""
Pads an array.
Parameters
----------
array : array_like of rank N
Input array
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function
One of the following string values or a user supplied function.
'constant'
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
Pads with the linear ramp between end_value and the
array edge value.
'maximum'
Pads with the maximum value of all or part of the
vector along each axis.
'mean'
Pads with the mean value of all or part of the
vector along each axis.
'median'
Pads with the median value of all or part of the
vector along each axis.
'minimum'
Pads with the minimum value of all or part of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
<function>
Padding function, see Notes.
stat_length : sequence or int, optional
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
((before_1, after_1), ... (before_N, after_N)) unique statistic
lengths for each axis.
((before, after),) yields same before and after statistic lengths
for each axis.
(stat_length,) or int is a shortcut for before = after = statistic
length for all axes.
Default is ``None``, to use the entire axis.
constant_values : sequence or int, optional
Used in 'constant'. The values to set the padded values for each
axis.
((before_1, after_1), ... (before_N, after_N)) unique pad constants
for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or int is a shortcut for before = after = constant for
all axes.
Default is 0.
end_values : sequence or int, optional
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
((before_1, after_1), ... (before_N, after_N)) unique end values
for each axis.
((before, after),) yields same before and after end values for each
axis.
(constant,) or int is a shortcut for before = after = end value for
all axes.
Default is 0.
reflect_type : {'even', 'odd'}, optional
Used in 'reflect', and 'symmetric'. The 'even' style is the
default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
Notes
-----
.. versionadded:: 1.7.0
For an array with rank greater than 1, some of the padding of later
axes is calculated from padding of previous axes. This is easiest to
think about with a rank 2 array where the corners of the padded array
are calculated by using padded values from the first axis.
The padding function, if used, should return a rank 1 array equal in
length to the vector argument with padded values replaced. It has the
following signature::
padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
where
vector : ndarray
A rank 1 array already padded with zeros. Padded values are
vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
iaxis_pad_width : tuple
A 2-tuple of ints, iaxis_pad_width[0] represents the number of
values padded at the beginning of vector where
iaxis_pad_width[1] represents the number of values padded at
the end of vector.
iaxis : int
The axis currently being calculated.
kwargs : misc
Any keyword arguments the function requires.
Examples
--------
>>> a = [1, 2, 3, 4, 5]
>>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6))
array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])
>>> np.lib.pad(a, (2, 3), 'edge')
array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])
>>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
>>> np.lib.pad(a, (2,), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> np.lib.pad(a, (2,), 'mean')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> np.lib.pad(a, (2,), 'median')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> a = [[1, 2], [3, 4]]
>>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = [1, 2, 3, 4, 5]
>>> np.lib.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd')
array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
>>> np.lib.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd')
array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
>>> np.lib.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
>>> def padwithtens(vector, pad_width, iaxis, kwargs):
... vector[:pad_width[0]] = 10
... vector[-pad_width[1]:] = 10
... return vector
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
>>> np.lib.pad(a, 2, padwithtens)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
"""
if not np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
narray = np.array(array)
pad_width = _validate_lengths(narray, pad_width)
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
kwdefaults = {
'stat_length': None,
'constant_values': 0,
'end_values': 0,
'reflect_type': 'even',
}
if isinstance(mode, str):
# Make sure have allowed kwargs appropriate for mode
for key in kwargs:
if key not in allowedkwargs[mode]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[mode]))
# Set kwarg defaults
for kw in allowedkwargs[mode]:
kwargs.setdefault(kw, kwdefaults[kw])
# Need to only normalize particular keywords.
for i in kwargs:
if i == 'stat_length':
kwargs[i] = _validate_lengths(narray, kwargs[i])
if i in ['end_values', 'constant_values']:
kwargs[i] = _normalize_shape(narray, kwargs[i],
cast_to_int=False)
elif mode is None:
raise ValueError('Keyword "mode" must be a function or one of %s.' %
(list(allowedkwargs.keys()),))
else:
# Drop back to old, slower np.apply_along_axis mode for user-supplied
# vector function
function = mode
# Create a new padded array
rank = list(range(len(narray.shape)))
total_dim_increase = [np.sum(pad_width[i]) for i in rank]
offset_slices = [slice(pad_width[i][0],
pad_width[i][0] + narray.shape[i])
for i in rank]
new_shape = np.array(narray.shape) + total_dim_increase
newmat = np.zeros(new_shape, narray.dtype)
# Insert the original array into the padded array
newmat[offset_slices] = narray
# This is the core of pad ...
for iaxis in rank:
np.apply_along_axis(function,
iaxis,
newmat,
pad_width[iaxis],
iaxis,
kwargs)
return newmat
# If we get here, use new padding method
newmat = narray.copy()
# API preserved, but completely new algorithm which pads by building the
# entire block to pad before/after `arr` with in one step, for each axis.
if mode == 'constant':
for axis, ((pad_before, pad_after), (before_val, after_val)) \
in enumerate(zip(pad_width, kwargs['constant_values'])):
newmat = _prepend_const(newmat, pad_before, before_val, axis)
newmat = _append_const(newmat, pad_after, after_val, axis)
elif mode == 'edge':
for axis, (pad_before, pad_after) in enumerate(pad_width):
newmat = _prepend_edge(newmat, pad_before, axis)
newmat = _append_edge(newmat, pad_after, axis)
elif mode == 'linear_ramp':
for axis, ((pad_before, pad_after), (before_val, after_val)) \
in enumerate(zip(pad_width, kwargs['end_values'])):
newmat = _prepend_ramp(newmat, pad_before, before_val, axis)
newmat = _append_ramp(newmat, pad_after, after_val, axis)
elif mode == 'maximum':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_max(newmat, pad_before, chunk_before, axis)
newmat = _append_max(newmat, pad_after, chunk_after, axis)
elif mode == 'mean':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)
newmat = _append_mean(newmat, pad_after, chunk_after, axis)
elif mode == 'median':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_med(newmat, pad_before, chunk_before, axis)
newmat = _append_med(newmat, pad_after, chunk_after, axis)
elif mode == 'minimum':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_min(newmat, pad_before, chunk_before, axis)
newmat = _append_min(newmat, pad_after, chunk_after, axis)
elif mode == 'reflect':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
if ((pad_before > 0) or
(pad_after > 0)) and newmat.shape[axis] == 1:
# Extending singleton dimension for 'reflect' is legacy
# behavior; it really should raise an error.
newmat = _prepend_edge(newmat, pad_before, axis)
newmat = _append_edge(newmat, pad_after, axis)
continue
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis] - 1
while ((pad_before > safe_pad) or (pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_ref(newmat, (pad_iter_b,
pad_iter_a), method, axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)
elif mode == 'symmetric':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis]
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_sym(newmat, (pad_iter_b,
pad_iter_a), method, axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)
elif mode == 'wrap':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
safe_pad = newmat.shape[axis]
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
return newmat
fs.rs
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module manages how the incremental compilation cache is represented in
//! the file system.
//!
//! Incremental compilation caches are managed according to a copy-on-write
//! strategy: Once a complete, consistent cache version is finalized, it is
//! never modified. Instead, when a subsequent compilation session is started,
//! the compiler will allocate a new version of the cache that starts out as
//! a copy of the previous version. Then only this new copy is modified and it
//! will not be visible to other processes until it is finalized. This ensures
//! that multiple compiler processes can be executed concurrently for the same
//! crate without interfering with each other or blocking each other.
//!
//! More concretely this is implemented via the following protocol:
//!
//! 1. For a newly started compilation session, the compiler allocates a
//! new `session` directory within the incremental compilation directory.
//! This session directory will have a unique name that ends with the suffix
//! "-working" and that contains a creation timestamp.
//! 2. Next, the compiler looks for the newest finalized session directory,
//! that is, a session directory from a previous compilation session that
//! has been marked as valid and consistent. A session directory is
//! considered finalized if the "-working" suffix in the directory name has
//! been replaced by the SVH of the crate.
//! 3. Once the compiler has found a valid, finalized session directory, it will
//! hard-link/copy its contents into the new "-working" directory. If all
//! goes well, it will have its own, private copy of the source directory and
//! subsequently not have to worry about synchronizing with other compiler
//! processes.
//! 4. Now the compiler can do its normal compilation process, which involves
//! reading and updating its private session directory.
//! 5. When compilation finishes without errors, the private session directory
//! will be in a state where it can be used as input for other compilation
//! sessions. That is, it will contain a dependency graph and cache artifacts
//! that are consistent with the state of the source code it was compiled
//! from, with no need to change them ever again. At this point, the compiler
//! finalizes and "publishes" its private session directory by renaming it
//! from "s-{timestamp}-{random}-working" to "s-{timestamp}-{SVH}".
//! 6. At this point the "old" session directory that we copied our data from
//! at the beginning of the session has become obsolete because we have just
//! published a more current version. Thus the compiler will delete it.
//!
//! ## Garbage Collection
//!
//! Naively following the above protocol might lead to old session directories
//! piling up if a compiler instance crashes for some reason before it's able to
//! remove its private session directory. In order to avoid wasting disk space,
//! the compiler also does some garbage collection each time it is started in
//! incremental compilation mode. Specifically, it will scan the incremental
//! compilation directory for private session directories that are not in use
//! any more and will delete those. It will also delete any finalized session
//! directories for a given crate except for the most recent one.
//!
//! ## Synchronization
//!
//! There is some synchronization needed in order for the compiler to be able to
//! determine whether a given private session directory is not in use any more.
//! This is done by creating a lock file for each session directory and
//! locking it while the directory is still being used. Since file locks have
//! operating system support, we can rely on the lock being released if the
//! compiler process dies for some unexpected reason. Thus, when garbage
//! collecting private session directories, the collecting process can determine
//! whether the directory is still in use by trying to acquire a lock on the
//! file. If locking the file fails, the original process must still be alive.
//! If locking the file succeeds, we know that the owning process is not alive
//! any more and we can safely delete the directory.
//! There is still a small time window between the original process creating the
//! lock file and actually locking it. In order to minimize the chance that
//! another process tries to acquire the lock in just that instance, only
//! session directories that are older than a few seconds are considered for
//! garbage collection.
//!
//! Another case that has to be considered is what happens if one process
//! deletes a finalized session directory that another process is currently
//! trying to copy from. This case is also handled via the lock file. Before
//! a process starts copying a finalized session directory, it will acquire a
//! shared lock on the directory's lock file. Any garbage collecting process,
//! on the other hand, will acquire an exclusive lock on the lock file.
//! Thus, if a directory is being collected, any reader process will fail
//! acquiring the shared lock and will leave the directory alone. Conversely,
//! if a collecting process can't acquire the exclusive lock because the
//! directory is currently being read from, it will leave collecting that
//! directory to another process at a later point in time.
//! The exact same scheme is also used when reading the metadata hashes file
//! from an extern crate. When a crate is compiled, the hash values of its
//! metadata are stored in a file in its session directory. When the
//! compilation session of another crate imports the first crate's metadata,
//! it also has to read in the accompanying metadata hashes. It thus will access
//! the finalized session directory of all crates it links to and while doing
//! so, it will also place a read lock on the respective session directory
//! so that it won't be deleted while the metadata hashes are loaded.
//!
//! ## Preconditions
//!
//! This system relies on two features being available in the file system in
//! order to work really well: file locking and hard linking.
//! If hard linking is not available (like on FAT) the data in the cache
//! actually has to be copied at the beginning of each session.
//! If file locking does not work reliably (like on NFS), some of the
//! synchronization will go haywire.
//! In both cases we recommend locating the incremental compilation directory
//! on a file system that supports these things.
//! It might be a good idea though to try and detect whether we are on an
//! unsupported file system and emit a warning in that case. This is not yet
//! implemented.
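//!
//! As a concrete illustration (directory names made up), a successful
//! session for one crate might touch the following paths within the crate's
//! subdirectory of the incremental compilation directory:
//!
//!   s-3lco0wab-1b3nv.lock      <- lock file, created and locked first
//!   s-3lco0wab-1b3nv-working/  <- private session dir, copied from the
//!                                 newest finalized dir, then mutated
//!   s-3lco0wab-1b3nv-{SVH}/    <- the same dir, renamed ("published")
//!                                 once compilation succeeds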
use rustc::hir::svh::Svh;
use rustc::session::{Session, CrateDisambiguator};
use rustc::util::fs as fs_util;
use rustc_data_structures::{flock, base_n};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use std::fs as std_fs;
use std::io;
use std::mem;
use std::path::{Path, PathBuf};
use std::time::{UNIX_EPOCH, SystemTime, Duration};
use rand::{thread_rng, Rng};
const LOCK_FILE_EXT: &'static str = ".lock";
const DEP_GRAPH_FILENAME: &'static str = "dep-graph.bin";
const WORK_PRODUCTS_FILENAME: &'static str = "work-products.bin";
const QUERY_CACHE_FILENAME: &'static str = "query-cache.bin";
// We encode integers using the following base, so they are shorter than decimal
// or hexadecimal numbers (we want short file and directory names). Since these
// numbers will be used in file names, we choose an encoding that is not
// case-sensitive (as opposed to base64, for example).
const INT_ENCODE_BASE: usize = base_n::CASE_INSENSITIVE;
pub fn dep_graph_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME)
}
pub fn dep_graph_path_from(incr_comp_session_dir: &Path) -> PathBuf {
in_incr_comp_dir(incr_comp_session_dir, DEP_GRAPH_FILENAME)
}
pub fn work_products_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME)
}
pub fn query_cache_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, QUERY_CACHE_FILENAME)
}
pub fn lock_file_path(session_dir: &Path) -> PathBuf {
let crate_dir = session_dir.parent().unwrap();
let directory_name = session_dir.file_name().unwrap().to_string_lossy();
assert_no_characters_lost(&directory_name);
let dash_indices: Vec<_> = directory_name.match_indices("-")
.map(|(idx, _)| idx)
.collect();
if dash_indices.len() != 3 {
bug!("Encountered incremental compilation session directory with \
malformed name: {}",
session_dir.display())
}
crate_dir.join(&directory_name[0 .. dash_indices[2]])
.with_extension(&LOCK_FILE_EXT[1..])
}
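// Illustrative example (names made up): for a session directory named
// `s-3lco0wab-1b3nv-working`, everything from the third dash onwards is
// dropped, yielding the sibling lock-file path `s-3lco0wab-1b3nv.lock`.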
pub fn in_incr_comp_dir_sess(sess: &Session, file_name: &str) -> PathBuf {
in_incr_comp_dir(&sess.incr_comp_session_dir(), file_name)
}
pub fn in_incr_comp_dir(incr_comp_session_dir: &Path, file_name: &str) -> PathBuf {
incr_comp_session_dir.join(file_name)
}
/// Allocates the private session directory. The boolean in the Ok() result
/// indicates whether we should try loading a dep graph from the successfully
/// initialized directory, or not.
/// The post-condition of this fn is that we have a valid incremental
/// compilation session directory, if the result is `Ok`. A valid session
/// directory is one that contains a locked lock file. It may or may not contain
/// a dep-graph and work products from a previous session.
/// If the call fails, the fn may leave behind an invalid session directory.
/// The garbage collection will take care of it.
pub fn prepare_session_directory(sess: &Session,
crate_name: &str,
crate_disambiguator: CrateDisambiguator) {
if sess.opts.incremental.is_none() {
return
}
debug!("prepare_session_directory");
// {incr-comp-dir}/{crate-name-and-disambiguator}
let crate_dir = crate_path(sess, crate_name, crate_disambiguator);
debug!("crate-dir: {}", crate_dir.display());
if create_dir(sess, &crate_dir, "crate").is_err() {
return
}
// Hack: canonicalize the path *after creating the directory*
// because, on windows, long paths can cause problems;
// canonicalization inserts this weird prefix that makes windows
// tolerate long paths.
let crate_dir = match crate_dir.canonicalize() {
Ok(v) => v,
Err(err) => {
sess.err(&format!("incremental compilation: error canonicalizing path `{}`: {}",
crate_dir.display(), err));
return
}
};
let mut source_directories_already_tried = FxHashSet();
loop {
// Generate a session directory of the form:
//
// {incr-comp-dir}/{crate-name-and-disambiguator}/s-{timestamp}-{random}-working
let session_dir = generate_session_dir_path(&crate_dir);
debug!("session-dir: {}", session_dir.display());
// Lock the new session directory. If this fails, return an
// error without retrying
let (directory_lock, lock_file_path) = match lock_directory(sess, &session_dir) {
Ok(e) => e,
Err(_) => return,
};
// Now that we have the lock, we can actually create the session
// directory
if create_dir(sess, &session_dir, "session").is_err() {
return
}
// Find a suitable source directory to copy from. Ignore those that we
// have already tried before.
let source_directory = find_source_directory(&crate_dir,
&source_directories_already_tried);
let source_directory = if let Some(dir) = source_directory {
dir
} else {
// There's nowhere to copy from, we're done
debug!("no source directory found. Continuing with empty session \
directory.");
sess.init_incr_comp_session(session_dir, directory_lock, false);
return
};
debug!("attempting to copy data from source: {}",
source_directory.display());
// Try copying over all files from the source directory
if let Ok(allows_links) = copy_files(sess,
&session_dir,
&source_directory) {
debug!("successfully copied data from: {}",
source_directory.display());
if !allows_links {
sess.warn(&format!("Hard linking files in the incremental \
compilation cache failed. Copying files \
instead. Consider moving the cache \
directory to a file system which supports \
hard linking in session dir `{}`",
session_dir.display())
);
}
sess.init_incr_comp_session(session_dir, directory_lock, true);
return
} else {
debug!("copying failed - trying next directory");
// Something went wrong while trying to copy/link files from the
// source directory. Try again with a different one.
source_directories_already_tried.insert(source_directory);
// Try to remove the session directory we just allocated. We don't
// know if there's any garbage in it from the failed copy action.
if let Err(err) = safe_remove_dir_all(&session_dir) {
sess.warn(&format!("Failed to delete partly initialized \
session dir `{}`: {}",
session_dir.display(),
err));
}
delete_session_dir_lock_file(sess, &lock_file_path);
mem::drop(directory_lock);
}
}
}
/// This function finalizes and thus 'publishes' the session directory by
/// renaming it to `s-{timestamp}-{svh}` and releasing the file lock.
/// If there have been compilation errors, however, this function will just
/// delete the presumably invalid session directory.
pub fn finalize_session_directory(sess: &Session, svh: Svh) {
if sess.opts.incremental.is_none() {
return;
}
let incr_comp_session_dir: PathBuf = sess.incr_comp_session_dir().clone();
if sess.has_errors() {
// If there have been any errors during compilation, we don't want to
// publish this session directory. Rather, we'll just delete it.
debug!("finalize_session_directory() - invalidating session directory: {}",
incr_comp_session_dir.display());
if let Err(err) = safe_remove_dir_all(&*incr_comp_session_dir) {
sess.warn(&format!("Error deleting incremental compilation \
session directory `{}`: {}",
incr_comp_session_dir.display(),
err));
}
let lock_file_path = lock_file_path(&*incr_comp_session_dir);
delete_session_dir_lock_file(sess, &lock_file_path);
        sess.mark_incr_comp_session_as_invalid();
        return;
    }
debug!("finalize_session_directory() - session directory: {}",
incr_comp_session_dir.display());
let old_sub_dir_name = incr_comp_session_dir.file_name()
.unwrap()
.to_string_lossy();
assert_no_characters_lost(&old_sub_dir_name);
// Keep the 's-{timestamp}-{random-number}' prefix, but replace the
// '-working' part with the SVH of the crate
let dash_indices: Vec<_> = old_sub_dir_name.match_indices("-")
.map(|(idx, _)| idx)
.collect();
if dash_indices.len() != 3 {
bug!("Encountered incremental compilation session directory with \
malformed name: {}",
incr_comp_session_dir.display())
}
// State: "s-{timestamp}-{random-number}-"
let mut new_sub_dir_name = String::from(&old_sub_dir_name[.. dash_indices[2] + 1]);
// Append the svh
base_n::push_str(svh.as_u64() as u128, INT_ENCODE_BASE, &mut new_sub_dir_name);
// Create the full path
let new_path = incr_comp_session_dir.parent().unwrap().join(new_sub_dir_name);
debug!("finalize_session_directory() - new path: {}", new_path.display());
match std_fs::rename(&*incr_comp_session_dir, &new_path) {
Ok(_) => {
debug!("finalize_session_directory() - directory renamed successfully");
// This unlocks the directory
sess.finalize_incr_comp_session(new_path);
}
Err(e) => {
// Warn about the error. However, no need to abort compilation now.
sess.warn(&format!("Error finalizing incremental compilation \
session directory `{}`: {}",
incr_comp_session_dir.display(),
e));
debug!("finalize_session_directory() - error, marking as invalid");
            // Drop the file lock, so we can garbage collect
sess.mark_incr_comp_session_as_invalid();
}
}
let _ = garbage_collect_session_directories(sess);
}
pub fn delete_all_session_dir_contents(sess: &Session) -> io::Result<()> {
let sess_dir_iterator = sess.incr_comp_session_dir().read_dir()?;
for entry in sess_dir_iterator {
let entry = entry?;
safe_remove_file(&entry.path())?
}
Ok(())
}
fn copy_files(sess: &Session,
target_dir: &Path,
source_dir: &Path)
-> Result<bool, ()> {
// We acquire a shared lock on the lock file of the directory, so that
// nobody deletes it out from under us while we are reading from it.
let lock_file_path = lock_file_path(source_dir);
let _lock = if let Ok(lock) = flock::Lock::new(&lock_file_path,
false, // don't wait,
false, // don't create
false) { // not exclusive
lock
} else {
// Could not acquire the lock, don't try to copy from here
return Err(())
};
let source_dir_iterator = match source_dir.read_dir() {
Ok(it) => it,
Err(_) => return Err(())
};
let mut files_linked = 0;
let mut files_copied = 0;
for entry in source_dir_iterator {
match entry {
Ok(entry) => {
let file_name = entry.file_name();
let target_file_path = target_dir.join(file_name);
let source_path = entry.path();
debug!("copying into session dir: {}", source_path.display());
match fs_util::link_or_copy(source_path, target_file_path) {
Ok(fs_util::LinkOrCopy::Link) => {
files_linked += 1
}
Ok(fs_util::LinkOrCopy::Copy) => {
files_copied += 1
}
Err(_) => return Err(())
}
}
Err(_) => {
return Err(())
}
}
}
if sess.opts.debugging_opts.incremental_info {
println!("[incremental] session directory: \
{} files hard-linked", files_linked);
println!("[incremental] session directory: \
{} files copied", files_copied);
}
Ok(files_linked > 0 || files_copied == 0)
}
/// Generate a unique directory path of the form:
/// {crate_dir}/s-{timestamp}-{random-number}-working
fn generate_session_dir_path(crate_dir: &Path) -> PathBuf {
let timestamp = timestamp_to_string(SystemTime::now());
debug!("generate_session_dir_path: timestamp = {}", timestamp);
let random_number = thread_rng().next_u32();
debug!("generate_session_dir_path: random_number = {}", random_number);
let directory_name = format!("s-{}-{}-working",
timestamp,
base_n::encode(random_number as u128,
INT_ENCODE_BASE));
debug!("generate_session_dir_path: directory_name = {}", directory_name);
let directory_path = crate_dir.join(directory_name);
debug!("generate_session_dir_path: directory_path = {}", directory_path.display());
directory_path
}
fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(),()> {
match std_fs::create_dir_all(path) {
Ok(()) => {
debug!("{} directory created successfully", dir_tag);
Ok(())
}
Err(err) => {
sess.err(&format!("Could not create incremental compilation {} \
directory `{}`: {}",
dir_tag,
path.display(),
err));
Err(())
}
}
}
/// Allocate the lock-file and lock it.
fn lock_directory(sess: &Session,
session_dir: &Path)
-> Result<(flock::Lock, PathBuf), ()> {
let lock_file_path = lock_file_path(session_dir);
debug!("lock_directory() - lock_file: {}", lock_file_path.display());
match flock::Lock::new(&lock_file_path,
false, // don't wait
true, // create the lock file
true) { // the lock should be exclusive
Ok(lock) => Ok((lock, lock_file_path)),
Err(err) => {
sess.err(&format!("incremental compilation: could not create \
session directory lock file: {}", err));
Err(())
}
}
}
fn delete_session_dir_lock_file(sess: &Session,
lock_file_path: &Path) {
if let Err(err) = safe_remove_file(&lock_file_path) {
sess.warn(&format!("Error deleting lock file for incremental \
compilation session directory `{}`: {}",
lock_file_path.display(),
err));
}
}
/// Find the most recent published session directory that is not in the
/// ignore-list.
fn find_source_directory(crate_dir: &Path,
source_directories_already_tried: &FxHashSet<PathBuf>)
-> Option<PathBuf> {
let iter = crate_dir.read_dir()
.unwrap() // FIXME
.filter_map(|e| e.ok().map(|e| e.path()));
find_source_directory_in_iter(iter, source_directories_already_tried)
}
fn find_source_directory_in_iter<I>(iter: I,
source_directories_already_tried: &FxHashSet<PathBuf>)
-> Option<PathBuf>
where I: Iterator<Item=PathBuf>
{
let mut best_candidate = (UNIX_EPOCH, None);
for session_dir in iter {
debug!("find_source_directory_in_iter - inspecting `{}`",
session_dir.display());
let directory_name = session_dir.file_name().unwrap().to_string_lossy();
assert_no_characters_lost(&directory_name);
if source_directories_already_tried.contains(&session_dir) ||
!is_session_directory(&directory_name) ||
!is_finalized(&directory_name) {
debug!("find_source_directory_in_iter - ignoring.");
continue
}
let timestamp = extract_timestamp_from_session_dir(&directory_name)
.unwrap_or_else(|_| {
bug!("unexpected incr-comp session dir: {}", session_dir.display())
});
if timestamp > best_candidate.0 {
best_candidate = (timestamp, Some(session_dir.clone()));
}
}
best_candidate.1
}
fn is_finalized(directory_name: &str) -> bool {
!directory_name.ends_with("-working")
}
fn is_session_directory(directory_name: &str) -> bool {
directory_name.starts_with("s-") &&
!directory_name.ends_with(LOCK_FILE_EXT)
}
fn is_session_directory_lock_file(file_name: &str) -> bool {
file_name.starts_with("s-") && file_name.ends_with(LOCK_FILE_EXT)
}
fn extract_timestamp_from_session_dir(directory_name: &str)
-> Result<SystemTime, ()> {
if !is_session_directory(directory_name) {
return Err(())
}
let dash_indices: Vec<_> = directory_name.match_indices("-")
.map(|(idx, _)| idx)
.collect();
if dash_indices.len() != 3 {
return Err(())
}
string_to_timestamp(&directory_name[dash_indices[0]+1 .. dash_indices[1]])
}
fn timestamp_to_string(timestamp: SystemTime) -> String {
let duration = timestamp.duration_since(UNIX_EPOCH).unwrap();
let micros = duration.as_secs() * 1_000_000 +
(duration.subsec_nanos() as u64) / 1000;
base_n::encode(micros as u128, INT_ENCODE_BASE)
}
fn string_to_timestamp(s: &str) -> Result<SystemTime, ()> {
let micros_since_unix_epoch = u64::from_str_radix(s, INT_ENCODE_BASE as u32);
if micros_since_unix_epoch.is_err() {
return Err(())
}
let micros_since_unix_epoch = micros_since_unix_epoch.unwrap();
let duration = Duration::new(micros_since_unix_epoch / 1_000_000,
1000 * (micros_since_unix_epoch % 1_000_000) as u32);
Ok(UNIX_EPOCH + duration)
}
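// Illustrative round trip (value made up): `timestamp_to_string` encodes the
// number of microseconds since UNIX_EPOCH in base INT_ENCODE_BASE (a
// case-insensitive encoding, chosen above to be filesystem-safe), and
// `string_to_timestamp` parses such a string back into a SystemTime with
// microsecond precision.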
fn crate_path(sess: &Session,
crate_name: &str,
crate_disambiguator: CrateDisambiguator)
-> PathBuf {
let incr_dir = sess.opts.incremental.as_ref().unwrap().clone();
// The full crate disambiguator is really long. 64 bits of it should be
// sufficient.
let crate_disambiguator = crate_disambiguator.to_fingerprint().to_smaller_hash();
let crate_disambiguator = base_n::encode(crate_disambiguator as u128,
INT_ENCODE_BASE);
let crate_name = format!("{}-{}", crate_name, crate_disambiguator);
incr_dir.join(crate_name)
}
fn assert_no_characters_lost(s: &str) {
if s.contains('\u{FFFD}') {
bug!("Could not losslessly convert '{}'.", s)
}
}
fn is_old_enough_to_be_collected(timestamp: SystemTime) -> bool {
timestamp < SystemTime::now() - Duration::from_secs(10)
}
pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
debug!("garbage_collect_session_directories() - begin");
let session_directory = sess.incr_comp_session_dir();
debug!("garbage_collect_session_directories() - session directory: {}",
session_directory.display());
let crate_directory = session_directory.parent().unwrap();
debug!("garbage_collect_session_directories() - crate directory: {}",
crate_directory.display());
// First do a pass over the crate directory, collecting lock files and
// session directories
let mut session_directories = FxHashSet();
let mut lock_files = FxHashSet();
for dir_entry in try!(crate_directory.read_dir()) {
let dir_entry = match dir_entry {
Ok(dir_entry) => dir_entry,
_ => {
// Ignore any errors
continue
}
};
let entry_name = dir_entry.file_name();
let entry_name = entry_name.to_string_lossy();
if is_session_directory_lock_file(&entry_name) {
assert_no_characters_lost(&entry_name);
lock_files.insert(entry_name.into_owned());
} else if is_session_directory(&entry_name) {
assert_no_characters_lost(&entry_name);
session_directories.insert(entry_name.into_owned());
} else {
// This is something we don't know, leave it alone
}
}
// Now map from lock files to session directories
let lock_file_to_session_dir: FxHashMap<String, Option<String>> =
lock_files.into_iter()
.map(|lock_file_name| {
assert!(lock_file_name.ends_with(LOCK_FILE_EXT));
let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len();
let session_dir = {
let dir_prefix = &lock_file_name[0 .. dir_prefix_end];
session_directories.iter()
.find(|dir_name| dir_name.starts_with(dir_prefix))
};
(lock_file_name, session_dir.map(String::clone))
})
.collect();
    // Delete all lock files that don't have an associated directory; they
    // must be some kind of leftover
for (lock_file_name, directory_name) in &lock_file_to_session_dir {
if directory_name.is_none() {
let timestamp = match extract_timestamp_from_session_dir(lock_file_name) {
Ok(timestamp) => timestamp,
Err(()) => {
debug!("Found lock-file with malformed timestamp: {}",
crate_directory.join(&lock_file_name).display());
// Ignore it
continue
}
};
let lock_file_path = crate_directory.join(&**lock_file_name);
if is_old_enough_to_be_collected(timestamp) {
debug!("garbage_collect_session_directories() - deleting \
garbage lock file: {}", lock_file_path.display());
delete_session_dir_lock_file(sess, &lock_file_path);
} else {
debug!("garbage_collect_session_directories() - lock file with \
no session dir not old enough to be collected: {}",
lock_file_path.display());
}
}
}
// Filter out `None` directories
let lock_file_to_session_dir: FxHashMap<String, String> =
lock_file_to_session_dir.into_iter()
.filter_map(|(lock_file_name, directory_name)| {
directory_name.map(|n| (lock_file_name, n))
})
.collect();
// Delete all session directories that don't have a lock file.
for directory_name in session_directories {
if !lock_file_to_session_dir.values().any(|dir| *dir == directory_name) {
let path = crate_directory.join(directory_name);
if let Err(err) = safe_remove_dir_all(&path) {
sess.warn(&format!("Failed to garbage collect invalid incremental \
compilation session directory `{}`: {}",
path.display(),
err));
}
}
}
// Now garbage collect the valid session directories.
let mut deletion_candidates = vec![];
let mut definitely_delete = vec![];
for (lock_file_name, directory_name) in &lock_file_to_session_dir {
debug!("garbage_collect_session_directories() - inspecting: {}",
directory_name);
let timestamp = match extract_timestamp_from_session_dir(directory_name) {
Ok(timestamp) => timestamp,
Err(()) => {
debug!("Found session-dir with malformed timestamp: {}",
crate_directory.join(directory_name).display());
// Ignore it
continue
}
};
if is_finalized(directory_name) {
let lock_file_path = crate_directory.join(lock_file_name);
match flock::Lock::new(&lock_file_path,
false, // don't wait
false, // don't create the lock-file
true) { // get an exclusive lock
Ok(lock) => {
debug!("garbage_collect_session_directories() - \
successfully acquired lock");
debug!("garbage_collect_session_directories() - adding \
deletion candidate: {}", directory_name);
// Note that we are holding on to the lock
deletion_candidates.push((timestamp,
crate_directory.join(directory_name),
Some(lock)));
}
Err(_) => {
debug!("garbage_collect_session_directories() - \
not collecting, still in use");
}
}
} else if is_old_enough_to_be_collected(timestamp) {
// When cleaning out "-working" session directories, i.e.
// session directories that might still be in use by another
// compiler instance, we only look at directories that are
// at least ten seconds old. This is supposed to reduce the
// chance of deleting a directory in the time window where
// the process has allocated the directory but has not yet
// acquired the file-lock on it.
// Try to acquire the directory lock. If we can't, it
// means that the owning process is still alive and we
// leave this directory alone.
let lock_file_path = crate_directory.join(lock_file_name);
match flock::Lock::new(&lock_file_path,
false, // don't wait
false, // don't create the lock-file
true) { // get an exclusive lock
Ok(lock) => {
debug!("garbage_collect_session_directories() - \
successfully acquired lock");
// Note that we are holding on to the lock
definitely_delete.push((crate_directory.join(directory_name),
Some(lock)));
}
Err(_) => {
debug!("garbage_collect_session_directories() - \
not collecting, still in use");
}
}
} else {
debug!("garbage_collect_session_directories() - not finalized, not \
old enough");
}
}
// Delete all but the most recent of the candidates
for (path, lock) in all_except_most_recent(deletion_candidates) {
debug!("garbage_collect_session_directories() - deleting `{}`",
path.display());
if let Err(err) = safe_remove_dir_all(&path) {
sess.warn(&format!("Failed to garbage collect finalized incremental \
compilation session directory `{}`: {}",
path.display(),
err));
} else {
delete_session_dir_lock_file(sess, &lock_file_path(&path));
}
// Let's make it explicit that the file lock is released at this point,
// or rather, that we held on to it until here
mem::drop(lock);
}
for (path, lock) in definitely_delete {
debug!("garbage_collect_session_directories() - deleting `{}`",
path.display());
if let Err(err) = safe_remove_dir_all(&path) {
sess.warn(&format!("Failed to garbage collect incremental \
compilation session directory `{}`: {}",
path.display(),
err));
} else {
delete_session_dir_lock_file(sess, &lock_file_path(&path));
}
// Let's make it explicit that the file lock is released at this point,
// or rather, that we held on to it until here
mem::drop(lock);
}
Ok(())
}
fn all_except_most_recent(deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>)
-> FxHashMap<PathBuf, Option<flock::Lock>> {
let most_recent = deletion_candidates.iter()
.map(|&(timestamp, ..)| timestamp)
.max();
if let Some(most_recent) = most_recent {
deletion_candidates.into_iter()
.filter(|&(timestamp, ..)| timestamp != most_recent)
.map(|(_, path, lock)| (path, lock))
.collect()
} else {
FxHashMap()
}
}
/// Since paths of artifacts within session directories can get quite long, we
/// need to support deleting files with very long paths. The regular
/// WinApi functions only support paths up to 260 characters, however. In order
/// to circumvent this limitation, we canonicalize the path of the directory
/// before passing it to std::fs::remove_dir_all(). This will convert the path
/// into the '\\?\' format, which supports much longer paths.
fn safe_remove_dir_all(p: &Path) -> io::Result<()> {
if p.exists() {
let canonicalized = try!(p.canonicalize());
std_fs::remove_dir_all(canonicalized)
} else {
Ok(())
}
}
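// Illustrative note (added for clarity, not from the original source): on
// Windows, `Path::canonicalize()` returns an extended-length path, e.g.
//
//     C:\work\target\incremental  ->  \\?\C:\work\target\incremental
//
// and `remove_dir_all()` on the `\\?\`-prefixed form is not subject to the
// 260-character MAX_PATH limit.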
fn safe_remove_file(p: &Path) -> io::Result<()> {
if p.exists() {
let canonicalized = try!(p.canonicalize());
std_fs::remove_file(canonicalized)
} else {
Ok(())
}
}
#[test]
fn test_all_except_most_recent() |
#[test]
fn test_timestamp_serialization() {
for i in 0 .. 1_000u64 {
let time = UNIX_EPOCH + Duration::new(i * 1_434_578, (i as u32) * 239_000);
let s = timestamp_to_string(time);
assert_eq!(Ok(time), string_to_timestamp(&s));
}
}
#[test]
fn test_find_source_directory_in_iter() {
let already_visited = FxHashSet();
// Find newest
assert_eq!(find_source_directory_in_iter(
vec![PathBuf::from("crate-dir/s-3234-0000-svh"),
PathBuf::from("crate-dir/s-2234-0000-svh"),
PathBuf::from("crate-dir/s-1234-0000-svh")].into_iter(), &already_visited),
Some(PathBuf::from("crate-dir/s-3234-0000-svh")));
// Filter out "-working"
assert_eq!(find_source_directory_in_iter(
vec![PathBuf::from("crate-dir/s-3234-0000-working"),
PathBuf::from("crate-dir/s-2234-0000-svh"),
PathBuf::from("crate-dir/s-1234-0000-svh")].into_iter(), &already_visited),
Some(PathBuf::from("crate-dir/s-2234-0000-svh")));
// Handle empty
assert_eq!(find_source_directory_in_iter(vec![].into_iter(), &already_visited),
None);
// Handle only working
assert_eq!(find_source_directory_in_iter(
vec![PathBuf::from("crate-dir/s-3234-0000-working"),
PathBuf::from("crate-dir/s-2234-0000-working"),
PathBuf::from("crate-dir/s-1234-0000-working")].into_iter(), &already_visited),
None);
}
| {
assert_eq!(all_except_most_recent(
vec![
(UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None),
(UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None),
(UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None),
(UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None),
(UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None),
]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
vec![
PathBuf::from("1"),
PathBuf::from("2"),
PathBuf::from("3"),
PathBuf::from("4"),
].into_iter().collect::<FxHashSet<PathBuf>>()
);
assert_eq!(all_except_most_recent(
vec![
]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
FxHashSet()
);
} |
apps.py | from django.apps import AppConfig
class TimelineappConfig(AppConfig):
| name = 'timelineApp' |
|
test_dnsx.py | from habu.lib import dnsx
def | ():
assert 'aspmx.l.google.com.' in dnsx.mx('google.com')
def test_ns():
assert 'ns1.google.com.' in dnsx.ns('google.com')
def test_axfr_fail():
assert not dnsx.axfr('google.com')
def test_axfr_success():
assert dnsx.axfr('zonetransfer.me')
| test_mx |
host.py | import socket
import time
# import logging
import tornado.ioloop
import tornado.iostream
from tornado import gen
from . import constants
from . import exceptions
class Host(object):
def __init__(self, host, conn, debug=0):
|
def _ensure_connection(self):
if self.sock:
return self
remaining = constants.SOCKET_TIMEOUT
last_error = None
for family, socktype, proto, _, addr in socket.getaddrinfo(
self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM
):
if not remaining:
self.mark_dead('connect: no time left')
return None
# Take the start time before the try block so `start` is always
# bound when computing `duration` below, even if socket creation fails.
start = time.time()
try:
s = socket.socket(family, socktype, proto)
s.settimeout(remaining)
s.connect(addr)
break
except socket.timeout as msg:
self.mark_dead('connect: {}'.format(msg))
return None
except socket.error as msg:
if isinstance(msg, tuple):
msg = msg[1]
last_error = msg
s.close()
duration = time.time() - start
remaining = max(remaining - duration, 0)
else:
# if we never broke out of the getaddr loop
self.mark_dead('connect: {}'.format(last_error))
return None
self.sock = s
self.stream = tornado.iostream.IOStream(s)
self.stream.debug = True
return self
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def mark_dead(self, reason):
self.disconnect_reason = str(reason)
self.deaduntil = time.time() + self.dead_retry
if self.flush_on_reconnect:
self.flush_on_next_connect = 1
self.close_socket()
def close_socket(self):
if self.sock:
self.stream.close()
self.sock.close()
self.sock = None
def raise_if_disconnect(self):
if hasattr(self, 'disconnect_reason'):
raise exceptions.ConnectionDeadError(
'socket host "{}" port "{}" disconnected because "{}"'.format(
self.host,
self.port,
self.disconnect_reason
)
)
@gen.coroutine
def send_cmd(self, cmd, noreply=False, stream=False):
if self._ensure_connection() is None:
self.raise_if_disconnect()
cmd = cmd + "\r\n".encode()
if stream:
yield self.stream.write(cmd)
raise gen.Return(self.stream)
elif self.stream:
yield self.stream.write(cmd)
if not noreply and self.stream:
response = yield self.stream.read_until(b'\r\n')
raise gen.Return(response[:-2])
self.raise_if_disconnect()
| self.debug = debug
self.host = host
self.port = 11211
self.flush_on_reconnect = 1
self.stream = None
self.flush_on_next_connect = 0
self.dead_retry = constants.DEAD_RETRY
self.deaduntil = 0
if ":" in self.host:
parts = self.host.rsplit(":", 1)
self.host = parts[0]
self.port = int(parts[1])
if self.host.startswith('[') and self.host.endswith(']'):
self.host = self.host[1:-1]
self.sock = None |
state_action_network.py | import abc
import tensorflow as tf
from railrl.core.neuralnet import NeuralNetwork
from rllab.misc.overrides import overrides
class StateActionNetwork(NeuralNetwork, metaclass=abc.ABCMeta):
"""
A map from (state, action) to a vector
"""
def __init__(
self,
name_or_scope,
output_dim,
env_spec=None,
action_dim=None,
observation_dim=None,
action_input=None,
observation_input=None,
**kwargs
):
"""
Create a state-action network.
:param name_or_scope: a string or VariableScope
:param output_dim: int, output dimension of this network
:param env_spec: env spec for an Environment
:param action_dim: int, action dimension
:param observation_input: tf.Tensor, observation input. If None,
a placeholder of shape [None, observation dim] will be made
:param action_input: tf.Tensor, action input. If None,
a placeholder of shape [None, action dim] will be made
:param kwargs: kwargs to be passed to super
"""
self.setup_serialization(locals())
super(StateActionNetwork, self).__init__(name_or_scope, **kwargs)
self.output_dim = output_dim
assert env_spec or (action_dim and observation_dim)
if action_dim is None:
self.action_dim = env_spec.action_space.flat_dim
else:
self.action_dim = action_dim
if observation_dim is None:
|
else:
self.observation_dim = observation_dim
with tf.variable_scope(self.scope_name):
if action_input is None:
action_input = tf.placeholder(
tf.float32,
[None, self.action_dim],
"_actions")
if observation_input is None:
if hasattr(self.observation_dim, '__len__'):
observation_input = tf.placeholder(
tf.float32,
[None] + list(self.observation_dim),
"_observation")
else:
observation_input = tf.placeholder(
tf.float32,
[None, self.observation_dim],
"_observation")
self.action_input = action_input
self.observation_input = observation_input
self._create_network(observation_input=observation_input,
action_input=action_input)
@property
@overrides
def _input_name_to_values(self):
return dict(
observation_input=self.observation_input,
action_input=self.action_input,
)
# TODO(vpong): make it so that the inputs get automatically processed
| self.observation_dim = env_spec.observation_space.flat_dim |
@@match.ts | import {OrdinaryDefineOwnProperty} from "../../../abstract-operation/ordinary-define-own-property";
import {NATIVE_SYMBOL_MATCH} from "../../../symbol/native/native";
import {regExpPrototypeSymbolMatch} from "../../../reg-exp/prototype/@@match";
export function patchRegExpPrototypeSymbolMatch(): void {
// RegExp.prototype.@@match
OrdinaryDefineOwnProperty(RegExp.prototype, Symbol.match, { | "[[Writable]]": false,
"[[Enumerable]]": false,
"[[Configurable]]": true,
"[[Value]]": regExpPrototypeSymbolMatch()
});
if (NATIVE_SYMBOL_MATCH !== undefined) {
OrdinaryDefineOwnProperty(RegExp.prototype, NATIVE_SYMBOL_MATCH, {
// This property has the attributes { [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: true }.
"[[Writable]]": false,
"[[Enumerable]]": false,
"[[Configurable]]": true,
"[[Value]]": regExpPrototypeSymbolMatch()
});
}
} | // This property has the attributes { [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: true }. |
postCard.js | import React from "react"
import { Link } from "gatsby"
import Img from "gatsby-image"
export default props => (
<div
className={`post-card ${props.node.thumbnail ? `with-image` : `no-image`}`}
style={
props.node.thumbnail && {
backgroundImage: `url(${props.node.thumbnail.fluid.src})`,
}
}
>
<Link to={props.node.slug} className="post-card-link">
{props.node.thumbnail && <Img fluid={props.node.thumbnail.fluid} />}
<div className="post-card-content">
<h2 className="post-card-title">
{props.node.title || props.node.slug}
</h2>
</div>
</Link> | </div>
) |
|
Handling.py | from Util import *
def | (s):
s.throttle = curve1((s.y - .63 * s.brakes * s.pyv / ((1 - s.pyv / 2300) * 3 + 1)) / 999)
s.steer = curve1(Range180(s.a - s.av / 55, 1))
s.pitch = regress(-s.i - s.iv / 17)
s.yaw = regress(Range180(s.a - s.av / 12, 1))
s.roll = regress(Range180(- s.r + s.rv / 22, 1)) * (abs(s.a) < .15)
s.boost = s.throttle > .5 and abs(s.a) < .12 and (
s.poG or abs(s.i) < .2) and abs(s.y) > 99 and s.pyv < 2260
s.powerslide = s.jump = 0
# general powerslide
if s.throttle * s.pyv >= 0 and s.av * s.steer >= 0 and s.pxv * s.steer >= 0 and (
# sliding
(ang_dif(s.a, s.pva, 1) < .15 and .05 < abs(s.a) < .95) or (
# turning
s.pL[2] < 99 and .24 < abs(s.a) < .76 and s.a * ang_dif(s.a, s.av / 7, 1)) or (
# landing
s.gtime < .05 and ang_dif(s.a, s.pva, 1) < .25 and not s.kickoff)):
s.powerslide = 1
# turn 180°
if s.d2 > 400 and abs(s.a + s.av / 2.25) > 0.45:
if abs(s.a) > 0.98:
s.steer = 1
if s.d2 > 600 and s.pyv < -90:
if (abs(s.a) < 0.98 and abs(s.av) > 0.5 and
ang_dif(s.a, s.pva, 1) < .25):
s.powerslide = 1
s.steer = -sign(s.steer)
elif s.d2 > 800 and abs(s.a) < 0.95 and s.pyv < 1000:
s.throttle = 1
# three point turn
if (s.poG and 20 < abs(s.x) < 400 and abs(s.y) < 200 and .35 < abs(s.a) < .65
and abs(s.pyv) < 550 and abs(s.yv) < 550):
s.throttle = -sign(s.throttle)
s.steer = -sign(s.steer)
# general jump
if (s.z > 140 and s.tojump and (
# flying jump
(s.z < (200 * s.jcount + s.pB / 2) * s.dT * 2 and s.d2pv < 99)
or # directly below the ball
(s.z < s.jcount * 250 + s.pB * 10 and s.d2pv < 100 and s.vd2 < 150))):
s.jumper = 1
# jumping off walls
if ((s.z > 1350 or ((s.d < s.z * 1.5 or s.vd < 400) and s.pL[2] < 500
and abs(s.a) < .15 and s.bL[2] < 500)) and s.poG and
s.pL[2] > 60 and (abs(0.5 - abs(s.a)) > 0.25 or s.d > 2500)) or (
s.poG and s.pL[2] > 1900 and s.d2pv < 120):
s.jump = 1
# flip
if (s.flip and s.d > 400 and ang_dif(s.a, s.pva, 1) < .06 and s.pB < 80 and
s.pvd < 2200 and s.jcount > 0 and (s.gtime > 0.05 or not s.poG) and
not s.jumper and abs(s.i) < .2 and ((s.pyv > 1640 and s.ty - s.yv / 4 > 3500)
or (abs(s.a) > 0.75 and abs(s.ty - s.yv / 6) > 850 and s.pyv < -140)
or (s.pyv > 1120 and s.ty - s.yv / 4 > 3000 and s.pB < 16)
or (2000 > s.pyv > 970 and s.ty - s.pyv / 4 > 1650 and s.pB < 6))):
s.dodge = 1
s.djL = 's.tL'
# jump for wavedash
if (s.d > 550 and 950 < (s.ty - s.yv / 2) and ang_dif(s.a, s.pva, 1) < .02
and abs(s.i) < 0.1 and s.pL[2] < 50 and s.poG and s.pB < 40 and
1050 < s.pvd < 2200 and s.gtime > .1 and s.wavedash):
s.jump = 1
# forward wavedash
if (s.jcount > 0 and s.pL[2] + s.pV[2] / 20 < 32 and abs(s.r) < 0.1 and
abs(s.a) < 0.04 and s.y > 400 and 0 < abs(s.pR[0] / U) < 0.12 and
not s.poG and s.pV[2] < -210 and s.wavedash):
s.jump = 1
s.pitch = -1
s.yaw = s.roll = 0
if s.shoot:
dodge_hit(s)
# handling long jumps
if s.jumper and s.jcount > 0:
s.jump = 1
if not s.poG and (s.ljump != s.lljump or not s.ljump):
s.pitch = s.yaw = s.roll = 0
if 0.19 < s.airtime and s.z + s.zv / 12 > 120:
s.jump = not s.ljump
# handling pre-dodge
if s.dodge and s.jcount > 0:
s.jump = s.poG or s.z > 0
if 0.08 < s.airtime and s.pL[2] > 45:
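# s.djL holds a Python expression as a string (e.g. 's.tL'), which the
# exec below evaluates against the current state to compute the dodge angle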
exec("s.dja = dodge_ang(s, " + s.djL + ")")
s.jump = not s.ljump
s.pitch = abs(s.dja) * 2 - 1
s.yaw = (abs(Range180(s.dja + .5, 1) * 2) - 1) * .9
s.roll = 0
s.djT = s.time
# handling post-dodge
if 0.05 < s.djtime < 0.25:
s.pitch = s.roll = s.yaw = 0
if 0.25 < s.djtime < 0.65:
if abs(s.a) < 0.5:
if abs(s.a) < 0.8:
s.pitch = -sign(s.iv)
else:
s.pitch = s.yaw = s.roll = 0
if not s.index:
0
def dodge_hit(s):
d2pv = d2(s.tL - s.pL - s.pV * (s.dT + .1))
# dodge hit
if (d2pv < 99 and abs(s.tL[2] - s.pL[2]) < 110 and s.bd < 1299):
# dodge to shoot
if (s.offense and (abs(s.glinex) < 650 or Range180(s.gta - s.gpa, 1) < .01)
# dodge to clear
or ((not s.offense or abs(s.a) > .8) and abs(s.oglinex) > 1400)
# dodge for kickoff
or s.kickoff):
s.dodge = 1
s.djL = 's.bL + s.bV/60'
def dodge_ang(s, tL):
L = tL - s.pL
yaw = Range180(s.pR[1] - U / 2, U) * pi / U
x, y = rotate2D(L[0], L[1], -yaw)
a = math.atan2(y, x)
return Range180(a / pi - .5, 1)
| controls |
registration.py | # ---------------------------------------------------------------------------
# Copyright 2017-2018 OMRON Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import sys
import time
import p2def
from serial_connector import SerialConnector
from hvc_p2_api import HVCP2Api
from hvc_tracking_result import HVCTrackingResult
from grayscale_image import GrayscaleImage
###############################################################################
# User Config. Please edit here if you need.
###############################################################################
# Output image file name.
img_fname = 'registered_img.jpg'
# Read timeout value in seconds for serial communication.
# If you use UART slow baudrate, please edit here.
timeout = 30
# Album file name.
album_fname = 'Album.dat'
# HVC Camera Angle setting
hvc_camera_angle = p2def.HVC_CAM_ANGLE_0
# HVC_CAM_ANGLE_90
# HVC_CAM_ANGLE_180
# HVC_CAM_ANGLE_270
# Threshold value settings
body_thresh = 500 # Threshold for Human body detection [1 to 1000]
hand_thresh = 500 # Threshold for Hand detection [1 to 1000]
face_thresh = 500 # Threshold for Face detection [1 to 1000]
recognition_thresh = 500 # Threshold for Face recognition [0 to 1000]
# Detection Size settings
min_body_size = 30 # Minimum Human body detection size [20 to 8192]
max_body_size = 8192 # Maximum Human body detection size [20 to 8192]
min_hand_size = 40 # Minimum Hand detection size [20 to 8192]
max_hand_size = 8192 # Maximum Hand detection size [20 to 8192]
min_face_size = 64 # Minimum Face detection size [20 to 8192]
max_face_size = 8192 # Maximum Face detection size [20 to 8192]
# Execute functions
exec_func = p2def.EX_FACE\
| p2def.EX_DIRECTION\
| p2def.EX_RECOGNITION
# Detection face angle settings
face_angle_yaw = p2def.HVC_FACE_ANGLE_YAW_30
face_angle_roll = p2def.HVC_FACE_ANGLE_ROLL_15
# HVC_FACE_ANGLE_ROLL_45
###############################################################################
def _parse_arg(argv):
if len(argv) == 3:
# Gets port information
portinfo = argv[1]
# Gets baudrate
baudrate = int(argv[2])
if baudrate not in p2def.AVAILABLE_BAUD:
print("Error: Invalid baudrate.")
sys.exit()
else:
print("Error: Invalid argument.")
sys.exit()
return (portinfo, baudrate)
def _check_connection(hvc_p2_api):
(res_code, hvc_type, major, minor, release, rev) = hvc_p2_api.get_version()
if res_code != 0 or not hvc_type.startswith('B5T-007001'):
raise IOError("Error: connection failure.")
def _set_hvc_p2_parameters(hvc_p2_api):
# Sets camera angle
res_code = hvc_p2_api.set_camera_angle(hvc_camera_angle)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid camera angle.")
# Sets threshold
res_code = hvc_p2_api.set_threshold(body_thresh, hand_thresh,\
face_thresh, recognition_thresh)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid threshold.")
# Sets detection size
res_code = hvc_p2_api.set_detection_size(min_body_size, max_body_size,\
min_hand_size, max_hand_size,\
min_face_size, max_face_size)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid detection size.")
# Sets face angle
res_code = hvc_p2_api.set_face_angle(face_angle_yaw, face_angle_roll)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
raise ValueError("Error: Invalid face angle.")
def main():
# Parses arguments
(p |
if __name__ == '__main__':
main()
| ortinfo, baudrate) = _parse_arg(sys.argv)
connector = SerialConnector()
hvc_p2_api = HVCP2Api(connector, exec_func, p2def.USE_STB_OFF)
# The 1st connection
hvc_p2_api.connect(portinfo, p2def.DEFAULT_BAUD, 10) # 1st connection should be 9600 baud.
_check_connection(hvc_p2_api)
hvc_p2_api.set_uart_baudrate(baudrate) # Changing to the specified baud rate
hvc_p2_api.disconnect()
# The 2nd connection in specified baudrate
hvc_p2_api.connect(portinfo, baudrate, timeout)
_check_connection(hvc_p2_api)
try:
# Sets HVC-P2 parameters
_set_hvc_p2_parameters(hvc_p2_api)
img = GrayscaleImage()
# Main loop
while True:
menu = "\n"\
+ "Please select the command.\n"\
+ " r : registration.\n"\
+ " g : get user data.\n"\
+ " s : save album.\n"\
+ " l : load album.\n"\
+ " d : delete all album data.\n"\
+ " x : exit.\n"\
+ " >>"
operation_str = raw_input(menu)
if operation_str == 'x':
break
if operation_str == 'r':
while True:
str_uid = raw_input('user id [0-99] ')
if str_uid.isdigit() and 0 <= int(str_uid) <= 99:
user_id = int(str_uid)
break
while True:
str_did = raw_input('data id [0-9] ')
if str_did.isdigit() and 0 <= int(str_did) <= 9:
data_id = int(str_did)
break
raw_input('Press Enter key to register.')
res_code = hvc_p2_api.register_data(user_id, data_id, img)
if res_code < p2def.RESPONSE_CODE_NORMAL: # error
print("Error: Invalid register album.")
break
if res_code == p2def.RESPONSE_CODE_NO_FACE:
print("\nNumber of faces that can be registered is 0.")
if res_code == p2def.RESPONSE_CODE_PLURAL_FACE:
print("\nNumber of detected faces is 2 or more.")
if res_code == p2def.RESPONSE_CODE_NORMAL: # success
img.save(img_fname)
print(f"Success to register. user_id={str_uid} data_id={str_did})
if operation_str == 'g':
while True:
str_uid = raw_input('user id [0-99] ')
if str_uid.isdigit() and 0 <= int(str_uid) <= 99:
user_id = int(str_uid)
break
print(f"uid[{user_id}]: "
res_code, data_list = hvc_p2_api.get_user_data(user_id)
if res_code < p2def.RESPONSE_CODE_NORMAL: # error
print("Error: Invalid register album.")
break
print(data_list)
if operation_str == 's':
# Saves album to flash ROM on B5T-007001.
res_code = hvc_p2_api.save_album_to_flash()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album to flash.")
break
# Saves album to the file.
res_code, save_album = hvc_p2_api.save_album()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album.")
break
with open(album_fname, "wb") as file:
file.write(save_album)
print("Success to save album.")
if operation_str == 'l':
# Loads album from file
if os.path.isfile(album_fname):
with open(album_fname, "rb") as file:
load_album = file.read()
res_code = hvc_p2_api.load_album(load_album)
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid load album.")
break
print("Success to load album.")
if operation_str == 'd':
# Deletes all album data
res_code = hvc_p2_api.delete_all_data()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album to flash.")
break
# Saves album to flash ROM on B5T-007001.
res_code = hvc_p2_api.save_album_to_flash()
if res_code is not p2def.RESPONSE_CODE_NORMAL:
print("Error: Invalid save album to flash.")
break
print("Success to delete album.")
except KeyboardInterrupt:
time.sleep(1)
finally:
hvc_p2_api.set_uart_baudrate(p2def.DEFAULT_BAUD)
hvc_p2_api.disconnect()
|
JobRouter.ts | /*
University of Illinois/NCSA Open Source License
Copyright (c) 2018 Terrain Data, Inc. and the authors. All rights reserved.
Developed by: Terrain Data, Inc. and
the individuals who committed the code in this file.
https://github.com/terraindata/terrain
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal with the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.
* Neither the names of Terrain Data, Inc., Terrain, nor the names of its
contributors may be used to endorse or promote products derived from
this Software without specific prior written permission.
This license supersedes any copyright notice, license, or related statement
following this comment block. All files in this repository are provided
under the same license, regardless of whether a corresponding comment block
appears in them. This license also applies retroactively to any previous
state of the repository, including different branches and commits, which
were made public on or after December 8th, 2018.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
THE SOFTWARE.
*/
// Copyright 2018 Terrain Data, Inc.
import * as asyncBusboy from 'async-busboy';
import * as passport from 'koa-passport';
import * as KoaRouter from 'koa-router';
import * as stream from 'stream';
import { JobConfig } from 'shared/types/jobs/JobConfig';
import * as App from '../App';
import * as AppUtil from '../AppUtil';
import ProgressStream from '../io/streams/ProgressStream';
import { Permissions } from '../permissions/Permissions';
import UserConfig from '../users/UserConfig';
import { users } from '../users/UserRouter';
const Router = new KoaRouter();
const perm: Permissions = new Permissions();
export const initialize = () => { };
Router.post('/cancel/:id', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyCancelRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.cancel(ctx.params.id);
});
// Delete job by id
Router.post('/delete/:id', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyDeleteRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.delete(ctx.params.id);
});
// Retrieve job log by id, or all if none provided
Router.get('/log/:id?', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyGetLogRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.getLog(ctx.params.id);
});
// pause job by id
Router.post('/pause/:id', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyPauseRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.pause(ctx.params.id);
});
Router.post('/run/:id', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyRunRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.run(ctx.params.id);
});
Router.post('/runnow/:id', async (ctx, next) =>
{
let fields;
let files;
let isDownloadRequest = false;
let downloadName;
// TODO: better to check the download request by looking the template.
if (ctx.request.type === 'application/x-www-form-urlencoded')
{
isDownloadRequest = true;
fields = ctx.request.body;
if (fields.hasOwnProperty('downloadName') === false)
{
throw (new Error('API error: download name field is empty.'));
}
downloadName = fields['downloadName'];
} else
{
({ fields, files } = await asyncBusboy(ctx.req));
}
AppUtil.verifyParameters(fields, ['id', 'accessToken']);
const user = await users.loginWithAccessToken(Number(fields['id']), fields['accessToken']);
if (user === null) | }
const responseStream = await App.JobQ.runNow(ctx.params.id, fields, files);
// await perm.JobQueuePermissions.verifyRunNowRoute(ctx.state.user as UserConfig, ctx.req);
responseStream.on('error', ctx.onerror);
if (responseStream instanceof ProgressStream)
{
responseStream.resume();
ctx.body = new stream.Readable();
ctx.body.push(null);
}
else
{
if (isDownloadRequest === true)
{
ctx.type = 'text/plain';
ctx.attachment(downloadName);
}
ctx.body = responseStream.pipe(new stream.PassThrough());
}
});
// unpause paused job by id
Router.post('/unpause/:id', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyUnpauseRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.unpause(ctx.params.id);
});
// Get job by search parameter, or all if none provided
Router.get('/:id?', passport.authenticate('access-token-local'), async (ctx, next) =>
{
await perm.JobQueuePermissions.verifyGetRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.get(ctx.params.id);
});
// Create job
Router.post('/', passport.authenticate('access-token-local'), async (ctx, next) =>
{
const job: JobConfig = ctx.request.body['body'];
if (job.id !== undefined)
{
delete job.id;
}
await perm.JobQueuePermissions.verifyCreateRoute(ctx.state.user as UserConfig, ctx.req);
ctx.body = await App.JobQ.create(job, false, ctx.state.user.id);
});
export default Router; | {
ctx.body = 'Unauthorized';
ctx.status = 400;
return; |
FileSize.d.ts | import { Component } from 'preact';
interface Props {
blob: Blob;
compareTo?: Blob;
}
interface State {
}
export default class FileSize extends Component<Props, State> {
render({ blob, compareTo }: Props): JSX.Element;
}
| export {}; |
|
piRecieve.py | import serial
import traceback
import time
import decode
class UART:
def __init__(self):
# Serial communication settings
# Baud rate set to the Raspberry Pi default: 115200
try:
self.uartport = serial.Serial(
port="/dev/ttyS0",
baudrate=115200,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None,
)
except serial.SerialException:
print(traceback.format_exc())
# Clear the receive and transmit buffers
self.uartport.reset_input_buffer()
self.uartport.reset_output_buffer()
time.sleep(1)
# Data transmit function
def send_serial(self, cmd):
print("send data : {0}".format(cmd))
try:
# To guarantee a trailing newline, strip any existing one here and re-append it on send
cmd = cmd.rstrip()
# Append the newline, convert the string to bytes, and send
self.uartport.write((cmd + "\n").encode("utf-8"))
except serial.SerialException:
print(traceback.format_exc())
# Data receive function
def receive_serial(self):
try:
rcvdata = self.uartport.readline()
except serial.SerialException:
print(traceback.format_exc())
# Convert the received bytes to a string and strip the newline
retur | de("utf-8").rstrip()
if __name__ == '__main__':
uart = UART()
while True:
# Get the text entered at the terminal
#input_data = input("input data:")
# Send the text entered at the terminal to Windows
#uart.send_serial(input_data)
# Receive data
data = uart.receive_serial()
# Print the received data
print("recv data : {0}".format(data))
data1 = decode.sencer_data(data)
data1.print_all() | n rcvdata.deco |
issue_46_example.rs | extern crate ctrlc;
use std::process;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
fn main() | {
let running = Arc::new(AtomicUsize::new(0));
let r = running.clone();
ctrlc::set_handler(move || {
let prev = r.fetch_add(1, Ordering::SeqCst);
if prev == 0 {
println!("Exiting...");
} else {
process::exit(0);
}
})
.expect("Error setting Ctrl-C handler");
println!("Running...");
for _ in 1..6 {
thread::sleep(Duration::from_secs(5));
if running.load(Ordering::SeqCst) > 0 {
break;
}
}
} |
|
callback_executors.rs | //! Executor abstraction for executing callbacks to user code (request filters, provider state change callbacks)
use std::collections::HashMap;
use std::sync::Arc;
use ansi_term::Colour::Yellow;
use async_trait::async_trait;
use maplit::*;
use serde_json::{json, Value};
use pact_models::bodies::OptionalBody;
use pact_models::content_types::JSON;
use pact_models::provider_states::ProviderState;
use pact_models::v4::http_parts::HttpRequest;
use crate::provider_client::make_state_change_request;
use std::fmt::{Display, Formatter};
/// Trait for executors that call request filters
pub trait RequestFilterExecutor {
/// Mutates requests based on some criteria.
fn call(self: Arc<Self>, request: &HttpRequest) -> HttpRequest;
}
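// Hypothetical implementor sketch (added for illustration; `AddHeaderFilter`
// is not part of this crate, and this assumes `HttpRequest: Clone`):
//
//     struct AddHeaderFilter;
//     impl RequestFilterExecutor for AddHeaderFilter {
//         fn call(self: Arc<Self>, request: &HttpRequest) -> HttpRequest {
//             let mut filtered = request.clone();
//             // ... adjust headers, path, query or body here ...
//             filtered
//         }
//     }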
/// A "null" request filter executor, which does nothing, but permits
/// bypassing of typechecking issues where no filter should be applied.
#[derive(Debug, Clone)]
pub struct NullRequestFilterExecutor {
// This field is added (and is private) to guarantee that this struct
// is never instantiated accidentally, and is instead only able to be
// used for type-level programming.
_private_field: (),
}
impl RequestFilterExecutor for NullRequestFilterExecutor {
fn call(self: Arc<Self>, _request: &HttpRequest) -> HttpRequest {
unimplemented!("NullRequestFilterExecutor should never be called")
}
}
/// Struct for returning errors from executing a provider state
#[derive(Debug, Clone)]
pub struct ProviderStateError {
/// Description of the error
pub description: String,
/// Interaction ID of the interaction that the error occurred
pub interaction_id: Option<String>
}
impl Display for ProviderStateError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "Provider state failed: {}{}", self.interaction_id.as_ref()
.map(|id| format!("(interaction_id: {}) ", id)).unwrap_or_default(), self.description)
}
}
impl std::error::Error for ProviderStateError {}
/// Trait for executors that call provider state callbacks
#[async_trait]
pub trait ProviderStateExecutor {
/// Invoke the callback for the given provider state, returning an optional Map of values
async fn call(
self: Arc<Self>,
interaction_id: Option<String>,
provider_state: &ProviderState,
setup: bool,
client: Option<&reqwest::Client>
) -> anyhow::Result<HashMap<String, Value>>;
}
/// Default provider state callback executor, which executes an HTTP request
#[derive(Debug, Clone)]
pub struct | {
/// URL to post state change requests to
pub state_change_url: Option<String>,
/// If teardown state change requests should be made (default is false)
pub state_change_teardown: bool,
/// If state change request data should be sent in the body (true) or as query parameters (false)
pub state_change_body: bool
}
impl Default for HttpRequestProviderStateExecutor {
/// Create a default executor
fn default() -> HttpRequestProviderStateExecutor {
HttpRequestProviderStateExecutor {
state_change_url: None,
state_change_teardown: false,
state_change_body: true
}
}
}
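// Hypothetical usage sketch (the URL here is an assumption for the example,
// not taken from this crate's documentation):
//
//     let executor = Arc::new(HttpRequestProviderStateExecutor {
//         state_change_url: Some("http://localhost:8080/change-state".to_string()),
//         ..HttpRequestProviderStateExecutor::default()
//     });
//
// With `state_change_body: true` (the default) the state name and params are
// POSTed as a JSON body; with `false` they are sent as query parameters.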
#[async_trait]
impl ProviderStateExecutor for HttpRequestProviderStateExecutor {
async fn call(
self: Arc<Self>,
interaction_id: Option<String>,
provider_state: &ProviderState,
setup: bool,
client: Option<&reqwest::Client>
) -> anyhow::Result<HashMap<String, Value>> {
match &self.state_change_url {
Some(state_change_url) => {
let mut state_change_request = HttpRequest { method: "POST".to_string(), .. HttpRequest::default() };
if self.state_change_body {
let json_body = json!({
"state".to_string() : provider_state.name.clone(),
"params".to_string() : provider_state.params.clone(),
"action".to_string() : if setup {
"setup".to_string()
} else {
"teardown".to_string()
}
});
state_change_request.body = OptionalBody::Present(json_body.to_string().into(), Some(JSON.clone()), None);
state_change_request.headers = Some(hashmap!{ "Content-Type".to_string() => vec!["application/json".to_string()] });
} else {
let mut query = hashmap!{ "state".to_string() => vec![provider_state.name.clone()] };
if setup {
query.insert("action".to_string(), vec!["setup".to_string()]);
} else {
query.insert("action".to_string(), vec!["teardown".to_string()]);
}
for (k, v) in provider_state.params.clone() {
query.insert(k, vec![match v {
Value::String(ref s) => s.clone(),
_ => v.to_string()
}]);
}
state_change_request.query = Some(query);
}
make_state_change_request(client.unwrap_or(&reqwest::Client::default()), &state_change_url, &state_change_request).await
.map_err(|err| ProviderStateError { description: err.to_string(), interaction_id }.into())
},
None => {
if setup {
println!(" {}", Yellow.paint("WARNING: State Change ignored as there is no state change URL provided"));
}
Ok(hashmap!{})
}
}
}
}
| HttpRequestProviderStateExecutor |
string_evaluator.py | import logging
import os
import re | from scanapi.errors import BadConfigurationError, InvalidPythonCodeError
from scanapi.evaluators.code_evaluator import CodeEvaluator
logger = logging.getLogger(__name__)
variable_pattern = re.compile(
r"(?P<something_before>\w*)(?P<start>\${)(?P<variable>\w*)(?P<end>})(?P<something_after>\w*)"
) # ${<variable>}
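# Illustrative example (added for clarity, not in the original source): for
# the sequence "v${API_KEY}suffix" the pattern captures something_before="v",
# variable="API_KEY" and something_after="suffix".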
class StringEvaluator:
def __init__(self, spec_evaluator):
self.spec_evaluator = spec_evaluator
self.api_tree = spec_evaluator.api_tree
self.code_evaluator = CodeEvaluator(self)
def evaluate(self, sequence):
try:
sequence = self.evaluate_env_var(sequence)
except BadConfigurationError as e:
logger.error(e)
sys.exit()
sequence = self.evaluate_custom_var(sequence)
if not self.api_tree.responses:
return sequence
try:
return self.code_evaluator.evaluate(sequence)
except InvalidPythonCodeError as e:
logger.error(e)
sys.exit()
def evaluate_env_var(self, sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group("variable")
if any(letter.islower() for letter in variable_name):
continue
try:
variable_value = os.environ[variable_name]
except KeyError as e:
raise BadConfigurationError(e)
sequence = self.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
def evaluate_custom_var(self, sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group("variable")
if variable_name.isupper():
continue
if not self.api_tree.custom_vars.get(variable_name):
continue
variable_value = self.spec_evaluator.evaluate(
self.api_tree.custom_vars[variable_name]
)
sequence = self.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
def replace_var_with_value(self, sequence, variable, variable_value):
variable = re.escape(variable)
return re.sub(variable, variable_value, sequence) | import sys
|
nvdata.go | package cpanel
import (
"encoding/json"
"github.com/arzahs/cpanelgo"
)
type NVDataGetApiResult struct {
cpanelgo.BaseUAPIResponse
Data []struct {
FileName string `json:"name"`
FileContents string `json:"value"`
} `json:"data"`
}
func (c CpanelApi) GetNVData(name string) (NVDataGetApiResult, error) {
var out NVDataGetApiResult
err := c.Gateway.UAPI("NVData", "get", cpanelgo.Args{
"names": name,
}, &out)
if err == nil {
err = out.Error()
}
return out, err
}
type NVDataSetApiResult struct {
cpanelgo.BaseUAPIResponse
Data []struct {
Set string `json:"set"`
} `json:"data"`
}
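// Illustrative call (the names used here are assumptions for the example):
//
//	result, err := c.SetNVData("example_settings", map[string]string{"theme": "dark"})
//
// The value is marshalled to JSON and stored via SetNVDataRaw below.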
func (c CpanelApi) SetNVData(name string, data interface{}) (NVDataSetApiResult, error) {
buf, err := json.Marshal(data)
if err != nil {
return NVDataSetApiResult{}, err
}
return c.SetNVDataRaw(name, buf)
}
func (c CpanelApi) SetNVDataRaw(name string, buf []byte) (NVDataSetApiResult, error) { | var out NVDataSetApiResult
err := c.Gateway.UAPI("NVData", "set", cpanelgo.Args{
"names": name,
name: string(buf),
}, &out)
if err == nil {
err = out.Error()
}
return out, err
} | |
icon_build.py | #!/usr/bin/env python
# note we'll use glue to assemble a css sprite
# for now we use this to pickup the images we want, convert
# from svg to png in our preferred size
import click
import cairosvg
import os
from pathlib import Path
from jinja2 import Template
from apichanges.icons import ICON_SERVICE_MAP
CSS_BUILD = """
{% for name, path in icons.items() %}
.{{ name }} {background-image: url('/{{ path }}')}
{% endfor %}
"""
@click.command()
@click.option('-s', '--source', required=True, type=click.Path())
@click.option('-d', '--destination', required=True, type=click.Path())
@click.option('--size', type=int, default=128)
def main(source, destination, size):
| destination = Path(destination).expanduser().resolve()
count = 0
icons = {}
used = set()
icon_2_service = {
v: k for k, v in ICON_SERVICE_MAP.items()}
for dirpath, dirnames, filenames in os.walk(str(source)):
dirpath = Path(dirpath)
for f in filenames:
if not f.endswith('_dark-bg.svg'):
continue
origin = (dirpath / f)
n = origin.name
name = n[:n.find('_dark')].replace('.', '_')
service = icon_2_service.get(name)
if service is None:
continue
if name in icons:
continue
used.add(name)
target = destination / ("%s.png" % name.lower())
# if target.exists():
# continue
count += 1
target.parent.mkdir(parents=True, exist_ok=True)
# print('{} -> {}'.format(origin, target))
cairosvg.svg2png(
url=str(origin),
write_to=str(target),
output_width=size,
output_height=size)
if set(icon_2_service).difference(used):
print('missing service icons %s' % (', '.join(
set(icon_2_service).difference(used))))
print('copied %d icons' % count)
with (destination / 'icons.css').open('w') as fh:
icons = {k: "icons/%s.png" % v.lower()
for k, v in ICON_SERVICE_MAP.items()}
fh.write(Template(
CSS_BUILD, lstrip_blocks=True, trim_blocks=True).render(
icons=icons))
if __name__ == '__main__':
main() | source = Path(source).expanduser().resolve() |
sync.gyp | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'sync_android.gypi', | ],
'conditions': [
# Notes:
# 1) In static mode, the public 'sync' target has a target type of 'none',
# and is composed of the static library targets 'sync_api', 'sync_core',
# 'sync_internal_api', 'sync_notifier', and 'sync_proto'.
# 2) In component mode, we build the public 'sync' target into a single DLL,
# which includes the contents of sync_api.gypi, sync_core.gypi,
# sync_internal_api.gypi, sync_notifier.gypi, and sync_proto.gypi.
# 3) All external targets that depend on anything in sync/ must simply
# declare a dependency on 'sync.gyp:sync'
['component=="static_library"', {
'targets': [
# The public sync static library target.
{
'target_name': 'sync',
'type': 'none',
'dependencies': [
'sync_api',
'sync_core',
'sync_internal_api',
'sync_notifier',
'sync_proto',
],
'export_dependent_settings': [
'sync_notifier',
'sync_proto',
],
},
# The sync external API library.
{
'target_name': 'sync_api',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'includes': [
'sync_api.gypi',
],
'dependencies': [
'sync_internal_api',
'sync_proto',
],
},
# The core sync library.
{
'target_name': 'sync_core',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'includes': [
'sync_core.gypi',
],
'dependencies': [
'sync_notifier',
'sync_proto',
],
'export_dependent_settings': [
'sync_notifier',
'sync_proto',
],
},
# The sync internal API library.
{
'target_name': 'sync_internal_api',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'includes': [
'sync_internal_api.gypi',
],
'dependencies': [
'sync_core',
'sync_notifier',
'sync_proto',
],
'export_dependent_settings': [
'sync_core',
'sync_proto',
],
},
# The sync notifications library.
{
'target_name': 'sync_notifier',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'includes': [
'sync_notifier.gypi',
],
},
# The sync protocol buffer library.
{
'target_name': 'sync_proto',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'includes': [
'sync_proto.gypi',
],
},
],
},
{ # component != static_library
'targets': [
# The public sync shared library target.
{
'target_name': 'sync',
'type': 'shared_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'includes': [
'sync_api.gypi',
'sync_core.gypi',
'sync_internal_api.gypi',
'sync_notifier.gypi',
'sync_proto.gypi',
],
},
],
}],
],
} | 'sync_tests.gypi', |
mod.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use fuchsia_zircon::{self as zx, AsHandleRef, HandleBased, Task as zxTask};
use log::warn;
use parking_lot::{Condvar, Mutex, RwLock};
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::ffi::CString;
use std::fmt;
use std::ops;
use std::sync::{Arc, Weak};
pub mod syscalls;
use crate::auth::Credentials;
use crate::fs::{FdTable, FileSystem};
use crate::logging::*;
use crate::mm::MemoryManager;
use crate::not_implemented;
use crate::signals::types::*;
use crate::types::*;
// # Ownership structure
//
// The Kernel object is the root object of the task hierarchy.
//
// The Kernel owns the PidTable, which has the owning reference to each task in the
// kernel via its |tasks| field.
//
// Each task holds a reference to its ThreadGroup and a reference to the objects
// for each major subsystem (e.g., file system, memory manager).
//
// # Back pointers
//
// Each ThreadGroup has weak pointers to its threads and to the kernel to which its
// threads belong.
pub struct Kernel {
/// The Zircon job object that holds the processes running in this kernel.
pub job: zx::Job,
/// The processes and threads running in this kernel, organized by pid_t.
pub pids: RwLock<PidTable>,
}
impl Kernel {
pub fn new(name: &CString) -> Result<Arc<Kernel>, zx::Status> {
let job = fuchsia_runtime::job_default().create_child_job()?;
job.set_name(&name)?;
let kernel = Kernel { job, pids: RwLock::new(PidTable::new()) };
Ok(Arc::new(kernel))
}
}
pub struct PidTable {
/// The most-recently allocated pid in this table.
last_pid: pid_t,
/// The tasks in this table, organized by pid_t.
///
/// This reference is the primary reference keeping the tasks alive.
tasks: HashMap<pid_t, Weak<Task>>,
/// The thread groups that are present in this table.
thread_groups: HashMap<pid_t, Weak<ThreadGroup>>,
/// The condvars that suspended tasks are waiting on, organized by pid_t of the suspended task.
suspended_tasks: HashMap<pid_t, Arc<Condvar>>,
}
impl PidTable {
pub fn new() -> PidTable {
PidTable {
last_pid: 0,
tasks: HashMap::new(),
thread_groups: HashMap::new(),
suspended_tasks: HashMap::new(),
}
}
pub fn get_task(&self, pid: pid_t) -> Option<Arc<Task>> {
self.tasks.get(&pid).and_then(|task| task.upgrade())
}
pub fn get_thread_groups(&self) -> Vec<Arc<ThreadGroup>> {
self.thread_groups.iter().flat_map(|(_pid, thread_group)| thread_group.upgrade()).collect()
}
/// Adds a task to the set of tasks currently suspended via `rt_sigsuspend`.
///
/// Attempting to add a task that already exists is an error, and will panic.
///
/// The suspended task will wait on the condition variable, and will be notified when it is
/// the target of an appropriate signal.
pub fn add_suspended_task(&mut self, pid: pid_t) -> Arc<Condvar> {
assert!(!self.is_task_suspended(pid));
let condvar = Arc::new(Condvar::new());
self.suspended_tasks.insert(pid, condvar.clone());
condvar
}
/// Returns true if the Task associated with `pid` is currently suspended in `rt_sigsuspend`.
pub fn is_task_suspended(&self, pid: pid_t) -> bool {
self.suspended_tasks.contains_key(&pid)
}
/// Removes the condition variable that `pid` is waiting on.
///
/// The returned condition variable is meant to be notified before it is dropped in order
/// for the task to resume operation in `rt_sigsuspend`.
pub fn remove_suspended_task(&mut self, pid: pid_t) -> Option<Arc<Condvar>> {
self.suspended_tasks.remove(&pid)
}
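// Illustrative flow (a sketch added for clarity, not in the original file):
//
//     // In rt_sigsuspend: register, then block on the condvar (with an
//     // associated Mutex) until a signal arrives.
//     let condvar = kernel.pids.write().add_suspended_task(task.id);
//
//     // In the signal sender: wake the suspended task, if any.
//     if let Some(cv) = kernel.pids.write().remove_suspended_task(target.id) {
//         cv.notify_all();
//     }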
fn allocate_pid(&mut self) -> pid_t {
self.last_pid += 1;
return self.last_pid;
}
fn add_task(&mut self, task: &Arc<Task>) {
assert!(!self.tasks.contains_key(&task.id));
self.tasks.insert(task.id, Arc::downgrade(task));
}
fn add_thread_group(&mut self, thread_group: &Arc<ThreadGroup>) {
assert!(!self.thread_groups.contains_key(&thread_group.leader));
self.thread_groups.insert(thread_group.leader, Arc::downgrade(thread_group));
}
fn remove_task(&mut self, pid: pid_t) {
self.tasks.remove(&pid);
}
fn remove_thread_group(&mut self, pid: pid_t) {
self.thread_groups.remove(&pid);
}
}
pub struct ThreadGroup {
/// The kernel to which this thread group belongs.
pub kernel: Arc<Kernel>,
/// A handle to the underlying Zircon process object.
///
/// Currently, we have a 1-to-1 mapping between thread groups and zx::process
/// objects. This approach might break down if/when we implement CLONE_VM
/// without CLONE_THREAD because that creates a situation where two thread
/// groups share an address space. To implement that situation, we might
/// need to break the 1-to-1 mapping between thread groups and zx::process
/// or teach zx::process to share address spaces.
pub process: zx::Process,
/// The lead task of this thread group.
///
/// The lead task is typically the initial thread created in the thread group.
pub leader: pid_t,
/// The tasks in the thread group.
pub tasks: RwLock<HashSet<pid_t>>,
/// The signal actions that are registered for `tasks`. All `tasks` share the same `sigaction`
/// for a given signal.
pub signal_actions: RwLock<SignalActions>,
}
impl PartialEq for ThreadGroup {
fn eq(&self, other: &Self) -> bool {
self.leader == other.leader
}
}
impl ThreadGroup {
fn new(kernel: Arc<Kernel>, process: zx::Process, leader: pid_t) -> ThreadGroup {
let mut tasks = HashSet::new();
tasks.insert(leader);
ThreadGroup {
kernel,
process,
leader,
tasks: RwLock::new(tasks),
signal_actions: RwLock::new(SignalActions::default()),
}
}
fn remove(&self, task: &Task) {
let kill_process = {
let mut tasks = self.tasks.write();
self.kernel.pids.write().remove_task(task.id);
tasks.remove(&task.id);
tasks.is_empty()
};
if kill_process {
if let Err(e) = self.process.kill() {
warn!("Failed to kill process: {}", e);
}
self.kernel.pids.write().remove_thread_group(self.leader);
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct TaskOwner {
pub task: Arc<Task>,
}
impl ops::Drop for TaskOwner {
fn drop(&mut self) {
self.task.destroy();
}
} | pub id: pid_t,
/// The thread group to which this task belongs.
pub thread_group: Arc<ThreadGroup>,
/// The parent task, if any.
pub parent: pid_t,
// TODO: The children of this task.
/// A handle to the underlying Zircon thread object.
pub thread: zx::Thread,
/// The file descriptor table for this task.
pub files: Arc<FdTable>,
/// The memory manager for this task.
pub mm: Arc<MemoryManager>,
/// The file system for this task.
pub fs: Arc<FileSystem>,
/// The security credentials for this task.
pub creds: Credentials,
// See https://man7.org/linux/man-pages/man2/set_tid_address.2.html
pub set_child_tid: Mutex<UserRef<pid_t>>,
pub clear_child_tid: Mutex<UserRef<pid_t>>,
// See https://man7.org/linux/man-pages/man2/sigaltstack.2.html
pub signal_stack: Mutex<Option<sigaltstack_t>>,
/// The signal mask of the task.
// See https://man7.org/linux/man-pages/man2/rt_sigprocmask.2.html
pub signal_mask: Mutex<sigset_t>,
/// The signal this task generates on exit.
pub exit_signal: Option<Signal>,
}
impl Task {
pub fn new(
kernel: &Arc<Kernel>,
name: &CString,
files: Arc<FdTable>,
fs: Arc<FileSystem>,
creds: Credentials,
exit_signal: Option<Signal>,
) -> Result<TaskOwner, Errno> {
let (process, root_vmar) = kernel
.job
.create_child_process(name.as_bytes())
.map_err(Errno::from_status_like_fdio)?;
let thread = process
.create_thread("initial-thread".as_bytes())
.map_err(Errno::from_status_like_fdio)?;
// TODO: Stop giving MemoryManager a duplicate of the process handle once a process
// handle is not needed to implement read_memory or write_memory.
let duplicate_process =
process.duplicate_handle(zx::Rights::SAME_RIGHTS).map_err(impossible_error)?;
let mut pids = kernel.pids.write();
let id = pids.allocate_pid();
let task = Arc::new(Task {
id,
thread_group: Arc::new(ThreadGroup::new(kernel.clone(), process, id)),
parent: 0,
thread,
files,
mm: Arc::new(
MemoryManager::new(duplicate_process, root_vmar)
.map_err(Errno::from_status_like_fdio)?,
),
fs,
creds: creds,
set_child_tid: Mutex::new(UserRef::default()),
clear_child_tid: Mutex::new(UserRef::default()),
signal_stack: Mutex::new(None),
signal_mask: Mutex::new(sigset_t::default()),
exit_signal,
});
pids.add_task(&task);
pids.add_thread_group(&task.thread_group);
Ok(TaskOwner { task })
}
pub fn clone_task(
&self,
flags: u64,
user_stack: UserAddress,
_user_parent_tid: UserRef<pid_t>,
user_child_tid: UserRef<pid_t>,
_user_tls: UserAddress,
) -> Result<TaskOwner, Errno> {
// TODO: Implement more flags.
const IMPLEMENTED_FLAGS: u64 =
(CLONE_FS | CLONE_FILES | CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CSIGNAL) as u64;
if flags & !IMPLEMENTED_FLAGS != 0 {
not_implemented!("clone does not implement flags: {}", flags & !IMPLEMENTED_FLAGS);
return Err(ENOSYS);
}
if !user_stack.is_null() {
not_implemented!("clone does not implement non-zero stack: {}", user_stack);
return Err(ENOSYS);
}
let raw_child_exit_signal = flags & (CSIGNAL as u64);
let child_exit_signal = if raw_child_exit_signal == 0 {
None
} else {
Some(Signal::try_from(UncheckedSignal::new(raw_child_exist_signal))?)
};
let fs = if flags & (CLONE_FS as u64) != 0 { self.fs.clone() } else { self.fs.fork() };
let files =
if flags & (CLONE_FILES as u64) != 0 { self.files.clone() } else { self.files.fork() };
let creds = self.creds.clone();
let child = Self::new(
&self.thread_group.kernel,
&CString::new("cloned-child").unwrap(),
files,
fs,
creds,
child_exit_signal,
)?;
self.mm.snapshot_to(&child.task.mm)?;
if flags & (CLONE_CHILD_CLEARTID as u64) != 0 {
*child.task.clear_child_tid.lock() = user_child_tid;
let zero: pid_t = 0;
child.task.mm.write_object(user_child_tid, &zero)?;
// TODO: Issue a FUTEX_WAKE at this address.
}
if flags & (CLONE_CHILD_SETTID as u64) != 0 {
*child.task.set_child_tid.lock() = user_child_tid;
child.task.mm.write_object(user_child_tid, &child.task.id)?;
}
Ok(child)
}
/// Called by the Drop trait on TaskOwner.
fn destroy(&self) {
self.thread_group.remove(self);
}
pub fn get_task(&self, pid: pid_t) -> Option<Arc<Task>> {
self.thread_group.kernel.pids.read().get_task(pid)
}
pub fn get_pid(&self) -> pid_t {
// This is set to 1 because Bionic skips referencing /dev if getpid() == 1, under the
// assumption that anything running after init will have access to /dev.
1.into()
}
pub fn get_tid(&self) -> pid_t {
self.id
}
pub fn get_pgrp(&self) -> pid_t {
// TODO: Implement process groups.
1
}
/// Returns whether or not the task has the given `capability`.
///
// TODO(lindkvist): This should do a proper check for the capability in the namespace.
// TODO(lindkvist): `capability` should be a type, just like we do for signals.
pub fn has_capability(&self, _capability: u32) -> bool {
false
}
pub fn can_signal(&self, target: &Task, unchecked_signal: &UncheckedSignal) -> bool {
// If both the tasks share a thread group the signal can be sent. This is not documented
// in kill(2) because kill does not support task-level granularity in signal sending.
if self.thread_group == target.thread_group {
return true;
}
if self.has_capability(CAP_KILL) {
return true;
}
if self.creds.has_same_uid(&target.creds) {
return true;
}
// TODO(lindkvist): This check should also verify that the sessions are the same.
if Signal::try_from(unchecked_signal) == Ok(Signal::SIGCONT) {
return true;
}
false
}
// TODO(lindkvist): Implement proper signal sending.
pub fn send_signal(&self, unchecked_signal: &UncheckedSignal) -> Result<(), Errno> {
// 0 is a sentinel value used to do permission checks.
let sentinel_signal = UncheckedSignal::from(0);
if *unchecked_signal == sentinel_signal {
return Ok(());
}
let signal = Signal::try_from(unchecked_signal)?;
if signal.mask() & *self.signal_mask.lock() != 0 {
if let Some(waiter_condvar) =
self.thread_group.kernel.pids.write().remove_suspended_task(self.id)
{
waiter_condvar.notify_all();
}
}
Ok(())
}
}
impl fmt::Debug for Task {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "task({})", self.id)
}
}
impl cmp::PartialEq for Task {
fn eq(&self, other: &Self) -> bool {
let ptr: *const Task = self;
let other_ptr: *const Task = other;
return ptr == other_ptr;
}
}
impl cmp::Eq for Task {}
#[cfg(test)]
mod test {
use fuchsia_async as fasync;
use crate::testing::*;
#[fasync::run_singlethreaded(test)]
async fn test_tid_allocation() {
let (kernel, task_owner) = create_kernel_and_task();
let task = &task_owner.task;
assert_eq!(task.get_tid(), 1);
let another_task_owner = create_task(&kernel, "another-task");
let another_task = &another_task_owner.task;
assert_eq!(another_task.get_tid(), 2);
let pids = kernel.pids.read();
assert_eq!(pids.get_task(1).unwrap().get_tid(), 1);
assert_eq!(pids.get_task(2).unwrap().get_tid(), 2);
assert!(pids.get_task(3).is_none());
}
} |
pub struct Task { |
cli.rs | //! The logic for the Wasmer CLI tool.
#[cfg(target_os = "linux")]
use crate::commands::Binfmt;
#[cfg(feature = "compiler")]
use crate::commands::Compile;
#[cfg(all(feature = "staticlib", feature = "compiler"))]
use crate::commands::CreateExe;
#[cfg(feature = "wast")]
use crate::commands::Wast;
use crate::commands::{Cache, Config, Inspect, Run, SelfUpdate, Validate};
use crate::error::PrettyError;
use anyhow::Result;
use structopt::{clap::ErrorKind, StructOpt};
#[derive(StructOpt)]
#[cfg_attr(
not(feature = "headless"),
structopt(name = "wasmer", about = "WebAssembly standalone runtime.", author)
)]
#[cfg_attr(
feature = "headless",
structopt(
name = "wasmer-headless",
about = "Headless WebAssembly standalone runtime.",
author
)
)]
/// The options for the wasmer Command Line Interface
enum WasmerCLIOptions {
/// Run a WebAssembly file. Formats accepted: wasm, wat
#[structopt(name = "run")]
Run(Run),
/// Wasmer cache
#[structopt(name = "cache")]
Cache(Cache),
/// Validate a WebAssembly binary
#[structopt(name = "validate")]
Validate(Validate),
/// Compile a WebAssembly binary
#[cfg(feature = "compiler")]
#[structopt(name = "compile")]
Compile(Compile),
/// Compile a WebAssembly binary into a native executable
#[cfg(all(feature = "staticlib", feature = "compiler"))]
#[structopt(name = "create-exe")]
CreateExe(CreateExe),
/// Get various configuration information needed
/// to compile programs which use Wasmer
#[structopt(name = "config")]
Config(Config),
/// Update wasmer to the latest version
#[structopt(name = "self-update")]
SelfUpdate(SelfUpdate),
/// Inspect a WebAssembly file
#[structopt(name = "inspect")]
Inspect(Inspect),
/// Run spec testsuite
#[cfg(feature = "wast")]
#[structopt(name = "wast")]
Wast(Wast),
/// Unregister and/or register wasmer as binfmt interpreter
#[cfg(target_os = "linux")]
#[structopt(name = "binfmt")]
Binfmt(Binfmt),
}
impl WasmerCLIOptions {
fn | (&self) -> Result<()> {
match self {
Self::Run(options) => options.execute(),
Self::SelfUpdate(options) => options.execute(),
Self::Cache(cache) => cache.execute(),
Self::Validate(validate) => validate.execute(),
#[cfg(feature = "compiler")]
Self::Compile(compile) => compile.execute(),
#[cfg(all(feature = "staticlib", feature = "compiler"))]
Self::CreateExe(create_exe) => create_exe.execute(),
Self::Config(config) => config.execute(),
Self::Inspect(inspect) => inspect.execute(),
#[cfg(feature = "wast")]
Self::Wast(wast) => wast.execute(),
#[cfg(target_os = "linux")]
Self::Binfmt(binfmt) => binfmt.execute(),
}
}
}
/// The main function for the Wasmer CLI tool.
pub fn wasmer_main() {
    // We allow Windows to print colors properly
#[cfg(windows)]
colored::control::set_virtual_terminal(true).unwrap();
// We try to run wasmer with the normal arguments.
// Eg. `wasmer <SUBCOMMAND>`
    // In case that fails, we fall back to trying the Run subcommand directly.
// Eg. `wasmer myfile.wasm --dir=.`
//
// In case we've been run as wasmer-binfmt-interpreter myfile.wasm args,
// we assume that we're registered via binfmt_misc
let args = std::env::args().collect::<Vec<_>>();
let binpath = args.get(0).map(|s| s.as_ref()).unwrap_or("");
let command = args.get(1);
let options = if cfg!(target_os = "linux") && binpath.ends_with("wasmer-binfmt-interpreter") {
WasmerCLIOptions::Run(Run::from_binfmt_args())
} else {
match command.unwrap_or(&"".to_string()).as_ref() {
"cache" | "compile" | "config" | "create-exe" | "help" | "inspect" | "run"
| "self-update" | "validate" | "wast" | "binfmt" => WasmerCLIOptions::from_args(),
_ => {
WasmerCLIOptions::from_iter_safe(args.iter()).unwrap_or_else(|e| {
match e.kind {
                        // This fixes an issue that:
// 1. Shows the version twice when doing `wasmer -V`
// 2. Shows the run help (instead of normal help) when doing `wasmer --help`
ErrorKind::VersionDisplayed | ErrorKind::HelpDisplayed => e.exit(),
_ => WasmerCLIOptions::Run(Run::from_args()),
}
})
}
}
};
PrettyError::report(options.execute());
}
| execute |
read.go | package widget
import (
"context"
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"github.com/terraform-provider-graylog/terraform-provider-graylog/graylog/client"
"github.com/terraform-provider-graylog/terraform-provider-graylog/graylog/util"
)
func read(d *schema.ResourceData, m interface{}) error {
ctx := context.Background()
cl, err := client.New(m)
if err != nil { | wID := d.Get(keyWidgetID).(string)
data, resp, err := cl.DashboardWidget.Get(ctx, dsID, wID)
if err != nil {
return util.HandleGetResourceError(
d, resp, fmt.Errorf(
"failed to get a dashboard widget(dashboard id: %s widget id: %s): %w", dsID, wID, err))
}
return setDataToResourceData(d, data)
} | return err
}
dsID := d.Get(keyDashboardID).(string) |
pdos.py | """
Reader module for CASTEP pdos_bin
Written based on the example `pdos_bin.f90` file in open-source OptaDos code
"""
from enum import Enum, unique
import numpy as np
from scipy.io import FortranFile
@unique
class SpinEnum(Enum):
"""
Enum type for Spin. Only up and down.
Usage: Spin.up, Spin.down.
"""
up, down = (1, -1)
def __int__(self):
return self.value
def __float__(self):
return float(self.value)
def __str__(self):
return str(self.value)
@unique
class OrbitalType(Enum):
"""
Enum type for orbital type. Indices are basically the azimuthal quantum
number, l.
"""
s = 0
p = 1
d = 2
f = 3
def __str__(self):
return str(self.name)
@unique
class OrbitalEnum(Enum):
"""
    Enum type for specific orbitals. The values are the names reported by CASTEP.
"""
s = "S"
px = "Px"
py = "Py"
pz = "Pz"
dxy = "Dxy"
dyz = "Dzy"
dz2 = "Dzz"
dxz = "Dzx"
dx2 = "Dxx-yy"
f_xxx = "Fxxx"
f_yyy = "Fyyy"
f_zzz = "Fzzz"
f_xyz = "Fxyz"
f_z_xx_yy = "Fz(xx-yy)"
f_y_zz_xx = "Fy(zz-xx)"
f_x_yy_zz = "Fx(yy-zz)"
def __int__(self):
return self.value
def __str__(self):
return str(self.name)
@property
def orbital_type(self):
|
def read_pdos_bin(filename, endian='big'):
"""
Read the pdos_bin file generated by CASTEP Spectral task.
Args:
        filename (str): name of the file to be read
        endian (str): endianness of the file, 'big' (default) or 'little'
Returns:
A dictionary of the data that have been read.
        the weights of each orbital are stored in the 'pdos_weights' array
with dimension (n_orbital, n_max_eign, n_kpoints, n_spin)
"""
    esymbol = '>' if endian.upper() == 'BIG' else '<'
dint = np.dtype(esymbol + 'i4')
ddouble = np.dtype(esymbol + 'f8')
dch80 = np.dtype(esymbol + 'a80')
diarray = lambda x: '{}({},)i4'.format(esymbol, x)
ddarray = lambda x: '{}({},)f8'.format(esymbol, x)
with FortranFile(filename, header_dtype=np.dtype('>u4')) as fhandle:
fversion = fhandle.read_record(ddouble)[0]
fheader = fhandle.read_record(dch80)[0].decode()
num_kpoints = fhandle.read_record(dint)[0]
num_spins = fhandle.read_record(dint)[0]
num_popn_orb = fhandle.read_record(dint)[0]
        max_eigenenv = fhandle.read_record(dint)[0]
# Now we start to read more data
species = fhandle.read_record(diarray(num_popn_orb))
ion = fhandle.read_record(diarray(num_popn_orb))
am_channel = fhandle.read_record(diarray(num_popn_orb))
# Now we initialize the storage space for the weights
pdos_weights = np.zeros(
            (num_popn_orb, max_eigenenv, num_kpoints, num_spins),
dtype=float)
kpoint_positions = np.zeros((num_kpoints, 3), dtype=float)
num_eigenvalues = np.zeros(num_spins, dtype=int)
# Now we start to read the actual data
for nk in range(num_kpoints):
_, kpoint_positions[nk, :] = fhandle.read_record('>i4', '>(3,)f8')
for ns in range(num_spins):
_ = fhandle.read_record(dint)
num_eigenvalues[ns] = fhandle.read_record(dint)
for nb in range(num_eigenvalues[ns]):
pdos_weights[:, nb, nk, ns] = fhandle.read_record(
'>({},)f8'.format(num_popn_orb))
output = {
'fversion': fversion,
'fheader': fheader,
'num_kpoints': num_kpoints,
'num_spins': num_spins,
'num_popn_orb': num_popn_orb,
            'max_eigenenv': max_eigenenv,
'species': species,
'ion': ion,
'am_channel': am_channel,
'pdos_weights': pdos_weights,
'kpoints_positions': kpoint_positions,
'num_eigenvalues': num_eigenvalues,
}
return output
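# Illustrative usage (a sketch only; 'seed.pdos_bin' is a hypothetical file
# name produced by a CASTEP Spectral run, not shipped with this module):
#
#     data = read_pdos_bin('seed.pdos_bin')
#     weights = data['pdos_weights']
#     # weights.shape == (n_orbital, n_max_eign, n_kpoints, n_spin)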
def reorder_pdos_data(input_items, pymatgen_labels=True, use_string_as_keys=False):
"""
    Arrange the PDOS weights so they are more meaningful.
    The result can be used to compute the PDOS and to create a CompleteDos
    object that can be used with pymatgen.
Args:
input_items (dict): A dictionary of the pdos information, use the
output of `read_pdos` function.
pymatgen_labels (bool): Use pymatgen Enum as the keys of the result dictionary.
Returns:
A dictionary of {Site_index: {Orbital: {Spin: weight}}}
"""
if pymatgen_labels is True:
try:
from pymatgen.electronic_structure.core import Orbital as POrbital
from pymatgen.electronic_structure.core import Spin as PSpin
except ImportError:
pymatgen_labels = False
if pymatgen_labels:
        # Note that s-p labels are inferred from the dot castep output
        # f labels - I only know that the first three are among the first three.
        # There is no way to tell if they are correct; f_1 is not very informative from VASP....
orbital_mapping = [[POrbital.s], [POrbital.px, POrbital.py, POrbital.pz],
[
POrbital.dz2, POrbital.dyz, POrbital.dxz, POrbital.dx2,
POrbital.dxy
],
[
POrbital.f_1, POrbital.f_2, POrbital.f_3, POrbital.f0,
POrbital.f1, POrbital.f2, POrbital.f3
]]
Spin = PSpin
else:
# These are the orders inferred from CASTEP output
orbital_mapping = [[OrbitalEnum.s], [OrbitalEnum.px, OrbitalEnum.py, OrbitalEnum.pz],
[
OrbitalEnum.dz2, OrbitalEnum.dyz, OrbitalEnum.dxz, OrbitalEnum.dx2,
OrbitalEnum.dxy
],
[
OrbitalEnum.f_xxx, OrbitalEnum.f_yyy, OrbitalEnum.f_zzz, OrbitalEnum.f_xyz,
OrbitalEnum.f_z_xx_yy, OrbitalEnum.f_y_zz_xx, OrbitalEnum.f_x_yy_zz
]]
Spin = SpinEnum
    # We take the average over k-points from here.
    # One might ask: why not take the k-point weights into account?
    # Because they should be taken into account in the TDOS.
weights = input_items['pdos_weights']
# Specie index for all orbitals
species = input_items['species']
# Index of each ion for all orbitals
ion = input_items['ion']
num_spins = input_items['num_spins']
# Angular momentum channel all orbitals
am_channel = input_items['am_channel']
    unique_species = np.unique(species)
    unique_species.sort()
site_index = 0
output_data = {}
# Initialise storage space
    for specie in unique_species:
specie_mask = specie == species
# Total number of ions for this specie
total_ions = ion[specie_mask].max()
        # Note that indices start from one, not zero
for nion in range(1, total_ions + 1):
# Iterate through each ion
ion_mask = (ion == nion) & specie_mask
max_am = am_channel[ion_mask].max()
site_dict = {} # {Orbital: {Spin: weight}...}
for am in range(max_am + 1):
# Collect the angular momentum channels
ion_am_mask = (am_channel == am) & ion_mask
# Indices of each matched channels
ion_am_idx = np.where(ion_am_mask)[0]
for iam, iloc in enumerate(ion_am_idx):
                    # iloc - index of the orbital
                    # You can have 4 orbitals for the p channel - they have different n numbers
this_orb = orbital_mapping[am][iam % (2 * am + 1)]
orb_dict = {} # {Spin: weight...}
if num_spins == 2:
for ispin, espin in enumerate((Spin.up, Spin.down)):
                            # Sum up
wtmp = weights[iloc, :, :, ispin]
orb_dict[espin] = wtmp
else:
orb_dict[Spin.up] = weights[iloc, :, :, 0]
# Now we have the orb_dict populated
# Combined the weights if this orbital has been seen...
if this_orb in site_dict:
site_dict[this_orb] = _merge_weights(
site_dict[this_orb], orb_dict)
else:
site_dict[this_orb] = orb_dict
# Now we populated site_dict add it to output_data
output_data[site_index] = site_dict
site_index += 1
return output_data
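# Shape of the returned mapping, sketched for a single site with s and p
# channels (keys follow the docstring above; the weight arrays are the
# per-band, per-k-point values collected in the loop):
#
#     {0: {Orbital.s:  {Spin.up: w_s},
#          Orbital.px: {Spin.up: w_px}, ...}}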
def compute_pdos(pdos_bin, eigenvalues, kpoints_weights, bins):
"""
Compute the PDOS from eigenvalue and kpoint weights
Args:
pdos_bin (str): Path to the binary pdos_bin file
        eigenvalues (dict): Eigenvalues keyed by spin, as {Spin: array}.
kpoints_weights (np.ndarray): Weights of each kpoints.
bins: The bins for computing the density of states.
"""
    # Walk through the ordered_weights dictionary and compute PDOS for each weight
ordered_weights = reorder_pdos_data(read_pdos_bin(pdos_bin))
pdos_data = {}
for site, porbs_dict in ordered_weights.items():
porbs_outdict = {}
for orb, pspin_dict in porbs_dict.items():
pdos_orbit = {
spin: np.histogram(
eigenvalue_set,
bins=bins,
weights=kpoints_weights * pspin_dict[
spin] # weight (nk, ); pspin_dict[spin] (nk, nb)
)[0]
for spin, eigenvalue_set in eigenvalues.items()
}
porbs_outdict[orb] = pdos_orbit
pdos_data[site] = porbs_outdict
return pdos_data
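# A minimal sketch of driving compute_pdos; the eigenvalue dict, k-point
# weights and energy window below are assumptions, not values read from any
# particular calculation:
#
#     bins = np.linspace(-10.0, 10.0, 201)
#     pdos = compute_pdos('seed.pdos_bin', {SpinEnum.up: eigenvalues},
#                         kpoints_weights, bins)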
def _merge_weights(spin_d1, spin_d2):
"""Sum the weights stored in two dictionaries with keys being the spins"""
if len(spin_d1) != len(spin_d2):
raise RuntimeError("Critical - mismatch spin-dict length")
out = {}
for spin in spin_d1:
out[spin] = spin_d1[spin] + spin_d2[spin]
return out
| """
Returns OrbitalType of an orbital.
"""
return OrbitalType[self.name[0]] |
independent.py | """
Pacman with independent discretization
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from rlpy.Domains import Pacman
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
param_space = {'discretization': hp.quniform("discretization", 3, 50, 1),
'lambda_': hp.uniform("lambda_", 0., 1.),
'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
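# A sketch of how param_space could be searched with hyperopt; the objective
# function and trial budget below are assumptions, not part of this file:
#
#     from hyperopt import fmin, tpe
#     best = fmin(objective, param_space, algo=tpe.suggest, max_evals=50)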
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/", | discretization=9):
opt = {}
opt["path"] = path
opt["exp_id"] = exp_id
opt["max_steps"] = 150000
opt["num_policy_checks"] = 30
opt["checks_per_policy"] = 1
domain = Pacman()
opt["domain"] = domain
representation = IncrementalTabular(
domain,
discretization=discretization)
policy = eGreedy(representation, epsilon=0.1)
opt["agent"] = Q_Learning(
policy, representation, discount_factor=domain.discount_factor,
        lambda_=lambda_, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
#from Tools.run import run_profiled
# run_profiled(make_experiment)
experiment = make_experiment(1)
experiment.run(visualize_steps=True)
experiment.plot()
# experiment.save() | lambda_=0.9,
boyan_N0=22.36,
initial_learn_rate=.068, |
setup.py | import os
import sys
from setuptools import find_packages, setup, Extension
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call(
[sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
def | (filename):
    # parse_requirements() returns a generator of pip.req.InstallRequirement instances
install_requires = parse_requirements(
os.path.join(ROOT_DIR, filename),
session=False,
)
    # requirements is a list of requirement name strings
requirements = list(map(lambda x: str(x).split()[0], install_requires))
return requirements
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
version = __import__('rsglob').VERSION
setup_requires = ['setuptools-rust>=0.6.0']
install_requires = get_requirements('requirements.txt')
test_requires = get_requirements('requirements-test.txt')
rust_extensions = [RustExtension('rsglob._rsglob', 'Cargo.toml')]
setup(
name='rsglob',
version=version,
url='https://github.com/wdv4758h/rsglob',
author='Chiu-Hsiang Hsu',
author_email='[email protected]',
description=('Python glob in Rust'),
long_description=open("README.rst").read(),
download_url="https://github.com/wdv4758h/rsglob/archive/v{}.zip".format(
version
),
license='BSD',
tests_require=test_requires,
install_requires=install_requires,
packages=find_packages(),
rust_extensions=rust_extensions,
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| get_requirements |
day_15.py | import collections
import itertools
import math
import unittest
import aoc_utils.geometry
from aoc_utils import char_map, data
class TestCoordinatesUtils(unittest.TestCase):
def test_solve_tie(self):
self.assertEqual(None, solve_tie([]))
self.assertEqual((12, 34), solve_tie([(12, 34)]))
self.assertEqual((1, 1), solve_tie([(1, 1), (2, 2)]))
self.assertEqual((1, 1), solve_tie([(2, 2), (1, 1)]))
self.assertEqual((2, 1), solve_tie([(1, 2), (2, 1)]))
self.assertEqual((2, 1), solve_tie([(2, 1), (1, 2)]))
def solve_tie(options):
if len(options):
return sorted_by_priority(options)[0]
def sorted_by_priority(options):
return sorted(options, key=reverse_coordinates)
def reverse_coordinates(coordinates):
return tuple(i for i in reversed(coordinates))
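# Ties are resolved in "reading order": top-to-bottom, then left-to-right.
# Sorting (x, y) coordinates by their reversed (y, x) form achieves exactly
# that, e.g. solve_tie([(1, 2), (2, 1)]) == (2, 1) because y takes precedence.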
class FindAllClosestRules(char_map.ProgressRules):
def __init__(self, targets, allowed_values):
super(FindAllClosestRules, self).__init__(allowed_values)
self._targets = targets
self._found_one = False
self.results = []
def stop_progressing(self):
return self._found_one
def examine(self, coordinates):
if coordinates in self._targets:
self._found_one = True
self.results.append(coordinates)
return False
return True
def solve_tie(self, coordinate_options):
return solve_tie(coordinate_options)
class TestCaves(unittest.TestCase):
def make_default_caves(self):
|
def test_init_fighters(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertSetEqual({'E', 'G'}, set(fighters.keys()))
self.assertEqual({(1, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
def test_get_targets(self):
caves = self.make_default_caves()
self.assertListEqual([(4, 1), (2, 3), (5, 3)], list(caves.get_targets("E")))
self.assertListEqual([(1, 1)], list(caves.get_targets("G")))
def test_get_in_range(self):
caves = self.make_default_caves()
self.assertListEqual([(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
list(caves.get_in_range("E")))
self.assertListEqual([(2, 1), (1, 2)],
list(caves.get_in_range("G")))
def test_get_coordinates_around(self):
caves = self.make_default_caves()
self.assertListEqual([(2, 1), (1, 2)], list(caves.get_coordinates_around((1, 1))))
self.assertListEqual([(3, 1), (5, 1)], list(caves.get_coordinates_around((4, 1))))
self.assertListEqual([(2, 2), (1, 3), (3, 3)], list(caves.get_coordinates_around((2, 3))))
self.assertListEqual([(5, 2)], list(caves.get_coordinates_around((5, 3))))
def test_find_all_closest_rules(self):
caves = Caves([
"#######",
"#E#.G.#",
"#...#.#",
"#.G.#G#",
"#######",
])
finder = char_map.MapExplorer(caves._caves)
rules = FindAllClosestRules(
targets=[(3, 1), (5, 1), (2, 2), (5, 2), (1, 3), (3, 3)],
allowed_values=[EMPTY_VALUE]
)
finder.explore(start_point=(1, 1), rules=rules)
self.assertListEqual([(2, 2), (1, 3)], list(rules.results))
def test_iterate_units(self):
caves = self.make_default_caves()
self.assertListEqual([(1, 1), (4, 1), (2, 3), (5, 3)], caves.iterate_units())
def test_get_attack_target(self):
caves_2 = Caves([
"#######",
"#..EG.#",
"#...#.#",
"#.G.#G#",
"#######",
])
self.assertEqual((4, 1), caves_2.get_attack_target((3, 1), 'E'))
self.assertEqual((3, 1), caves_2.get_attack_target((4, 1), 'G'))
self.assertEqual(None, caves_2.get_attack_target((2, 3), 'G'))
self.assertEqual(None, caves_2.get_attack_target((5, 3), 'G'))
def test_find_next_step(self):
caves = self.make_default_caves()
self.assertEqual((2, 1), caves.find_next_step((1, 1), 'E'))
self.assertEqual((3, 1), caves.find_next_step((4, 1), 'G'))
self.assertEqual((2, 2), caves.find_next_step((2, 3), 'G'))
self.assertEqual(None, caves.find_next_step((5, 3), 'G'))
def test_play_unit(self):
caves = self.make_default_caves()
fighters = caves.fighters
caves.play_unit((1, 1), 'E')
self.assertEqual({(2, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 200, (2, 3): 200, (5, 3): 200}, fighters['G'])
caves.play_unit((2, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 197, (2, 3): 200, (5, 3): 200}, fighters['G'])
for _ in range(65):
caves.play_unit((3, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(4, 1): 2, (2, 3): 200, (5, 3): 200}, fighters['G'])
caves.play_unit((3, 1), 'E')
self.assertEqual({(3, 1): 200}, fighters['E'])
self.assertEqual({(2, 3): 200, (5, 3): 200}, fighters['G'])
def test_play_round(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertFalse(caves.play_round())
self.assertEqual({(2, 1): 194}, fighters['E'])
self.assertEqual({(3, 1): 200, (2, 2): 200, (5, 3): 200}, fighters['G'])
self.assertTrue(caves.play_round())
self.assertEqual({(2, 1): 188}, fighters['E'])
self.assertEqual({(3, 1): 197, (2, 2): 200, (5, 3): 200}, fighters['G'])
for _ in range(31):
self.assertTrue(caves.play_round())
self.assertEqual({(2, 1): 2}, fighters['E'])
self.assertEqual({(3, 1): 104, (2, 2): 200, (5, 3): 200}, fighters['G'])
self.assertRaises(FightIsOver, caves.play_round)
self.assertEqual({}, fighters['E'])
self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
def test_play(self):
caves = self.make_default_caves()
fighters = caves.fighters
self.assertEqual(16533, caves.play())
self.assertEqual({}, fighters['E'])
self.assertEqual({(3, 1): 101, (2, 2): 200, (5, 3): 200}, fighters['G'])
def test_play_examples(self):
def check(expected_outcome, cave_lines, echo=False):
caves = Caves(cave_lines)
outcome = caves.play()
if echo:
caves.echo()
self.assertEqual(expected_outcome, outcome)
check(27730, [
'#######',
'#.G...#',
'#...EG#',
'#.#.#G#',
'#..G#E#',
'#.....#',
'#######',
])
check(36334, [
'#######',
'#G..#E#',
'#E#E.E#',
'#G.##.#',
'#...#E#',
'#...E.#',
'#######',
])
check(39514, [
'#######',
'#E..EG#',
'#.#G.E#',
'#E.##E#',
'#G..#.#',
'#..E#.#',
'#######',
])
check(27755, [
'#######',
'#E.G#.#',
'#.#G..#',
'#G.#.G#',
'#G..#.#',
'#...E.#',
'#######',
])
check(28944, [
'#######',
'#.E...#',
'#.#..G#',
'#.###.#',
'#E#G#G#',
'#...#G#',
'#######',
])
check(18740, [
'#########',
'#G......#',
'#.E.#...#',
'#..##..G#',
'#...##..#',
'#...#...#',
'#.G...G.#',
'#.....G.#',
'#########',
])
def test_play_mine(self):
caves_lines = data.data_lines(2018, "day_15_mine.txt")
caves = Caves(caves_lines)
outcome = caves.play()
self.assertEqual(201123, outcome)
def test_find_minimum_elves_strength(self):
for elf_strength in range(13, 20):
strengths = {'E': elf_strength, 'G': 3}
caves_lines = data.data_lines(2018, "day_15_mine.txt")
caves = Caves(caves_lines, teams_strength=strengths)
num_elves = len(caves.fighters['E'])
outcome = caves.play()
if len(caves.fighters['E']) == num_elves:
break
self.assertEqual(14, elf_strength)
self.assertEqual(54188, outcome)
TEAMS_STRENGTH = {'E': 3, 'G': 3}
EMPTY_VALUE = '.'
WALL_VALUE = '#'
class FightIsOver(Exception):
pass
class Caves:
def __init__(self, initial_map, teams_strength=TEAMS_STRENGTH):
self._caves = char_map.CharMap(input_lines=initial_map)
self.strength = teams_strength
self.fighters = {team: {} for team in teams_strength}
for position, entry in self._caves.items():
if entry in teams_strength:
self.fighters[entry][position] = 200
def play(self):
rounds = 0
while True:
try:
nobody_moved = self.play_round()
rounds += 1
except FightIsOver:
break
if nobody_moved:
rounds += self.play_frozen_situation()
remaining_hit_points = sum(hp for team in self.fighters.values() for hp in team.values())
return rounds * remaining_hit_points
def play_round(self):
nobody_moved = True
for unit in self.iterate_units():
if not self.game_on():
raise FightIsOver
team = self._caves[unit]
if team == EMPTY_VALUE:
continue
nobody_moved = self.play_unit(unit, team) and nobody_moved
return nobody_moved
def play_frozen_situation(self):
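        # Nobody can move, so the damage dealt per round is constant; instead
        # of simulating each round, compute how many whole rounds the weakest
        # attacked unit survives and apply that much damage in one step.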
attackers = collections.defaultdict(lambda: 0)
for unit in self.iterate_units():
team = self._caves[unit]
target = self.get_attack_target(unit, team)
attackers[target] += self.strength[team]
rounds = min(
math.floor(self.fighters[self._caves[unit]][unit] / attackers[unit])
for unit in self.iterate_units()
if attackers[unit] > 0
)
for unit in self.iterate_units():
team = self._caves[unit]
self.fighters[team][unit] -= rounds * attackers[unit]
return rounds
def game_on(self):
return all(team for team in self.fighters.values())
def play_unit(self, unit, team):
attack_target = self.get_attack_target(unit, team)
if attack_target:
return self.attack(attack_target, self.strength[team])
new_position = self.find_next_step(unit, team)
if new_position:
self.move_unit(team, unit, new_position)
attack_target = self.get_attack_target(new_position, team)
if attack_target:
return self.attack(attack_target, self.strength[team])
return False
return True
def attack(self, unit, strength):
target_team = self._caves[unit]
self.fighters[target_team][unit] -= strength
if self.fighters[target_team][unit] <= 0:
del self.fighters[target_team][unit]
self._caves[unit] = EMPTY_VALUE
return False
return True
def move_unit(self, team, from_coordinates, to_coordinates):
self._caves[to_coordinates] = team
self._caves[from_coordinates] = EMPTY_VALUE
self.fighters[team][to_coordinates] = self.fighters[team][from_coordinates]
del self.fighters[team][from_coordinates]
def get_attack_target(self, unit, team):
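        # Choose the adjacent enemy with the lowest hit points; ties between
        # equally weak enemies are broken in reading order via solve_tie.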
adjacents = []
min_hp = None
for adjacent in self.get_coordinates_around(unit):
opponent = self._caves[adjacent]
if opponent in [EMPTY_VALUE, team]:
continue
hp = self.fighters[opponent][adjacent]
if min_hp is None or hp < min_hp:
min_hp = hp
adjacents = [adjacent]
elif hp == min_hp:
adjacents.append(adjacent)
return solve_tie(adjacents)
def find_next_step(self, unit, team):
in_range = self.get_in_range(team)
if not in_range:
return None
finder = char_map.MapExplorer(self._caves)
rules = FindAllClosestRules(targets=in_range, allowed_values=[EMPTY_VALUE])
finder.explore(unit, rules)
closest = solve_tie(rules.results)
if not closest:
return None
path = finder.shortest_path(start_point=unit, end_point=closest, rules=rules)
return path[1]
def iterate_units(self):
all_units = itertools.chain.from_iterable(team.keys() for team in self.fighters.values())
return sorted_by_priority(all_units)
def get_coordinates_around(self, coordinates):
for delta in char_map.ADJACENT_COORDINATES_DELTAS:
adjacent = aoc_utils.geometry.add_coordinates(coordinates, delta)
if adjacent in self._caves and self._caves[adjacent] != WALL_VALUE:
yield adjacent
def get_in_range(self, opponent):
in_range = []
for target in self.get_targets(opponent):
for coordinates in self.get_coordinates_around(target):
if self._caves[coordinates] == EMPTY_VALUE:
in_range.append(coordinates)
return sorted(in_range, key=lambda tup: (tup[1], tup[0]))
def get_targets(self, opponent):
for coordinates, entry in self._caves.items():
if entry not in [WALL_VALUE, EMPTY_VALUE, opponent]:
yield coordinates
def echo(self):
all_fighters = {unit: hp for team in self.fighters.values() for unit, hp in team.items()}
for y, line in enumerate(self._caves.lines()):
line += " "
line_units = sorted_by_priority(unit for unit in all_fighters if unit[1] == y)
line += " ".join(str(all_fighters[unit]) for unit in line_units)
print(line)
| caves = Caves([
"#######",
"#E..G.#",
"#...#.#",
"#.G.#G#",
"#######",
])
return caves |
py_utils_test.py | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for py_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import os
from tensorflow_datasets import testing
from tensorflow_datasets.core import constants
from tensorflow_datasets.core.utils import py_utils
class PyUtilsTest(testing.TestCase):
def test_is_notebook(self):
self.assertFalse(py_utils.is_notebook())
def test_map_nested(self):
"""Test the mapping function."""
def map_fn(x):
return x * 10
result = py_utils.map_nested(map_fn, {
'a': 1,
'b': {
'c': 2,
'e': [3, 4, 5],
},
})
self.assertEqual(result, {
'a': 10,
'b': {
'c': 20,
'e': [30, 40, 50],
},
})
result = py_utils.map_nested(map_fn, [1, 2, 3])
self.assertEqual(result, [10, 20, 30])
result = py_utils.map_nested(map_fn, 1)
self.assertEqual(result, 10)
def test_zip_nested(self):
"""Test the zip nested function."""
arg0 = {
'a': 1,
'b': {
'c': 2,
'e': [3, 4, 5],
},
}
arg1 = {
'a': 10,
'b': {
'c': 20,
'e': [30, 40, 50],
},
}
result = py_utils.zip_nested(arg0, arg1)
self.assertEqual(result, {
'a': (1, 10),
'b': {
'c': (2, 20),
'e': [(3, 30), (4, 40), (5, 50)],
},
})
result = py_utils.zip_nested(1, 2)
self.assertEqual(result, (1, 2))
def test_dict_only(self):
def map_fn(x):
return x[0] + x[1]
arg0 = {
'a': (1, 2),
'b': {
'c': 2,
'e': [3, 4, 5],
},
}
arg1 = {
'a': (10, 20),
'b': {
'c': 20,
'e': [30, 40, 50],
},
}
result = py_utils.zip_nested(arg0, arg1, dict_only=True)
self.assertEqual(result, {
'a': ((1, 2), (10, 20)),
'b': {
'c': (2, 20),
'e': ([3, 4, 5], [30, 40, 50]),
},
})
result = py_utils.map_nested(map_fn, result, dict_only=True)
self.assertEqual(result, {
'a': (1, 2, 10, 20),
'b': {
'c': 22,
'e': [3, 4, 5, 30, 40, 50],
},
})
def test_flatten_nest_dict(self):
nest_d = {
'a': 1,
'b/c': 2,
'b': {
'e': 3,
'f': {
'g': 4
},
},
}
flat_d = {
'a': 1,
'b/c': 2,
'b/e': 3,
'b/f/g': 4,
}
self.assertEqual(py_utils.flatten_nest_dict(nest_d), flat_d)
self.assertEqual(py_utils.pack_as_nest_dict(flat_d, nest_d), nest_d)
with self.assertRaisesWithPredicateMatch(ValueError, 'Extra keys'):
py_utils.pack_as_nest_dict({
'a': 1,
'b/c': 2,
'b/e': 3,
'b/f/g': 4,
'b/h': 5, # Extra key
}, nest_d)
with self.assertRaisesWithPredicateMatch(KeyError, 'b/e'):
py_utils.pack_as_nest_dict(
{
'a': 1,
'b/c': 2,
'b/d': 3,
},
{
'a': 1,
'b': {
'c': 2,
'd': 3,
'e': 4, # Extra key
}
},
)
with self.assertRaisesWithPredicateMatch(
ValueError, 'overwrite existing key:'):
py_utils.flatten_nest_dict({
'a': {
'b': 1,
},
'a/b': 2, # Collision
})
def test_tfds_dir(self):
"""Test the proper suffix only, since the prefix can vary."""
self.assertTrue(py_utils.tfds_dir().endswith('/tensorflow_datasets'))
class ReadChecksumDigestTest(testing.TestCase):
def test_digest(self):
digest, size = py_utils.read_checksum_digest(
os.path.join(self.test_data, '6pixels.png'), hashlib.sha256)
self.assertEqual(
digest,
'04f38ebed34d3b027d2683193766155912fba647158c583c3bdb4597ad8af34c')
self.assertEqual(102, size)
class GetClassPathUrlTest(testing.TestCase):
def test_get_class_path(self):
cls_path = py_utils.get_class_path(py_utils.NonMutableDict)
self.assertEqual(cls_path, 'tfds.core.utils.py_utils.NonMutableDict')
cls_path = py_utils.get_class_path(
py_utils.NonMutableDict(), use_tfds_prefix=False)
self.assertEqual(cls_path,
'tensorflow_datasets.core.utils.py_utils.NonMutableDict')
def | (self):
cls_url = py_utils.get_class_url(py_utils.NonMutableDict)
self.assertEqual(
cls_url,
(constants.SRC_BASE_URL + 'tensorflow_datasets/core/utils/py_utils.py'))
if __name__ == '__main__':
testing.test_main()
| test_get_class_url |
conv.go | // Copyright 2014 by caixw, All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
package conv
import (
"fmt"
"strconv"
"strings"
)
// Returns an error indicating that val cannot be converted to type t
func typeError(val interface{}, t string) error {
	return fmt.Errorf("[%T:%v] cannot be converted to type [%v]", val, val, t)
}
// Converts a string to a bool value; called by the Bool() function.
// Adds some commonly used string conversions that strconv.ParseFloat does not support
func str2Bool(str string) (bool, error) {
str = strings.ToLower(strings.TrimSpace(str))
if val, err := strconv.ParseBool(str); err == nil {
| "off"(false), "true"(true), "false"(false)
func Bool(val interface{}) (bool, error) {
switch ret := val.(type) {
case bool:
return ret, nil
//case int, int8, int32, int64, float32, float64, uint, uint8, uint32, uint64:
// return ret != 0, nil
case int:
return ret != 0, nil
case int8:
return ret != 0, nil
case int32:
return ret != 0, nil
case int64:
return ret != 0, nil
case float32:
return ret != 0, nil
case float64:
return ret != 0, nil
case uint:
return ret != 0, nil
case uint8:
return ret != 0, nil
case uint32:
return ret != 0, nil
case uint64:
return ret != 0, nil
case []byte:
return str2Bool(string(ret))
case string:
return str2Bool(ret)
default:
return false, typeError(val, "bool")
}
}
// Converts val to bool, or returns the def parameter if the conversion fails.
func MustBool(val interface{}, def bool) bool {
if ret, err := Bool(val); err != nil {
return def
} else {
return ret
}
}
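// Illustrative usage (a sketch, not part of this package's tests):
//
//	b, err := Bool("on")                // true, nil
//	b2 := MustBool("not-a-bool", false) // conversion fails, so def is returned: false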
// Converts val to uint64, or returns an error if the conversion fails.
// Converting a signed integer to an unsigned one: negative numbers return an error; positive numbers and zero convert normally
func Uint64(val interface{}) (uint64, error) {
switch ret := val.(type) {
case uint64:
return ret, nil
case int:
if ret < 0 {
return 0, typeError(ret, "uint64")
} else {
return uint64(ret), nil
}
case int8:
if ret < 0 {
return 0, typeError(ret, "uint64")
} else {
return uint64(ret), nil
}
case int32:
if ret < 0 {
return 0, typeError(ret, "uint64")
} else {
return uint64(ret), nil
}
case int64:
if ret < 0 {
return 0, typeError(ret, "uint64")
} else {
return uint64(ret), nil
}
case uint:
return uint64(ret), nil
case uint8:
return uint64(ret), nil
case uint32:
return uint64(ret), nil
case float32:
if ret < 0 {
return 0, typeError(ret, "uint64")
} else {
return uint64(ret), nil
}
case float64:
if ret < 0 {
return 0, typeError(ret, "uint64")
} else {
return uint64(ret), nil
}
case bool:
if ret {
return 1, nil
} else {
return 0, nil
}
case []byte:
if val, err := strconv.ParseFloat(string(ret), 32); err == nil {
return uint64(val), nil
} else {
return 0, typeError(val, "uint64")
}
case string:
if val, err := strconv.ParseFloat(ret, 32); err == nil {
return uint64(val), nil
} else {
return 0, typeError(val, "uint64")
}
default:
return 0, typeError(ret, "uint64")
}
}
// Converts val to uint64, or returns the def parameter if the conversion fails.
func MustUint64(val interface{}, def uint64) uint64 {
if ret, err := Uint64(val); err != nil {
return def
} else {
return ret
}
}
// Converts val to uint, or returns an error if the conversion fails.
func Uint(val interface{}) (uint, error) {
if ret, err := Uint64(val); err != nil {
return 0, err
} else {
return uint(ret), nil
}
}
// Converts val to uint, or returns the def parameter if the conversion fails.
func MustUint(val interface{}, def uint) uint {
if ret, err := Uint64(val); err != nil {
return def
} else {
return uint(ret)
}
}
// Converts val to uint8, or returns an error if the conversion fails.
func Uint8(val interface{}) (uint8, error) {
if ret, err := Uint64(val); err != nil {
return 0, err
} else {
return uint8(ret), nil
}
}
// Converts val to uint8, or returns the def parameter if the conversion fails.
func MustUint8(val interface{}, def uint8) uint8 {
if ret, err := Uint64(val); err != nil {
return def
} else {
return uint8(ret)
}
}
// Converts val to uint32, or returns an error if the conversion fails.
func Uint32(val interface{}) (uint32, error) {
if ret, err := Uint64(val); err != nil {
return 0, err
} else {
return uint32(ret), nil
}
}
// Converts val to uint32, or returns the def parameter if the conversion fails.
func MustUint32(val interface{}, def uint32) uint32 {
if ret, err := Uint64(val); err != nil {
return def
} else {
return uint32(ret)
}
}
// Converts val to int64, or returns an error if the conversion fails.
func Int64(val interface{}) (int64, error) {
switch ret := val.(type) {
case int64:
return ret, nil
case int:
return int64(ret), nil
case int8:
return int64(ret), nil
case int32:
return int64(ret), nil
case uint:
return int64(ret), nil
case uint8:
return int64(ret), nil
case uint32:
return int64(ret), nil
case uint64:
return int64(ret), nil
case float32:
return int64(ret), nil
case float64:
return int64(ret), nil
case bool:
if ret {
return 1, nil
} else {
return 0, nil
}
case []byte:
if val, err := strconv.ParseFloat(string(ret), 32); err == nil {
return int64(val), nil
} else {
return -1, typeError(val, "int64")
}
case string:
if val, err := strconv.ParseFloat(ret, 32); err == nil {
return int64(val), nil
} else {
return -1, typeError(val, "int64")
}
default:
return -1, typeError(ret, "int64")
}
}
// Converts val to int64, or returns the def parameter if the conversion fails.
func MustInt64(val interface{}, def int64) int64 {
if ret, err := Int64(val); err != nil {
return def
} else {
return ret
}
}
// Converts val to int, or returns an error if the conversion fails.
func Int(val interface{}) (int, error) {
if ret, err := Int64(val); err != nil {
return -1, err
} else {
return int(ret), err
}
}
// Converts val to int, or returns the def parameter if the conversion fails.
func MustInt(val interface{}, def int) int {
if ret, err := Int64(val); err != nil {
return def
} else {
return int(ret)
}
}
// Converts val to int8, or returns an error if the conversion fails.
func Int8(val interface{}) (int8, error) {
if ret, err := Int64(val); err != nil {
return -1, err
} else {
return int8(ret), err
}
}
// Converts val to int8, or returns the def parameter if the conversion fails.
func MustInt8(val interface{}, def int8) int8 {
if ret, err := Int64(val); err != nil {
return def
} else {
return int8(ret)
}
}
// Converts val to int32, or returns an error if the conversion fails.
func Int32(val interface{}) (int32, error) {
if ret, err := Int64(val); err != nil {
return -1, err
} else {
return int32(ret), err
}
}
// Converts val to int32, or returns the def parameter if the conversion fails.
func MustInt32(val interface{}, def int32) int32 {
if ret, err := Int64(val); err != nil {
return def
} else {
return int32(ret)
}
}
// Converts val to float64, or returns an error if the conversion fails.
func Float64(val interface{}) (float64, error) {
switch ret := val.(type) {
case float64:
return ret, nil
case int:
return float64(ret), nil
case int8:
return float64(ret), nil
case int32:
return float64(ret), nil
case int64:
return float64(ret), nil
case uint:
return float64(ret), nil
case uint8:
return float64(ret), nil
case uint32:
return float64(ret), nil
case uint64:
return float64(ret), nil
case float32:
return float64(ret), nil
case bool:
if ret {
return 1.0, nil
} else {
return 0.0, nil
}
case []byte:
if val, err := strconv.ParseFloat(string(ret), 64); err == nil {
return float64(val), nil
} else {
return -1, typeError(val, "float64")
}
case string:
if val, err := strconv.ParseFloat(ret, 64); err == nil {
return float64(val), nil
} else {
return -1, typeError(val, "float64")
}
default:
return -1, typeError(ret, "float64")
}
}
// Converts val to float64, or returns the def parameter if the conversion fails.
func MustFloat64(val interface{}, def float64) float64 {
if ret, err := Float64(val); err != nil {
return def
} else {
return ret
}
}
// Converts val to float32, or returns an error if the conversion fails.
func Float32(val interface{}) (float32, error) {
if ret, err := Float64(val); err != nil {
return -1.0, err
} else {
return float32(ret), nil
}
}
// Converts val to float32, or returns the def parameter if the conversion fails.
func MustFloat32(val interface{}, def float32) float32 {
if ret, err := Float64(val); err != nil {
return def
} else {
return float32(ret)
}
}
// Converts val to string, or returns an error if the conversion fails.
func String(val interface{}) (string, error) {
switch ret := val.(type) {
case string:
return ret, nil
case []byte:
return string(ret), nil
case int64:
return strconv.FormatInt(ret, 10), nil
case int:
return strconv.FormatInt(int64(ret), 10), nil
case int8:
return strconv.FormatInt(int64(ret), 10), nil
case int32:
return strconv.FormatInt(int64(ret), 10), nil
case uint:
return strconv.FormatInt(int64(ret), 10), nil
case uint8:
return strconv.FormatInt(int64(ret), 10), nil
case uint32:
return strconv.FormatInt(int64(ret), 10), nil
case uint64:
return strconv.FormatInt(int64(ret), 10), nil
case float32:
return strconv.FormatFloat(float64(ret), 'f', -1, 32), nil
case float64:
return strconv.FormatFloat(ret, 'f', -1, 64), nil
case bool:
return strconv.FormatBool(ret), nil
case fmt.Stringer:
return ret.String(), nil
case error:
return ret.Error(), nil
default:
return "", typeError(ret, "string")
}
}
// Converts val to string, or returns the def parameter if the conversion fails.
func MustString(val interface{}, def string) string {
if ret, err := String(val); err != nil {
return def
} else {
return ret
}
}
// Converts val to []byte, or returns an error if the conversion fails.
func Bytes(val interface{}) ([]byte, error) {
switch ret := val.(type) {
case []byte:
return ret, nil
case string:
return []byte(ret), nil
case int64:
return []byte(strconv.FormatInt(ret, 10)), nil
case int:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case int8:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case int32:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case uint:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case uint8:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case uint32:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case uint64:
return []byte(strconv.FormatInt(int64(ret), 10)), nil
case float32:
return []byte(strconv.FormatFloat(float64(ret), 'f', 5, 32)), nil
case float64:
return []byte(strconv.FormatFloat(ret, 'f', 5, 64)), nil
case bool:
return []byte(strconv.FormatBool(ret)), nil
default:
return nil, typeError(ret, "[]byte")
}
}
// Converts val to []byte, or returns the def parameter if the conversion fails.
func MustBytes(val interface{}, def []byte) []byte {
if ret, err := Bytes(val); err != nil {
return def
} else {
return ret
}
}
// Converts val to a slice, or returns an error if the conversion fails.
// []int, []interface{} and arrays can all be converted.
// []byte("123") returns []interface{}{byte(49),byte(50),byte(51)}
// "123" returns []interface{}{rune(49),rune(50),rune(51)}
func Slice(val interface{}) ([]interface{}, error) {
switch data := val.(type) {
case []interface{}:
return data, nil
case []int:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []int8:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []int32:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []int64:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []uint:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []uint8:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []uint32:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []uint64:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []float32:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case []string:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
case string:
ret := make([]interface{}, len(data))
for k, v := range data {
ret[k] = v
}
return ret, nil
default:
return nil, typeError(data, "slice")
}
}
// Converts val to a slice, or returns the def parameter if the conversion fails.
func MustSlice(val interface{}, def []interface{}) []interface{} {
if ret, err := Slice(val); err != nil {
return def
} else {
return ret
}
}
// Converts val to map[string]interface{}, or returns an error if the conversion fails.
// If val is a struct, its fields are converted into map entries.
func Map(val interface{}) (map[string]interface{}, error) {
switch ret := val.(type) {
case map[string]interface{}:
return ret, nil
default:
return Obj2Map(val, nil)
}
}
// Converts val to map[string]interface{}, or returns the def parameter if the conversion fails.
func MustMap(val interface{}, def map[string]interface{}) map[string]interface{} {
if ret, err := Map(val); err != nil {
return def
} else {
return ret
}
}
| return val, nil
} else if val, err := strconv.ParseFloat(str, 32); err == nil {
return val != 0, nil
} else if str == "on" {
return true, nil
} else if str == "off" {
return false, nil
} else {
return false, typeError(val, "bool")
}
}
// Converts val to bool, or returns an error if the conversion fails.
// The following values can be converted correctly:
// 123(true), 0(false),"-123"(true), "on"(true), |
insert_titantic.py | import pandas as pd
import psycopg2
import os
from dotenv import load_dotenv
load_dotenv()
# read in our data
df = pd.read_csv('./titanic.csv')
print(f"DF shape: {df.shape}")
# create connection to db we want to move the data to
conn = psycopg2.connect(
host=os.getenv('DB_HOST'),
dbname=os.getenv('DB_USER'),
user=os.getenv('DB_USER'),
password=os.getenv('DB_PASSWORD')
)
cur = conn.cursor()
# ensure the table is fresh by dropping if exists and creating from scratch
query = "select exists(select * from information_schema.tables where table_name='titantic')"
cur.execute(query)
if cur.fetchone()[0]:
print("dropping table...")
query = "DROP TABLE titantic;"
cur.execute(query)
print("creating table...")
query = """
CREATE TABLE titantic (
id SERIAL PRIMARY KEY,
survived BOOLEAN,
class TEXT,
name TEXT,
sex TEXT,
age INTEGER,
siblings BOOLEAN,
parents BOOLEAN,
fare REAL
)
"""
cur.execute(query)
def get_name(name):
return name.replace("'", "")
def get_row(row):
|
# for each row in the csv, add a row to the postgres db
print("adding rows...")
for row in df.values:
query = "INSERT INTO titantic (survived, class, name, sex, age, siblings, parents, fare) VALUES " + str(get_row(row)) + ";"
cur.execute(query)
query = "SELECT * FROM titantic"
cur.execute(query)
rows = cur.fetchall()
print(f"Num rows: {len(rows)}")
conn.commit()
cur.close() | return (bool(row[0]), row[1], get_name(row[2]), row[3], row[4], bool(row[5]), bool(row[6]), row[7]) |
Web3Context.tsx | import CPK, { EthersAdapter, Transaction } from "contract-proxy-kit";
import React, { useContext, useCallback, useEffect, useState } from "react";
import { SafeAppWeb3Modal as Web3Modal } from "@gnosis.pm/safe-apps-web3modal";
import { EthereumAuthProvider, ThreeIdConnect } from "@3id/connect";
import WalletConnectProvider from "@walletconnect/web3-provider";
import { BigNumber, ethers } from "ethers";
import type { Network } from "@ethersproject/providers";
import Ceramic from "@ceramicnetwork/http-client";
import { IDX } from "@ceramicstudio/idx";
import type { IDX as IDXApi } from "@ceramicstudio/idx";
import { DID } from "dids";
import { Resolver } from "did-resolver";
import ThreeIdResolver from "@ceramicnetwork/3id-did-resolver";
import KeyDidResolver from "key-did-resolver";
import { SUBSCRIPTION_PERIOD_DEFAULT } from "../constants";
import { getNetworkByChainId } from "../lib/networks";
import AllowanceModuleAbi from "../contracts/gnosis/AllowanceModule.json";
import ERC20Abi from "../contracts/ERC20.json";
export type Web3ContextValue = {
connectToWeb3: () => void;
authenticateCeramic: () => Promise<string>;
disconnect: () => void;
getConnectText: () => string;
getBalanceOf: (account: string, tokenAddress: string) => Promise<BigNumber>;
getProxyBalance: (tokenAddress: string) => Promise<BigNumber>;
fundProxy: (tokenAddress: string, value: string) => Promise<void>;
setupCPKModules: (
tokenAddress: string,
deposit: string,
delegateContract: string
) => Promise<Array<Transaction>>;
encodeAllowanceModuleCall(
functionName: string,
args: Array<string>
): Array<Transaction>;
signTransfer: (
guildAddress: string,
tokenAddress: string,
contributionValue: string
) => Promise<string>;
submitCPKTx: (
txs: Array<Transaction>
) => Promise<ethers.providers.TransactionResponse | null>;
ethersProvider?: ethers.providers.Web3Provider;
account: string;
providerChainId: number;
connected: boolean;
idx?: IDXApi;
did?: DID;
network?: Network;
cpk: CPK | null;
};
type Web3State = {
account: string;
providerChainId: number;
ethersProvider?: ethers.providers.Web3Provider;
cpk?: CPK;
};
const initialWeb3Context = {
connectToWeb3: () => {},
authenticateCeramic: async () => "",
disconnect: () => {},
getConnectText: () => "",
// eslint-disable-next-line @typescript-eslint/no-unused-vars
getBalanceOf: async (account: string, tokenAddress: string) =>
BigNumber.from("0"),
// eslint-disable-next-line @typescript-eslint/no-unused-vars
getProxyBalance: async (tokenAddress: string) => BigNumber.from("0"),
// eslint-disable-next-line @typescript-eslint/no-unused-vars
fundProxy: async (tokenAddress: string, value: string) => {},
// eslint-disable-next-line @typescript-eslint/no-unused-vars
setupCPKModules: async (
// eslint-disable-next-line @typescript-eslint/no-unused-vars
tokenAddress: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
deposit: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
delegateContract: string
) => [],
encodeAllowanceModuleCall: (
// eslint-disable-next-line @typescript-eslint/no-unused-vars
functionName: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
args: Array<string>
) => [],
signTransfer: async (
// eslint-disable-next-line @typescript-eslint/no-unused-vars
guildAddress: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
tokenAddress: string,
// eslint-disable-next-line @typescript-eslint/no-unused-vars
contributionValue: string
) => "",
// eslint-disable-next-line @typescript-eslint/no-unused-vars
submitCPKTx: async (txs: Array<Transaction>) => null,
account: "",
providerChainId: 0,
connected: false,
cpk: null,
};
export const Web3Context =
React.createContext<Web3ContextValue>(initialWeb3Context);
export const useWeb3Context: () => Web3ContextValue = () =>
useContext(Web3Context);
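// Illustrative consumer (a hypothetical component, shown only as a sketch):
//
//   const { connectToWeb3, account, connected } = useWeb3Context();
//   return (
//     <button onClick={connectToWeb3}>
//       {connected ? account : "Connect to a Wallet"}
//     </button>
//   );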
const providerOptions = {
walletconnect: {
package: WalletConnectProvider,
options: {
rpc: {
4: getNetworkByChainId(4).rpc_url,
},
},
},
};
const web3Modal = new Web3Modal({
cacheProvider: false,
providerOptions,
});
const initialWeb3State = {
account: "",
providerChainId: 0,
};
export const Web3ContextProvider: React.FC = ({ children }) => {
const [{ providerChainId, ethersProvider, account, cpk }, setWeb3State] =
useState<Web3State>(initialWeb3State);
const [connected, setConnected] = useState(false);
const [idx, setIdx] = useState<IDXApi | null>(null);
const [did, setDid] = useState<DID | null>(null);
const [network, setNetwork] = useState<Network | null>(null);
const setWeb3Provider = useCallback(
async (initialProvider: any): Promise<void> => {
try {
const provider = new ethers.providers.Web3Provider(initialProvider);
const { chainId } = initialProvider;
const currentNetwork = await provider.getNetwork();
setNetwork(currentNetwork);
const signer = provider.getSigner();
const gotAccount = await signer.getAddress();
const isSafeApp = await web3Modal.isSafeApp();
const ethLibAdapter = !isSafeApp
? new EthersAdapter({ ethers, signer })
: null;
const cpkInstance =
process.env.REACT_APP_USE_CPK === "true" && ethLibAdapter
? await CPK.create({
ethLibAdapter,
ownerAccount: gotAccount,
})
: undefined;
setWeb3State({
account: gotAccount,
ethersProvider: provider,
providerChainId: chainId,
cpk: cpkInstance,
});
} catch (error) {
console.error(error);
}
},
[]
);
const connectToWeb3 = useCallback(async () => {
web3Modal.clearCachedProvider();
const modalProvider = await web3Modal.requestProvider();
await setWeb3Provider(modalProvider);
modalProvider.on("accountsChanged", (accounts: Array<string>) => {
setWeb3State((_provider) => ({
..._provider,
account: accounts[0],
}));
window.location.reload();
});
modalProvider.on("chainChanged", () => {
window.location.reload();
});
setConnected(true);
}, [setWeb3Provider]);
const disconnect = async () => {
web3Modal.clearCachedProvider();
setWeb3State(initialWeb3State);
setConnected(false);
};
const getConnectText = useCallback(
() =>
account ? `${account.substr(0, 5)}... Connected` : "Connect to a Wallet",
[account]
);
const get3IdProvider = useCallback(async () => {
const authProvider = new EthereumAuthProvider(window.ethereum, account);
const threeIdConnect = new ThreeIdConnect();
await threeIdConnect.connect(authProvider);
return threeIdConnect.getDidProvider();
}, [account]);
const authenticateCeramic = useCallback(async (): Promise<string> => {
if (!account) {
return "";
}
const ceramic = new Ceramic(process.env.REACT_APP_CERAMIC_URL);
const threeIdProvider = await get3IdProvider();
const aliases = {
contributorProfile:
"kjzl6cwe1jw14946qcgwbeixkh2ou9hwn29zv331akhfr61a44klf9ukg9jxz8g",
contributorCSV:
"kjzl6cwe1jw14agavukkr2w9qtay6eaxddurgvelnrnf7m74z1s2hofxp15dfea",
guildCSVMapping:
"kjzl6cwe1jw146k5uh5ayrozixpj99jeamsx0tcrc1dnwenshbc8r9ou44ckmin",
};
const resolver = new Resolver(
{
...ThreeIdResolver.getResolver(ceramic),
...KeyDidResolver.getResolver(),
},
{ cache: false }
);
const genDid = new DID({
provider: threeIdProvider,
resolver,
});
await genDid.authenticate();
await ceramic.setDID(genDid);
setDid(genDid);
const genIdx = new IDX({ ceramic, aliases });
setIdx(genIdx);
return genIdx.id;
}, [account, get3IdProvider]);
const getBalanceOf = async (
account: string,
tokenAddress: string
): Promise<BigNumber> => {
if (!ethersProvider) {
throw new Error("Provider is not setup!");
}
if (tokenAddress === ethers.constants.AddressZero) {
return await ethersProvider.getBalance(account);
}
// const signer = ethersProvider.getSigner();
const erc20 = new ethers.Contract(tokenAddress, ERC20Abi, ethersProvider);
return BigNumber.from((await erc20.balanceOf(account)).toString());
};
const getProxyBalance = async (tokenAddress: string): Promise<BigNumber> => {
if (!ethersProvider || !cpk) {
throw new Error("Provider is not setup!");
}
if (tokenAddress === ethers.constants.AddressZero) {
// TODO: This should be used after https://github.com/gnosis/contract-proxy-kit/pull/150 is merged
// const balance = await cpk.getBalance();
const balance = await (
cpk.ethLibAdapter as EthersAdapter
).signer.provider.getBalance(cpk.address); // quick fix
return balance.toString() === "NaN"
? BigNumber.from("0")
: BigNumber.from(balance.toString());
}
const signer = ethersProvider.getSigner();
const erc20 = new ethers.Contract(tokenAddress, ERC20Abi, signer);
const balance = await erc20.balanceOf(cpk.address);
return BigNumber.from(balance.toString());
};
const fundProxy = async (
tokenAddress: string,
value: string
): Promise<void> => {
if (!ethersProvider || !cpk) {
throw new Error("Provider is not setup!");
}
const signer = ethersProvider.getSigner();
if (tokenAddress === ethers.constants.AddressZero) {
const tx = await signer.sendTransaction({
to: cpk.address,
value: ethers.BigNumber.from(value),
});
await tx.wait(1);
return;
}
const erc20 = new ethers.Contract(tokenAddress, ERC20Abi, signer);
const tx = await erc20.transfer(cpk.address, value);
await tx.wait(1);
};
const setupCPKModules = async (
tokenAddress: string,
deposit: string,
delegateContract: string
): Promise<Array<Transaction>> => {
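    // Builds the batch of CPK transactions needed before contributions can
    // flow: enable the Gnosis allowance module (if missing), register the
    // delegate contract (if missing), then set its token allowance.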
if (!ethersProvider || !cpk) {
throw new Error("Provider is not setup!");
}
const { gnosisConfig } = getNetworkByChainId(providerChainId);
/* const iErc20 = new ethers.utils.Interface(ERC20Abi); */
const signer = ethersProvider.getSigner();
const isDeployed = await cpk.isProxyDeployed();
const hasAllowanceModule =
isDeployed &&
((await cpk.getContractVersion()) !== "1.1.1"
? await cpk.isModuleEnabled(gnosisConfig.allowanceModule)
: (await cpk.getModules()).includes(gnosisConfig.allowanceModule));
// Delegate MUST be a GuildApp contract
const delegate = delegateContract;
const allowanceModule = new ethers.Contract(
gnosisConfig.allowanceModule,
AllowanceModuleAbi,
signer
);
const delegates = await allowanceModule.getDelegates(cpk.address, 0, 10);
const isDelegate = delegates.results.includes(delegate);
const currentDate = new Date();
const currentPeriod = new Date(
currentDate.getFullYear(),
currentDate.getMonth(),
1
);
const allowance = await allowanceModule.allowances(
cpk.address,
delegate,
tokenAddress
);
const allowanceAmount = (allowance.amount as ethers.BigNumber)
.add(ethers.BigNumber.from(deposit))
.toString();
const txs = [
!hasAllowanceModule && {
operation: CPK.Call,
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
to: cpk?.address!,
value: 0,
data: await cpk.contractManager?.versionUtils?.encodeEnableModule(
gnosisConfig.allowanceModule
),
},
!isDelegate && {
operation: CPK.Call,
to: gnosisConfig.allowanceModule,
value: 0, | },
{
operation: CPK.Call,
to: gnosisConfig.allowanceModule,
value: 0,
data: allowanceModule.interface.encodeFunctionData("setAllowance", [
delegate,
tokenAddress,
allowanceAmount,
SUBSCRIPTION_PERIOD_DEFAULT, // Get time in minutes
(currentPeriod.getTime() / 1000 / 60).toFixed(0), // First day of current Period. Get time in minutes
]),
},
].filter((t) => t) as Array<Transaction>;
return txs;
};
const encodeAllowanceModuleCall = (
functionName: string,
args: Array<string>
): Array<Transaction> => {
if (!ethersProvider || !cpk) {
throw new Error("Provider is not setup!");
}
const { gnosisConfig } = getNetworkByChainId(providerChainId);
const signer = ethersProvider.getSigner();
const allowanceModule = new ethers.Contract(
gnosisConfig.allowanceModule,
AllowanceModuleAbi,
signer
);
return [
{
// operation: 0, // CPK.Call by default
to: allowanceModule.address,
value: "0",
data: allowanceModule.interface.encodeFunctionData(functionName, args),
},
];
};
const signTransfer = async (
guildAddress: string,
tokenAddress: string,
contributionValue: string
): Promise<string> => {
if (!ethersProvider || !cpk) {
throw new Error("Provider is not setup!");
}
const { gnosisConfig } = getNetworkByChainId(providerChainId);
// const domain = {
// chainId: providerChainId,
// verifyingContract: gnosisConfig.allowanceModule,
// };
// const types = {
// AllowanceTransfer: [
// { type: "address", name: "safe" },
// { type: "address", name: "token" },
// { type: "address", name: "to" },
// { type: "uint96", name: "amount" },
// { type: "address", name: "paymentToken" },
// { type: "uint96", name: "payment" },
// { type: "uint16", name: "nonce" },
// ],
// };
const signer = ethersProvider.getSigner();
const allowanceModule = new ethers.Contract(
gnosisConfig.allowanceModule,
AllowanceModuleAbi,
signer
);
const delegate = await signer.getAddress();
const allowance = await allowanceModule.allowances(
cpk.address,
delegate,
tokenAddress
);
const transferHash = await allowanceModule.generateTransferHash(
cpk.address,
tokenAddress,
guildAddress,
contributionValue,
ethers.constants.AddressZero,
0,
allowance.nonce
);
const signature = await signer.signMessage(transferHash);
return signature;
};
const submitCPKTx = async (
txs: Array<Transaction>
): Promise<ethers.providers.TransactionResponse | null> => {
if (!cpk) {
throw new Error("CPK was not setup!");
}
if (txs.length > 0) {
try {
const cpkTxRs = await cpk.execTransactions(txs);
return cpkTxRs.transactionResponse as ethers.providers.TransactionResponse;
} catch (error) {
console.error("Something went wrong while executing the CPK transactions", error);
throw error;
}
}
throw new Error("No batch Txs sent");
};
useEffect(() => {
(async (): Promise<void> => {
if (await web3Modal.isSafeApp()) {
connectToWeb3();
}
})();
}, [connectToWeb3]);
let values = {
connectToWeb3,
authenticateCeramic,
disconnect,
getBalanceOf,
getProxyBalance,
fundProxy,
setupCPKModules,
encodeAllowanceModuleCall,
signTransfer,
submitCPKTx,
ethersProvider,
cpk,
account,
providerChainId,
getConnectText,
connected,
network,
} as Web3ContextValue;
if (idx) {
values = { idx, ...values };
}
if (did) {
values = { did, ...values };
}
return <Web3Context.Provider value={values}>{children}</Web3Context.Provider>;
};
ignore.py
import logging
import os
import re
from itertools import groupby
from pathspec.patterns import GitWildMatchPattern
from pathspec.util import normalize_file
from pygtrie import StringTrie
from dvc.path_info import PathInfo
from dvc.pathspec_math import merge_patterns
from dvc.system import System
from dvc.utils import relpath
logger = logging.getLogger(__name__)
class DvcIgnore:
DVCIGNORE_FILE = ".dvcignore"
def __call__(self, root, dirs, files):
raise NotImplementedError
class DvcIgnorePatterns(DvcIgnore):
def __init__(self, pattern_list, dirname):
self.pattern_list = pattern_list
self.dirname = dirname
self.prefix = self.dirname + os.sep
regex_pattern_list = map(
GitWildMatchPattern.pattern_to_regex, pattern_list
)
self.ignore_spec = [
(ignore, re.compile("|".join(item[0] for item in group)))
for ignore, group in groupby(regex_pattern_list, lambda x: x[1])
if ignore is not None
]
@classmethod
def from_files(cls, ignore_file_path, tree):
assert os.path.isabs(ignore_file_path)
dirname = os.path.normpath(os.path.dirname(ignore_file_path))
with tree.open(ignore_file_path, encoding="utf-8") as fobj:
path_spec_lines = [
line for line in map(str.strip, fobj.readlines()) if line
]
return cls(path_spec_lines, dirname)
def __call__(self, root, dirs, files):
files = [f for f in files if not self.matches(root, f)]
dirs = [d for d in dirs if not self.matches(root, d, True)]
return dirs, files
def matches(self, dirname, basename, is_dir=False):
# NOTE: `relpath` is too slow, so we have to assume that both
# `dirname` and `self.dirname` are relative or absolute together.
if dirname == self.dirname:
path = basename
elif dirname.startswith(self.prefix):
rel = dirname[len(self.prefix) :]
# NOTE: `os.path.join` is ~x5.5 slower
path = f"{rel}{os.sep}{basename}"
else:
return False
if not System.is_unix():
path = normalize_file(path)
return self.ignore(path, is_dir)
def ignore(self, path, is_dir):
result = False
if is_dir:
path_dir = f"{path}/"
for ignore, pattern in self.ignore_spec:
if pattern.match(path) or pattern.match(path_dir):
result = ignore
else:
for ignore, pattern in self.ignore_spec:
if pattern.match(path):
result = ignore
return result
def __hash__(self):
return hash(self.dirname + ":" + "\n".join(self.pattern_list))
def __eq__(self, other):
if not isinstance(other, DvcIgnorePatterns):
return NotImplemented
return (self.dirname == other.dirname) & (
self.pattern_list == other.pattern_list
)
def __bool__(self):
return bool(self.pattern_list)
class DvcIgnorePatternsTrie(DvcIgnore):
trie = None
def __init__(self):
if self.trie is None:
self.trie = StringTrie(separator=os.sep)
def __call__(self, root, dirs, files):
ignore_pattern = self[root]
if ignore_pattern:
return ignore_pattern(root, dirs, files)
return dirs, files
def __setitem__(self, root, ignore_pattern):
base_pattern = self[root]
common_dirname, merged_pattern = merge_patterns(
base_pattern.dirname,
base_pattern.pattern_list,
ignore_pattern.dirname,
ignore_pattern.pattern_list,
)
self.trie[root] = DvcIgnorePatterns(merged_pattern, common_dirname)
def __getitem__(self, root):
ignore_pattern = self.trie.longest_prefix(root)
if ignore_pattern:
return ignore_pattern.value
return DvcIgnorePatterns([], root)
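# --- Editor's note (illustrative, not part of the original module) ---
# A minimal sketch of how the StringTrie lookup above resolves the nearest
# ignore file: the longest matching key prefix wins. The keys and values
# below are invented for illustration.
def _trie_lookup_sketch():
    t = StringTrie(separator="/")
    t["repo"] = "root .dvcignore"
    t["repo/data"] = "data .dvcignore"
    step = t.longest_prefix("repo/data/raw")
    return step.value if step else None  # -> "data .dvcignore"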
class DvcIgnoreDirs(DvcIgnore):
def __init__(self, basenames):
self.basenames = set(basenames)
def __call__(self, root, dirs, files):
dirs = [d for d in dirs if d not in self.basenames]
return dirs, files
def __hash__(self):
return hash(tuple(self.basenames))
def __eq__(self, other):
if not isinstance(other, DvcIgnoreDirs):
return NotImplemented
return self.basenames == other.basenames
class DvcIgnoreRepo(DvcIgnore):
def __call__(self, root, dirs, files):
def is_dvc_repo(directory):
from dvc.repo import Repo
return os.path.isdir(os.path.join(root, directory, Repo.DVC_DIR))
dirs = [d for d in dirs if not is_dvc_repo(d)]
return dirs, files
class DvcIgnoreFilterNoop:
def __init__(self, tree, root_dir):
pass
def __call__(self, root, dirs, files):
return dirs, files
def is_ignored_dir(self, _):
return False
def is_ignored_file(self, _):
return False
class DvcIgnoreFilter:
def __init__(self, tree, root_dir):
self.tree = tree
self.root_dir = root_dir
self.ignores = {
DvcIgnoreDirs([".git", ".hg", ".dvc"]),
DvcIgnoreRepo(),
}
ignore_pattern_trie = DvcIgnorePatternsTrie()
for root, dirs, _ in self.tree.walk(self.root_dir):
ignore_pattern = self._get_ignore_pattern(root)
if ignore_pattern:
ignore_pattern_trie[root] = ignore_pattern
self.ignores.add(ignore_pattern_trie)
dirs[:], _ = self(root, dirs, [])
def _get_ignore_pattern(self, dirname):
ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)
if self.tree.exists(ignore_file_path):
return DvcIgnorePatterns.from_files(ignore_file_path, self.tree)
return None
def __call__(self, root, dirs, files):
for ignore in self.ignores:
dirs, files = ignore(root, dirs, files)
return dirs, files
def is_ignored_dir(self, path):
if not self._parents_exist(path):
return True
path = os.path.abspath(path)
if path == self.root_dir:
return False
dirname, basename = os.path.split(path)
dirs, _ = self(dirname, [basename], [])
return not dirs
def is_ignored_file(self, path):
if not self._parents_exist(path):
return True
dirname, basename = os.path.split(os.path.normpath(path))
_, files = self(os.path.abspath(dirname), [], [basename])
return not files
def _parents_exist(self, path):
from dvc.repo import Repo
path = PathInfo(path)
# if parent is root_dir or inside a .dvc dir we can skip this check
if path.parent == self.root_dir or Repo.DVC_DIR in path.parts:
return True
# paths outside of the repo should be ignored
path = relpath(path, self.root_dir)
if path.startswith("..") or (
os.name == "nt"
and not os.path.commonprefix(
[os.path.abspath(path), self.root_dir]
)
):
return False
# check if parent directories are in our ignores, starting from
# root_dir
for parent_dir in reversed(PathInfo(path).parents):
dirname, basename = os.path.split(parent_dir)
if basename == ".":
# parent_dir == root_dir
continue
dirs, _ = self(os.path.abspath(dirname), [basename], [])
if not dirs:
return False
return True
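# --- Editor's note (illustrative, not part of the original module) ---
# A minimal sketch of the pathspec primitive DvcIgnorePatterns builds on:
# converting one gitignore-style pattern to a regex. The pattern and paths
# below are invented.
def _pattern_regex_sketch():
    regex, include = GitWildMatchPattern.pattern_to_regex("*.pyc")
    matcher = re.compile(regex)
    # include is True for plain patterns: a match means "ignore this path"
    return include, bool(matcher.match("cache/module.pyc"))  # (True, True)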
bufferpool.go
package pool
import "bytes"
// Pool is the struct that represents our buffer pool.
type Pool struct {
c chan *bytes.Buffer
}
// New returns a new buffer pool, arbitrarily sized at 2048 bytes.
func New() *Pool {
return &Pool{
c: make(chan *bytes.Buffer, 2048),
}
}
// Get gets a buffer from the pool, creating a new one if none are available.
func (p *Pool) Get() *bytes.Buffer {
select {
case buf := <-p.c:
// Re-use this buffer
return buf
default:
// Create a new buffer
return &bytes.Buffer{}
}
}
// Put returns a buffer to the pool.
func (p *Pool) Put(buf *bytes.Buffer) {
buf.Reset()
select {
case p.c <- buf:
// Return to pool
default:
// Pool is full, discard buffer
}
}
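# --- Editor's note (illustrative, not part of bufferpool.go) ---
# A Python analogue of the same non-blocking pool pattern, using a bounded
# queue in place of Go's buffered channel; the class name and size are
# invented for illustration.
import io
import queue

class BufferPool:
    def __init__(self, size: int = 2048) -> None:
        self._q: "queue.Queue[io.BytesIO]" = queue.Queue(maxsize=size)

    def get(self) -> io.BytesIO:
        try:
            return self._q.get_nowait()   # reuse a pooled buffer
        except queue.Empty:
            return io.BytesIO()           # none available: allocate a new one

    def put(self, buf: io.BytesIO) -> None:
        buf.seek(0)
        buf.truncate(0)                   # reset before returning to the pool
        try:
            self._q.put_nowait(buf)
        except queue.Full:
            pass                          # pool is full: discard the buffer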
test_connections.py
""" Tests related to connecting inputs to outputs."""
import unittest
import numpy as np
from io import StringIO
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal, assert_warning
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class TestConnections(unittest.TestCase):
def setUp(self):
self.setup_model(None, None)
def setup_model(self, c1meta=None, c3meta=None):
self.p = om.Problem()
root = self.p.model
if c1meta is None:
c1meta = {}
if c3meta is None:
c3meta = {}
self.G1 = root.add_subsystem("G1", om.Group())
self.G2 = self.G1.add_subsystem("G2", om.Group())
self.C1 = self.G2.add_subsystem("C1", om.ExecComp('y=x*2.0', **c1meta))
self.C2 = self.G2.add_subsystem("C2", om.IndepVarComp('x', 1.0))
self.G3 = root.add_subsystem("G3", om.Group())
self.G4 = self.G3.add_subsystem("G4", om.Group())
self.C3 = self.G4.add_subsystem("C3", om.ExecComp('y=x*2.0', **c3meta))
self.C4 = self.G4.add_subsystem("C4", om.ExecComp('y=x*2.0'))
def test_no_conns(self):
self.p.setup()
self.p['G1.G2.C1.x'] = 111.
self.p['G3.G4.C3.x'] = 222.
self.p['G3.G4.C4.x'] = 333.
self.p.run_model()
self.assertEqual(self.C1._inputs['x'], 111.)
self.assertEqual(self.C3._inputs['x'], 222.)
self.assertEqual(self.C4._inputs['x'], 333.)
def test_pull_size_from_source(self):
raise unittest.SkipTest("setting input size based on src size not supported yet")
class Src(ExplicitComponent):
def setup(self):
self.add_input('x', 2.0)
self.add_output('y1', np.zeros((3, )))
self.add_output('y2', shape=((3, )))
def solve_nonlinear(self, inputs, outputs, resids):
x = inputs['x']
outputs['y1'] = x * np.array([1.0, 2.0, 3.0])
outputs['y2'] = x * np.array([1.0, 2.0, 3.0])
class Tgt(ExplicitComponent):
def setup(self):
self.add_input('x1')
self.add_input('x2')
self.add_output('y1', 0.0)
self.add_output('y2', 0.0)
def solve_nonlinear(self, inputs, outputs, resids):
x1 = inputs['x1']
x2 = inputs['x2']
outputs['y1'] = np.sum(x1)
outputs['y2'] = np.sum(x2)
p = om.Problem()
p.model.add_subsystem('src', Src())
p.model.add_subsystem('tgt', Tgt())
p.model.connect('src.y1', 'tgt.x1')
p.model.connect('src.y2', 'tgt.x2')
p.setup()
p.run_model()
self.assertEqual(p['tgt.y1'], 12.0)
self.assertEqual(p['tgt.y2'], 12.0)
def test_pull_size_from_source_with_indices(self):
raise unittest.SkipTest("setting input size based on src size not supported yet")
class Src(ExplicitComponent):
def setup(self):
self.add_input('x', 2.0)
self.add_output('y1', np.zeros((3, )))
self.add_output('y2', shape=((3, )))
self.add_output('y3', 3.0)
def solve_nonlinear(self, inputs, outputs, resids):
""" counts up. """
x = inputs['x']
outputs['y1'] = x * np.array([1.0, 2.0, 3.0])
outputs['y2'] = x * np.array([1.0, 2.0, 3.0])
outputs['y3'] = x * 4.0
class Tgt(ExplicitComponent):
def setup(self):
self.add_input('x1')
self.add_input('x2')
self.add_input('x3')
self.add_output('y1', 0.0)
self.add_output('y2', 0.0)
self.add_output('y3', 0.0)
def solve_nonlinear(self, inputs, outputs, resids):
""" counts up. """
x1 = inputs['x1']
x2 = inputs['x2']
x3 = inputs['x3']
outputs['y1'] = np.sum(x1)
outputs['y2'] = np.sum(x2)
outputs['y3'] = np.sum(x3)
top = om.Problem()
top.model.add_subsystem('src', Src())
top.model.add_subsystem('tgt', Tgt())
top.model.connect('src.y1', 'tgt.x1', src_indices=(0, 1))
top.model.connect('src.y2', 'tgt.x2', src_indices=(0, 1))
top.model.connect('src.y3', 'tgt.x3')
top.setup()
top.run_model()
self.assertEqual(top['tgt.y1'], 6.0)
self.assertEqual(top['tgt.y2'], 6.0)
self.assertEqual(top['tgt.y3'], 8.0)
def test_inp_inp_conn_no_src(self):
raise unittest.SkipTest("no setup testing yet")
self.p.model.connect('G3.G4.C3.x', 'G3.G4.C4.x')
stream = StringIO()
self.p.setup(out_stream=stream)
self.p['G3.G4.C3.x'] = 999.
self.assertEqual(self.p.model.G3.G4.C3._inputs['x'], 999.)
self.assertEqual(self.p.model.G3.G4.C4._inputs['x'], 999.)
content = stream.getvalue()
self.assertTrue("The following parameters have no associated unknowns:\n"
"G1.G2.C1.x\nG3.G4.C3.x\nG3.G4.C4.x" in content)
self.assertTrue("The following components have no connections:\n"
"G1.G2.C1\nG1.G2.C2\nG3.G4.C3\nG3.G4.C4\n" in content)
self.assertTrue("No recorders have been specified, so no data will be saved." in content)
class TestConnectionsPromoted(unittest.TestCase):
def test_inp_inp_promoted_w_prom_src(self):
p = om.Problem()
root = p.model
G1 = root.add_subsystem("G1", om.Group(), promotes=['x'])
G2 = G1.add_subsystem("G2", om.Group(), promotes=['x'])
G2.add_subsystem("C1", om.ExecComp('y=x*2.0'))
G2.add_subsystem("C2", om.IndepVarComp('x', 1.0), promotes=['x'])
G3 = root.add_subsystem("G3", om.Group(), promotes=['x'])
G4 = G3.add_subsystem("G4", om.Group(), promotes=['x'])
C3 = G4.add_subsystem("C3", om.ExecComp('y=x*2.0'), promotes=['x'])
C4 = G4.add_subsystem("C4", om.ExecComp('y=x*2.0'), promotes=['x'])
p.setup()
p.set_solver_print(level=0)
# setting promoted name will set the value into the outputs, but will
# not propagate it to the inputs. That will happen during run_model().
p['x'] = 999.
p.run_model()
self.assertEqual(C3._inputs['x'], 999.)
self.assertEqual(C4._inputs['x'], 999.)
def test_inp_inp_promoted_w_explicit_src(self):
p = om.Problem()
root = p.model
G1 = root.add_subsystem("G1", om.Group())
G2 = G1.add_subsystem("G2", om.Group(), promotes=['x'])
G2.add_subsystem("C1", om.ExecComp('y=x*2.0'))
G2.add_subsystem("C2", om.IndepVarComp('x', 1.0), promotes=['x'])
G3 = root.add_subsystem("G3", om.Group())
G4 = G3.add_subsystem("G4", om.Group(), promotes=['x'])
C3 = G4.add_subsystem("C3", om.ExecComp('y=x*2.0'), promotes=['x'])
C4 = G4.add_subsystem("C4", om.ExecComp('y=x*2.0'), promotes=['x'])
p.model.connect('G1.x', 'G3.x')
p.setup()
p.set_solver_print(level=0)
# setting promoted name will set the value into the outputs, but will
# not propagate it to the inputs. That will happen during run_model().
p['G1.x'] = 999.
p.run_model()
self.assertEqual(C3._inputs['x'], 999.)
self.assertEqual(C4._inputs['x'], 999.)
def test_overlapping_system_names(self):
# This ensures that _setup_connections does not think g1 and g1a are the same system
prob = om.Problem()
model = prob.model
g1 = model.add_subsystem('g1', om.Group())
g1a = model.add_subsystem('g1a', om.Group())
g1.add_subsystem('c', om.ExecComp('y=x'))
g1a.add_subsystem('c', om.ExecComp('y=x'))
model.connect('g1.c.y', 'g1a.c.x')
model.connect('g1a.c.y', 'g1.c.x')
prob.setup(check=True)
class TestConnectionsIndices(unittest.TestCase):
def setUp(self):
class ArrayComp(om.ExplicitComponent):
def setup(self):
self.add_input('inp', val=np.ones((2)))
self.add_input('inp1', val=0)
self.add_output('out', val=np.zeros((2)))
def compute(self, inputs, outputs):
outputs['out'] = inputs['inp'] * 2.
indep_var_comp = om.IndepVarComp()
indep_var_comp.add_output('blammo', val=3.)
indep_var_comp.add_output('arrout', val=np.ones(5))
prob = om.Problem()
prob.model.add_subsystem('idvp', indep_var_comp)
prob.model.add_subsystem('arraycomp', ArrayComp())
self.prob = prob
def test_bad_shapes(self):
# Should not be allowed because the source and target shapes do not match
self.prob.model.connect('idvp.blammo', 'arraycomp.inp')
expected = "<model> <class Group>: The source and target shapes do not match or are " + \
"ambiguous for the connection 'idvp.blammo' to 'arraycomp.inp'. " + \
"The source shape is (1,) but the target shape is (2,)."
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), expected)
else:
self.fail('Exception expected.')
self.prob.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
self.prob.setup()
def test_bad_length(self):
# Should not be allowed because the length of src_indices is greater than
# the shape of arraycomp.inp
self.prob.model.connect('idvp.blammo', 'arraycomp.inp', src_indices=[0, 1, 0])
expected = "<model> <class Group>: The source indices [0 1 0] do not specify a valid shape " + \
"for the connection 'idvp.blammo' to 'arraycomp.inp'. The target shape is " + \
"(2,) but indices are (3,)."
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), expected)
else:
self.fail('Exception expected.')
self.prob.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
self.prob.setup()
def test_bad_value(self):
# Should not be allowed because the index value within src_indices is outside
# the valid range for the source
self.prob.model.connect('idvp.arrout', 'arraycomp.inp1', src_indices=[100000])
expected = "<model> <class Group>: The source indices do not specify a valid index " + \
"for the connection 'idvp.arrout' to 'arraycomp.inp1'. " + \
"Index '100000' is out of range for source dimension of size 5."
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), expected)
else:
self.fail('Exception expected.')
self.prob.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
self.prob.setup()
def test_bad_value_bug(self):
# Should not be allowed because the 2nd index value within src_indices is outside
# the valid range for the source. A bug prevented this from being checked.
self.prob.model.connect('idvp.arrout', 'arraycomp.inp', src_indices=[0, 100000])
expected = "<model> <class Group>: The source indices do not specify a valid index " + \
"for the connection 'idvp.arrout' to 'arraycomp.inp'. " + \
"Index '100000' is out of range for source dimension of size 5."
try:
self.prob.setup()
except ValueError as err:
self.assertEqual(str(err), expected)
else:
self.fail('Exception expected.')
self.prob.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
self.prob.setup()
class TestShapes(unittest.TestCase):
def test_connect_flat_array_to_row_vector(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))
p.model.add_subsystem('C1',
om.ExecComp('y=dot(x, A)',
x={'value': np.zeros((1, 10))},
A={'value': np.eye(10)},
y={'value': np.zeros((1, 10))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_near_equal(p['C1.y'], np.arange(10)[np.newaxis, :])
def test_connect_flat_array_to_col_vector(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))
p.model.add_subsystem('C1',
om.ExecComp('y=dot(A, x)',
x={'value': np.zeros((10, 1))},
A={'value': np.eye(10)},
y={'value': np.zeros((10, 1))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_near_equal(p['C1.y'], np.arange(10)[:, np.newaxis])
def test_connect_row_vector_to_flat_array(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[np.newaxis, :]))
p.model.add_subsystem('C1', om.ExecComp('y=5*x',
x={'value': np.zeros(10)},
y={'value': np.zeros(10)}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_near_equal(p['C1.y'], 5 * np.arange(10))
def test_connect_col_vector_to_flat_array(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[:, np.newaxis]))
p.model.add_subsystem('C1', om.ExecComp('y=5*x',
x={'value': np.zeros(10)},
y={'value': np.zeros(10)}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_near_equal(p['C1.y'], 5 * np.arange(10))
def test_connect_flat_to_3d_array(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)))
p.model.add_subsystem('C1', om.ExecComp('y=5*x',
x={'value': np.zeros((1, 10, 1))},
y={'value': np.zeros((1, 10, 1))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_near_equal(p['C1.y'], 5 * np.arange(10)[np.newaxis, :, np.newaxis])
def test_connect_flat_nd_to_flat_nd(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x',
val=np.arange(10)[np.newaxis, :, np.newaxis,
np.newaxis]))
p.model.add_subsystem('C1', om.ExecComp('y=5*x',
x={'value': np.zeros((1, 1, 1, 10))},
y={'value': np.zeros((1, 1, 1, 10))}))
p.model.connect('indep.x', 'C1.x')
p.setup()
p.run_model()
assert_near_equal(p['C1.y'],
5 * np.arange(10)[np.newaxis, np.newaxis, np.newaxis, :])
def test_connect_incompatible_shapes(self):
p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(10)[np.newaxis, :,
np.newaxis, np.newaxis]))
p.model.add_subsystem('C1', om.ExecComp('y=5*x',
x={'value': np.zeros((5, 2))},
y={'value': np.zeros((5, 2))}))
p.model.connect('indep.x', 'C1.x')
expected = "<model> <class Group>: The source and target shapes do not match or are " + \
"ambiguous for the connection 'indep.x' to 'C1.x'. The source shape is " + \
"(1, 10, 1, 1) but the target shape is (5, 2)."
with self.assertRaises(Exception) as context:
p.setup()
self.assertEqual(str(context.exception), expected)
p.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
p.setup()
class TestMultiConns(unittest.TestCase):
def test_mult_conns(self):
class SubGroup(om.Group):
def setup(self):
self.add_subsystem('c1', om.ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),
promotes=['y', 'x'])
self.add_subsystem('c2', om.ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),
promotes=['z', 'y'])
prob = om.Problem()
indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_output('x', 10*np.ones(4))
indeps.add_output('y', np.ones(4))
prob.model.add_subsystem('sub', SubGroup())
prob.model.connect('x', 'sub.x')
prob.model.connect('y', 'sub.y')
expected = "<model> <class Group>: The following inputs have multiple connections: " + \
"sub.c2.y from ['indeps.y', 'sub.c1.y']"
with self.assertRaises(Exception) as context:
prob.setup()
self.assertEqual(str(context.exception), expected)
prob.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
prob.setup()
def test_mixed_conns_same_level(self):
prob = om.Problem()
indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())
indeps.add_output('x', 10*np.ones(4))
# c2.y is implicitly connected to c1.y
prob.model.add_subsystem('c1', om.ExecComp('y = 2*x', x=np.ones(4), y=2*np.ones(4)),
promotes=['y'])
prob.model.add_subsystem('c2', om.ExecComp('z = 2*y', y=np.ones(4), z=2*np.ones(4)),
promotes=['y'])
# make a second, explicit, connection to y (which is c2.y promoted)
prob.model.connect('indeps.x', 'y')
expected = "<model> <class Group>: Input 'c2.y' cannot be connected to 'indeps.x' " + \
"because it's already connected to 'c1.y'"
with self.assertRaises(Exception) as context:
prob.setup()
prob.final_setup()
self.assertEqual(str(context.exception), expected)
prob.model._raise_connection_errors = False
with assert_warning(UserWarning, expected):
prob.setup()
def test_auto_ivc_ambiguous_with_src_indices_msg(self):
class TComp(om.ExplicitComponent):
def initialize(self):
self.options.declare('src_idx', [0, 1])
def setup(self):
src = self.options['src_idx']
self.add_input('x', shape=2, src_indices=src, val=-2038.0)
self.add_output('y', shape=2)
self.declare_partials('y', 'x')
def compute(self, inputs, outputs):
outputs['y'] = 2.0 * inputs['x']
prob = om.Problem()
model = prob.model
prob.model.add_subsystem('c1', TComp(src_idx=[0, 1]), promotes_inputs=['x'])
prob.model.add_subsystem('c2', TComp(src_idx=[2, 3]), promotes_inputs=['x'])
prob.model.add_subsystem('d1', TComp(src_idx=[0, 1]), promotes_inputs=[('x', 'zz')])
prob.model.add_subsystem('d2', TComp(src_idx=[1, 2]), promotes_inputs=[('x', 'zz')])
with self.assertRaises(RuntimeError) as context:
prob.setup()
msg = "The following inputs ['c1.x', 'c2.x'] are defined using src_indices but the total source "
msg += "size is undetermined. You can specify the src size by setting 'val' or 'src_shape' in a call to set_input_defaults, or by adding an IndepVarComp as the source."
err_msg = str(context.exception).split(':')[-1]
self.assertEqual(err_msg, msg)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class TestConnectionsDistrib(unittest.TestCase):
N_PROCS = 2
def test_serial_mpi_error(self):
# Should still catch the bad index when we are running under mpi with no distributed comps.
# A bug formerly prevented this.
class TestComp(om.ExplicitComponent):
def initialize(self):
self.options['distributed'] = False
def setup(self):
self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0)
self.add_output('y', shape=1)
self.declare_partials('y', 'x')
def compute(self, inputs, outputs):
outputs['y'] = np.sum(inputs['x'])
def compute_partials(self, inputs, J):
J['y', 'x'] = np.ones((2,))
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', np.array([1.0, 3.0])))
model.add_subsystem('c3', TestComp())
model.connect("p1.x", "c3.x")
rank = prob.comm.rank
expected = f"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index " + \
"for the connection 'p1.x' to 'c3.x'. " + \
"Index '2' is out of range for source dimension of size 2."
try:
prob.setup()
except Exception as err:
self.assertEqual(str(err).splitlines()[-1], expected)
else:
self.fail('Exception expected.')
def test_serial_mpi_error_flat(self):
# Make sure the flat branch works too.
class TestComp(om.ExplicitComponent):
def initialize(self):
self.options['distributed'] = False
def setup(self):
self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0, flat_src_indices=True)
self.add_output('y', shape=1)
self.declare_partials('y', 'x')
def compute(self, inputs, outputs):
outputs['y'] = np.sum(inputs['x'])
def compute_partials(self, inputs, J):
J['y', 'x'] = np.ones((2,))
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', np.array([1.0, 3.0])))
model.add_subsystem('c3', TestComp())
model.connect("p1.x", "c3.x")
rank = prob.comm.rank
expected = f"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index " + \
"for the connection 'p1.x' to 'c3.x'. " + \
"Index '2' is out of range for source dimension of size 2."
try:
prob.setup()
except Exception as err:
self.assertEqual(str(err).splitlines()[-1], expected)
else:
self.fail('Exception expected.')
@unittest.skipUnless(MPI, "MPI is required.")
class TestConnectionsError(unittest.TestCase):
N_PROCS = 2
def test_incompatible_src_indices(self):
class TestCompDist(om.ExplicitComponent):
# this comp is distributed and forces PETScTransfer
def initialize(self):
self.options['distributed'] = True
def setup(self):
self.add_input('x', shape=2)
self.add_output('y', shape=1)
self.declare_partials('y', 'x', val=1.0)
def compute(self, inputs, outputs):
outputs['y'] = np.sum(inputs['x'])
class TestComp(om.ExplicitComponent):
def initialize(self):
self.options['distributed'] = False
def setup(self):
# read SRC_INDICES on each proc
self.add_input('x', shape=2, src_indices=[1, 2], val=-2038.0)
self.add_output('y', shape=1)
self.declare_partials('y', 'x')
def compute(self, inputs, outputs):
outputs['y'] = np.sum(inputs['x'])
def compute_partials(self, inputs, J):
J['y', 'x'] = np.ones((2,))
prob = om.Problem()
model = prob.model
rank = prob.comm.rank
if rank == 0:
setval = np.array([2.0, 3.0])
else:
setval = np.array([10.0, 20.0])
# no parallel or distributed comps, so default_vector is used (local xfer only)
model.add_subsystem('p1', om.IndepVarComp('x', setval))
model.add_subsystem('c3', TestComp())
model.add_subsystem('c4', TestCompDist())
model.connect("p1.x", "c3.x")
model.connect("c3.y", "c4.x")
with self.assertRaises(ValueError) as context:
prob.setup(check=False, mode='fwd')
self.assertEqual(str(context.exception),
f"Exception raised on rank {rank}: <model> <class Group>: The source indices do not specify a valid index for "
"the connection 'p1.x' to 'c3.x'. Index '2' is out of range for source "
"dimension of size 2.")
@unittest.skipUnless(MPI, "MPI is required.")
class TestConnectionsMPIBug(unittest.TestCase):
N_PROCS = 2
def test_bug_2d_src_indices(self):
# This model gave an exception during setup.
class Burn(om.ExplicitComponent):
def setup(self):
self.add_input('x', np.arange(12))
self.add_output('y', np.arange(12))
def compute(self, inputs, outputs):
outputs['y'] = inputs['x'] * 2.0
class LinkageComp(om.ExplicitComponent):
def setup(self):
self.add_input('in1', np.zeros((3, 2)))
self.add_input('in2', np.zeros((3, 2)))
self.add_output('out', np.zeros((3, 2)))
def compute(self, inputs, outputs):
outputs['out'] = 3 * inputs['in2'] - 2.5 * inputs['in1']
class Phases(om.ParallelGroup):
def setup(self):
self.add_subsystem('burn1', Burn())
self.add_subsystem('burn2', Burn())
class Linkages(om.Group):
def setup(self):
self.add_subsystem('linkage', LinkageComp())
class Traj(om.Group):
def setup(self):
self.add_subsystem('phases', Phases())
self.add_subsystem('linkages', Linkages())
def configure(self):
self.connect('phases.burn1.y', 'linkages.linkage.in1', src_indices=np.array([[0, 3], [4, 6], [2, 1]]))
self.connect('phases.burn2.y', 'linkages.linkage.in2', src_indices=np.array([[0, 3], [4, 6], [2, 1]]))
prob = om.Problem(model=Traj())
prob.setup()
prob.run_model()
if __name__ == "__main__":
unittest.main()
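# --- Editor's note (illustrative, standalone) ---
# A minimal sketch of a *valid* src_indices connection, the feature the
# error tests above exercise; the component names are invented.
import numpy as np
import openmdao.api as om

p = om.Problem()
p.model.add_subsystem('indep', om.IndepVarComp('x', val=np.arange(5.0)))
p.model.add_subsystem('comp', om.ExecComp('y=2*x', x=np.zeros(2), y=np.zeros(2)))
p.model.connect('indep.x', 'comp.x', src_indices=[1, 3])  # pick elements 1 and 3
p.setup()
p.run_model()
print(p['comp.y'])  # [2. 6.]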
identity.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CESNET.
#
# CESNET-OpenID-Remote is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""CESNET OIDC Auth backend for OARepo"""
from datetime import timedelta
from flask import current_app, session, g
from flask_login import current_user, user_logged_out
from flask_principal import identity_loaded, AnonymousIdentity, identity_changed, RoleNeed, UserNeed
from invenio_oauthclient.models import RemoteAccount
from invenio_oauthclient.utils import obj_or_import_string
from werkzeug.local import LocalProxy
from cesnet_openid_remote.constants import OPENIDC_GROUPS_KEY
from cesnet_openid_remote.proxies import current_cesnet_openid
CESNET_OPENID_REMOTE_SESSION_KEY = 'identity.cesnet_provides'
"""Name of session key where CESNET roles are stored."""
CESNET_OPENID_REMOTE_REFRESH_TIMEDELTA = timedelta(minutes=-5)
"""Default interval for refreshing user's extra data (e.g. groups)."""
sconf = LocalProxy(
lambda: dict(key=current_app.config.get(
"CESNET_OPENID_REMOTE_SESSION_KEY",
CESNET_OPENID_REMOTE_SESSION_KEY),
refresh=current_app.config.get(
"CESNET_OPENID_REMOTE_REFRESH_TIMEDELTA",
CESNET_OPENID_REMOTE_REFRESH_TIMEDELTA)))
@identity_changed.connect
def on_identity_changed(sender, identity):
"""Store roles in session whenever identity changes.
:param sender: Sender of the signal
:param identity: The user identity where information are stored.
"""
remote = current_cesnet_openid.remote_app
if isinstance(identity, AnonymousIdentity):
return
logged_in_via_token = \
hasattr(current_user, 'login_via_oauth2') \
and getattr(current_user, 'login_via_oauth2')
client_id = remote.get_consumer_key()
remote_account = RemoteAccount.get(
user_id=identity.id, client_id=client_id
)
groups = []
if remote_account and not logged_in_via_token:
if sconf['refresh']:
user_info = remote.get_userinfo(remote)
resource = dict(user_info=user_info,
user_id=remote.get_user_id(remote, email=user_info.email))
groups = remote.remote_groups_and_extra_data(
remote_account, resource, refresh_timedelta=sconf['refresh']
)
else:
groups = remote_account.extra_data[OPENIDC_GROUPS_KEY]
elif remote_account and logged_in_via_token:
groups = remote_account.extra_data[OPENIDC_GROUPS_KEY]
current_cesnet_openid.sync_user_roles(current_user, groups)
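# --- Editor's note (illustrative, not part of the original module) ---
# A minimal sketch of the companion flask_principal pattern: a subscriber
# that copies session-cached role names onto the identity whenever it is
# (re)loaded. It reuses the imports and `sconf` proxy above; the role
# handling itself is invented for illustration.
@identity_loaded.connect
def on_identity_loaded(sender, identity):
    for role in session.get(sconf['key'], []):
        identity.provides.add(RoleNeed(role))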
camera-type.ts
export enum CameraType {
PLAYER_VIEW,
ORBITAL,
VR_PLAYER_VIEW,
VR_BALL,
VR_FLY
}
test_adaptor_pytorch.py
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import QuantStub, DeQuantStub
import torchvision
import unittest
import os
from neural_compressor.adaptor import FRAMEWORKS
from neural_compressor.model import MODELS
from neural_compressor.adaptor.pytorch import PyTorchVersionMode
import neural_compressor.adaptor.pytorch as nc_torch
from neural_compressor.experimental import Quantization, common
from neural_compressor.conf.config import Quantization_Conf
from neural_compressor.utils.pytorch import load
from neural_compressor.utils.utility import recover
import shutil
import copy
import numpy as np
import yaml
try:
try:
import intel_pytorch_extension as ipex
except:
import intel_extension_for_pytorch as ipex
TEST_IPEX = True
except:
TEST_IPEX = False
PT_VERSION = nc_torch.get_torch_version()
if PT_VERSION >= PyTorchVersionMode.PT18.value:
FX_MODE = True
else:
FX_MODE = False
fake_dyn_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: post_training_dynamic_quant
op_wise: {
'decoder': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 1
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_ptq_yaml_for_fx = '''
model:
name: imagenet
framework: pytorch_fx
quantization:
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'default_qconfig': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
fake_qat_yaml = '''
model:
name: imagenet
framework: pytorch
quantization:
approach: quant_aware_training
train:
end_epoch: 1
iteration: 1
optimizer:
SGD:
learning_rate: 0.0001
criterion:
CrossEntropyLoss:
reduction: mean
op_wise: {
'quant': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv1': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer1.0.conv2': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
},
'layer2.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer3.0.conv1': {
'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']},
'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']}
},
'layer1.0.add_relu': {
'activation': {'dtype': ['fp32']},
'weight': {'dtype': ['fp32']}
}
}
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
def build_pytorch_yaml():
with open('ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_ptq_yaml)
with open('dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_dyn_yaml)
with open('qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_qat_yaml)
def build_pytorch_fx_yaml():
if PT_VERSION >= PyTorchVersionMode.PT19.value:
fake_fx_ptq_yaml = fake_ptq_yaml_for_fx
else:
fake_fx_ptq_yaml = fake_ptq_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_ptq_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_ptq_yaml)
fake_fx_dyn_yaml = fake_dyn_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_dynamic_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_dyn_yaml)
fake_fx_qat_yaml = fake_qat_yaml.replace('pytorch', 'pytorch_fx')
with open('fx_qat_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_fx_qat_yaml)
def build_ipex_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch_ipex
evaluation:
accuracy:
metric:
topk: 1
performance:
warmup: 5
iteration: 10
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
'''
with open('ipex_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
def build_dump_tensors_yaml():
fake_yaml = '''
model:
name: imagenet
framework: pytorch
evaluation:
accuracy:
metric:
topk: 1
tuning:
accuracy_criterion:
relative: 0.01
exit_policy:
timeout: 0
random_seed: 9527
workspace:
path: saved
tensorboard: true
'''
with open('dump_yaml.yaml', 'w', encoding="utf-8") as f:
f.write(fake_yaml)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.linear = nn.Linear(224 * 224, 5)
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
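# --- Editor's note (illustrative, not part of the original tests) ---
# A minimal eager-mode post-training static quantization flow for M, using
# only PyTorch's public prepare/convert API; this sketches what the adaptor
# automates for models with QuantStub/DeQuantStub, not its exact code path.
def _eager_ptq_sketch():
    model = M().eval()
    model.qconfig = torch.quantization.default_qconfig
    prepared = torch.quantization.prepare(model)
    prepared(torch.randn(1, 3, 224, 224))  # one dummy calibration batch
    return torch.quantization.convert(prepared)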
class FP32Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
times = x.size(1)
if times == 1:
return x + x
return x
class DynamicModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
if x is not None:
x = self.conv(x)
return x
class SubModel(torch.nn.Module):
def __init__(self, bypass=True):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(1, 1, 1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
self.fp32 = FP32Model()
self.norm = nn.LayerNorm([1, 224, 224])
self.dequant = DeQuantStub()
self.bypass = bypass
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.quant(x)
x = self.relu(x)
x = self.conv1(x)
x = self.dequant(x)
if not self.bypass:
x = self.fp32(x)
x = self.norm(x)
return x
class PartialQuantModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.conv1 = nn.Conv2d(1, 1, 1)
self.bn1 = nn.BatchNorm2d(1)
self.conv2 = nn.Conv2d(1, 1, 1)
self.linear = nn.Linear(224 * 224, 1)
self.dequant = DeQuantStub()
self.sub = SubModel(bypass=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.sub(x)
x = self.quant(x)
x = self.conv2(x)
x = x.view(1, -1)
x = self.linear(x)
x = self.dequant(x)
return x
class DynamicControlModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.linear = nn.Linear(224 * 224, 1)
self.sub = SubModel()
self.fp32 = FP32Model()
self.dyn = DynamicModel()
def forward(self, x):
x = self.conv(x)
x = self.dyn(x)
x = self.bn(x)
x = self.sub(x)
x = self.fp32(x)
x = x.view(1, -1)
x = self.linear(x)
return x
def eval_func(model):
# switch to evaluate mode
model.eval()
with torch.no_grad():
input = torch.randn(1, 3, 224, 224)
# compute output
output = model(input)
return 0.0
def q_func(model):
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
# switch to evaluate mode
model.train()
input = torch.randn(1, 3, 224, 224)
# compute output
output = model(input)
loss = output.mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return model
class TestPytorchAdaptor(unittest.TestCase):
framework_specific_info = {"device": "cpu",
"approach": "post_training_static_quant",
"random_seed": 1234,
"q_dataloader": None,
"workspace_path": "./"}
framework = "pytorch"
adaptor = FRAMEWORKS[framework](framework_specific_info)
model = torchvision.models.quantization.resnet18()
nc_model = MODELS['pytorch'](model)
@classmethod
def setUpClass(self):
build_pytorch_yaml()
build_dump_tensors_yaml()
@classmethod
def tearDownClass(self):
os.remove('ptq_yaml.yaml')
os.remove('dynamic_yaml.yaml')
os.remove('qat_yaml.yaml')
os.remove('dump_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_get_all_weight_name(self):
assert len(list(self.nc_model.get_all_weight_names())) == 62
def test_get_weight(self):
for name, param in self.model.named_parameters():
if name == "layer4.1.conv2.weight":
param.data.fill_(0.0)
if name == "fc.bias":
param.data.fill_(0.1)
assert int(torch.sum(self.nc_model.get_weight("layer4.1.conv2.weight"))) == 0
assert torch.allclose(
torch.sum(
self.nc_model.get_weight("fc.bias")),
torch.tensor(100.))
def test_get_input(self):
model = MODELS['pytorch'](torchvision.models.quantization.resnet18())
model.model.eval().fuse_model()
model.register_forward_pre_hook()
rand_input = torch.rand(100, 3, 224, 224).float()
model.model(rand_input)
assert torch.equal(model.get_inputs('x'), rand_input)
model.remove_hooks()
def test_update_weights(self):
self.nc_model.update_weights('fc.bias', torch.zeros([1000]))
assert int(torch.sum(self.nc_model.get_weight("fc.bias"))) == 0
def test_get_gradient(self):
with self.assertRaises(AssertionError):
self.nc_model.get_gradient('fc.bias')
for name, tensor in self.nc_model._model.named_parameters():
if name == 'fc.bias':
tensor.grad = torch.zeros_like(tensor)
break
assert torch.equal(torch.Tensor(self.nc_model.get_gradient('fc.bias')), torch.zeros_like(tensor))
rand_input = torch.rand(100, 3, 224, 224).float()
rand_input.grad = torch.ones_like(rand_input)
assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)),
torch.ones_like(rand_input))
def test_report_sparsity(self):
df, total_sparsity = self.nc_model.report_sparsity()
self.assertTrue(total_sparsity > 0)
self.assertTrue(len(df) == 22)
def test_quantization_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
q_model.save('./saved')
# Load configure and weights by neural_compressor.utils
saved_model = load("./saved", model)
eval_func(saved_model)
# recover int8 model from history
history_file = './saved/history.snapshot'
model_recover = recover(model, history_file, 0)
eval_func(model_recover)
self.assertEqual(type(saved_model.conv), \
type(model_recover.conv))
shutil.rmtree('./saved', ignore_errors=True)
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ptq_yaml.yaml')
# Load configure and weights by neural_compressor.model
evaluator.model = model
evaluator.b_dataloader = common.DataLoader(dataset)
evaluator()
evaluator.model = model
evaluator()
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = copy.deepcopy(self.model)
if fake_yaml == 'ptq_yaml.yaml':
model.eval().fuse_model()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (100, 3, 224, 224))
quantizer.model = model
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights by neural_compressor.utils
saved_model = load("./saved", model)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_quantization_new_saved(self):
for fake_yaml in ['dynamic_yaml.yaml', 'qat_yaml.yaml', 'ptq_yaml.yaml']:
model = M()
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/model.pt')
# Load configure and weights by neural_compressor.utils
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.load_quantized_state_dict(torch.load('./saved/model.pt'))
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_non_quant_module(self):
for fake_yaml in ['qat_yaml.yaml', 'ptq_yaml.yaml']:
model = PartialQuantModel()
conf = Quantization_Conf(fake_yaml)
quantizer = Quantization(conf)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224))
non_quant_dict = {'non_quant_module_name': ['conv', 'conv1', 'sub.conv'], \
'non_quant_module_class': ['BatchNorm2d', 'FP32Model']}
quantizer.model = common.Model(model, **non_quant_dict)
if fake_yaml == 'qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
q_model.save('./saved')
saved_model = load("./saved", model, **non_quant_dict)
eval_func(saved_model)
shutil.rmtree('./saved', ignore_errors=True)
def test_workspace_path(self):
model = M()
quantizer = Quantization('ptq_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
q_model = quantizer.fit()
eval_func(q_model)
torch.save(q_model.quantized_state_dict(), './saved/best_model.pt')
# Load configure and weights by workspace_path
from neural_compressor.experimental.common import Model
common_model = Model(model)
common_model.workspace_path = './saved'
eval_func(common_model)
self.assertEqual(type(q_model._model.linear), \
type(common_model._model.linear))
shutil.rmtree('./saved', ignore_errors=True)
def test_get_graph_info(self):
from neural_compressor.model.torch_model import PyTorchModel
model = PyTorchModel(self.model)
op_map = model.graph_info
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_tensorboard(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('dump_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model.model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_func = eval_func
quantizer.fit()
self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.eval_func = None
quantizer.fit()
self.assertTrue(os.path.exists('runs/eval/baseline_acc0.0'))
def test_tensor_dump_and_set(self):
model = copy.deepcopy(self.nc_model)
model.model.eval().fuse_model()
quantizer = Quantization('ptq_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
dataloader = common.DataLoader(dataset)
dataloader = common._generate_common_dataloader(dataloader, 'pytorch')
quantizer.eval_dataloader = dataloader
quantizer.calib_dataloader = dataloader
quantizer.model = model.model
q_model = quantizer.fit()
quantizer.strategy.adaptor.inspect_tensor(
model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=True)
load_array = lambda *a, **k: np.load(*a, allow_pickle=True, **k)
a = load_array('saved/dump_tensor/activation_iter1.npz')
w = load_array('saved/dump_tensor/weight.npz')
if PT_VERSION >= PyTorchVersionMode.PT18.value:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.0.output0'].shape[1])
else:
self.assertTrue(w['conv1.0'].item()['conv1.0.weight'].shape[0] ==
a['conv1.0'].item()['conv1.1.output0'].shape[1])
data = np.random.random(w['conv1.0'].item()['conv1.0.weight'].shape).astype(np.float32)
quantizer.strategy.adaptor.set_tensor(q_model, {'conv1.0.weight': data})
changed_tensor = q_model.get_weight('conv1.weight')
scales = changed_tensor.q_per_channel_scales()
changed_tensor_fp32 = torch.dequantize(changed_tensor)
self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=2 / np.min(scales.numpy())))
quantizer.strategy.adaptor.inspect_tensor(
q_model, dataloader, op_list=['conv1.0', 'layer1.0.conv1.0'],
iteration_list=[1, 2], inspect_type='all', save_to_disk=False)
def test_get_ops_recursively(self):
from neural_compressor.adaptor.pytorch import get_ops_recursively
model = copy.deepcopy(self.model)
op_map = {}
get_ops_recursively(model, '', op_map)
self.assertTrue(op_map['conv1'] == 'Conv2d')
def test_forward_wrapper(self):
vision_model = torchvision.models.resnet18()
class dummymodel(torch.nn.Module):
def __init__(self, model):
super(dummymodel, self).__init__()
self._model = model
def forward(self, input=None):
return self._model(input)
data = [[{'input': torch.rand(3,224,224)}, torch.ones(1,1)], ]
# dataloader.batch_size=100
dataloader = common.DataLoader(data, batch_size=1)
quantizer = Quantization('dynamic_yaml.yaml')
model = dummymodel(vision_model)
quantizer.model = model
quantizer.calib_dataloader = dataloader
quantizer.eval_dataloader = dataloader
quantizer.fit()
def test_floatfunctions_fallback(self):
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super(ModelWithFunctionals, self).__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
# Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
self.my_scalar_add = nnq.FloatFunctional()
self.mymul = nnq.FloatFunctional()
self.my_scalar_mul = nnq.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
y = self.mycat.cat([x, x, x])
z = self.myadd.add(y, y)
w = self.myadd_relu.add_relu(z, z)
# Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
w = self.my_scalar_add.add_scalar(w, -0.5)
w = self.mymul.mul(w, w)
w = self.my_scalar_mul.mul_scalar(w, 0.5)
w = self.dequant(w)
return w
model = ModelWithFunctionals()
model = MODELS['pytorch'](model)
x = torch.rand(10, 1, dtype=torch.float)
y = model.model(x)
fallback_ops = []
q_capability = self.adaptor.query_fw_capability(model)
for k, v in q_capability["opwise"].items():
if k[0] != "quant" and k[0] != "dequant":
fallback_ops.append(k[0])
model.model.qconfig = torch.quantization.default_qconfig
model.model.quant.qconfig = torch.quantization.default_qconfig
if PT_VERSION >= PyTorchVersionMode.PT18.value:
model.model.dequant.qconfig = torch.quantization.default_qconfig
nc_torch._fallback_quantizable_ops_recursively(
model.model, '', fallback_ops, op_qcfgs={})
torch.quantization.add_observer_(model.model)
model.model(x)
torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True)
qy = model.model(x)
tol = {'atol': 1e-01, 'rtol': 1e-03}
self.assertTrue(np.allclose(y, qy, **tol))
@unittest.skipIf(not TEST_IPEX, "Unsupported Intel PyTorch Extension")
class TestPytorchIPEXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_ipex_yaml()
@classmethod
def tearDownClass(self):
os.remove('ipex_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_tuning_ipex(self):
from neural_compressor.experimental import Quantization
model = M()
quantizer = Quantization('ipex_yaml.yaml')
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (100, 3, 224, 224), label=True)
quantizer.model = model
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
nc_model = quantizer.fit()
nc_model.save('./saved')
try:
script_model = torch.jit.script(model.to(ipex.DEVICE))
except:
script_model = torch.jit.trace(model.to(ipex.DEVICE), torch.randn(10, 3, 224, 224).to(ipex.DEVICE))
from neural_compressor.experimental import Benchmark
evaluator = Benchmark('ipex_yaml.yaml')
evaluator.model = script_model
evaluator.b_dataloader = common.DataLoader(dataset)
results = evaluator()
@unittest.skipIf(not FX_MODE, "Unsupported FX mode with PyTorch versions below 1.8")
class TestPytorchFXAdaptor(unittest.TestCase):
@classmethod
def setUpClass(self):
build_pytorch_fx_yaml()
@classmethod
def tearDownClass(self):
os.remove('fx_ptq_yaml.yaml')
os.remove('fx_dynamic_yaml.yaml')
shutil.rmtree('./saved', ignore_errors=True)
shutil.rmtree('runs', ignore_errors=True)
def test_fx_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = torchvision.models.resnet18()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights with neural_compressor.utils
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = M()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
quantizer.conf.usr_cfg.tuning.exit_policy['performance_only'] = True
dataset = quantizer.dataset('dummy', (10, 3, 224, 224), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights with neural_compressor.utils
model_fx = load('./saved', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
shutil.rmtree('./saved', ignore_errors=True)
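# Underlying FX-mode flow (a hedged sketch of the public PyTorch 1.8-era
# quantize_fx API that neural_compressor wraps here; `float_model` and
# `calib_batch` are placeholders, not objects from this test):
#
#   from torch.quantization.quantize_fx import prepare_fx, convert_fx
#   qconfig_dict = {"": torch.quantization.get_default_qconfig("fbgemm")}
#   prepared = prepare_fx(float_model.eval(), qconfig_dict)
#   prepared(calib_batch)  # calibration
#   int8_graph_module = convert_fx(prepared)  # a torch.fx GraphModule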
@unittest.skipIf(PT_VERSION < PyTorchVersionMode.PT19.value,
"Please use PyTorch 1.9 or a higher version for dynamic quantization with the pytorch_fx backend")
def test_fx_dynamic_quant(self):
# Model Definition
class LSTMModel(nn.Module):
'''Container module with an encoder, a recurrent module, and a decoder.'''
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5):
super(LSTMModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return decoded, hidden
model = LSTMModel(
ntoken = 10,
ninp = 512,
nhid = 256,
nlayers = 5,
)
# run fx_quant in neural_compressor and save the quantized GraphModule
model.eval()
quantizer = Quantization('fx_dynamic_yaml.yaml')
quantizer.model = common.Model(model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights by neural_compressor.utils
model_fx = load("./saved", model,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.code, model_fx_recover.code)
shutil.rmtree('./saved', ignore_errors=True)
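# The equivalent one-step eager API for dynamic quantization (a sketch of
# the public torch.quantization call this test exercises through the FX
# backend; `model` stands for any float module with LSTM/Linear layers):
#
#   int8_model = torch.quantization.quantize_dynamic(
#       model, {nn.LSTM, nn.Linear}, dtype=torch.qint8)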
def test_fx_sub_module_quant(self):
for fake_yaml in ['fx_qat_yaml.yaml', 'fx_ptq_yaml.yaml']:
model_origin = DynamicControlModel()
# run fx_quant in neural_compressor and save the quantized GraphModule
quantizer = Quantization(fake_yaml)
dataset = quantizer.dataset('dummy', (1, 3, 224, 224), label=True)
quantizer.eval_func = eval_func
if fake_yaml == 'fx_qat_yaml.yaml':
quantizer.q_func = q_func
else:
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = common.Model(model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
q_model = quantizer.fit()
q_model.save('./saved')
# Load configure and weights with neural_compressor.utils
model_fx = load('./saved/best_model.pt', model_origin,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule))
# recover int8 model with only tune_cfg
history_file = './saved/history.snapshot'
model_fx_recover = recover(model_origin, history_file, 0,
**{'prepare_custom_config_dict': \
{'non_traceable_module_name': ['a']},
'convert_custom_config_dict': \
{'preserved_attributes': []}
})
self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code)
shutil.rmtree('./saved', ignore_errors=True)
if __name__ == "__main__":
unittest.main()
| forward |
errors2.rs | // errors2.rs
// Say we're writing a game where you can buy items with tokens. All items cost
// 5 tokens, and whenever you purchase items there is a processing fee of 1
// token. A player of the game will type in how many items they want to buy,
// and the `total_cost` function will calculate the total number of tokens.
// Since the player typed in the quantity, though, we get it as a string-- and
// they might have typed anything, not just numbers!
// Right now, this function isn't handling the error case at all (and isn't
// handling the success case properly either). What we want to do is:
// if we call the `parse` function on a string that is not a number, that
// function will return a `ParseIntError`, and in that case, we want to
// immediately return that error from our function and not try to multiply
// and add.
// There are at least two ways to implement this that are both correct-- but
// one is a lot shorter! Execute `rustlings hint errors2` for hints to both ways.
use std::num::ParseIntError;
pub fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> |
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn item_quantity_is_a_valid_number() {
assert_eq!(total_cost("34"), Ok(171));
}
#[test]
fn item_quantity_is_an_invalid_number() {
assert_eq!(
total_cost("beep boop").unwrap_err().to_string(),
"invalid digit found in string"
);
}
}
| {
let processing_fee = 1;
let cost_per_item = 5;
let qty = match item_quantity.parse::<i32>() {
Ok(t) => t,
Err(e) => return Err(e),
};
Ok(qty * cost_per_item + processing_fee)
} |
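// A shorter equivalent using the `?` operator (the "a lot shorter" way the
// exercise comment hints at; shown here only as a sketch, not as part of
// the recorded answer):
//
// pub fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> {
//     Ok(item_quantity.parse::<i32>()? * 5 + 1)
// }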
QuestionsListUnanswered.js | import { connect } from 'react-redux'
import QuestionsListUnanswered from '../components/QuestionsListUnanswered'
function mapStateToProps({ authedUser, questions }) {
return {
questions,
questionListFiltered: Object.keys(questions).filter(q => {
return (!questions[q].optionOne.votes.includes(authedUser)
&& !questions[q].optionTwo.votes.includes(authedUser))
}) | }
}
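// Shape assumed by the selector above (a hypothetical sample, not real app
// state; votes arrays hold user ids):
// questions = {
//   q1: { timestamp: 2, optionOne: { votes: [] }, optionTwo: { votes: ['u1'] } },
// }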
export default connect(mapStateToProps)(QuestionsListUnanswered) | .sort((a, b) => {
return questions[b].timestamp - questions[a].timestamp
}) |
private_testnet.py | """
This test module will only run on a POSIX system. Windows support *may* be added at some point in the future.
"""
# Global imports
import json, operator, os, signal, sys
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from time import sleep, time
# local imports
from surfdebugnode import DebugNode
from surfapi.surfnoderpc import SurfNodeRPC
WAITING = True
def | ( ):
"""
This example contains a simple parser to obtain the locations of both surfd and the data directory,
creates and runs a new debug node, replays all of the blocks in the data directory, and finally waits
for the user to interface with it outside of the script. Sending SIGINT successfully and cleanly terminates
the program.
"""
global WAITING
if( os.name != "posix" ):
print( "This script only works on POSIX systems" )
return
parser = ArgumentParser( description='Run a Debug Node on an existing chain. This simply replays all blocks ' + \
'and then waits indefinitely to allow user interaction through RPC calls and ' + \
'the CLI wallet' )
parser.add_argument( '--surfd', '-s', type=str, required=True, help='The location of a surfd binary to run the debug node' )
parser.add_argument( '--data-dir', '-d', type=str, required=True, help='The location of an existing data directory. ' + \
'The debug node will pull blocks from this directory when replaying the chain. The directory ' + \
'will not be changed.' )
parser.add_argument( '--plugins', '-p', type=str, required=False, help='A list of plugins to load. witness and ' + \
'debug_node are always loaded.' )
parser.add_argument( '--apis', '-a', type=str, required=False, help='A list of apis to load. database_api, login_api, ' + \
'and debug_node_api are always loaded' )
args = parser.parse_args()
surfd = Path( args.surfd )
if( not surfd.exists() ):
print( 'Error: surfd does not exist.' )
return
surfd = surfd.resolve()
if( not surfd.is_file() ):
print( 'Error: surfd is not a file.' )
return
data_dir = Path( args.data_dir )
if( not data_dir.exists() ):
print( 'Error: data_dir does not exist or is not a properly constructed surfd data directory' )
return
data_dir = data_dir.resolve()
if( not data_dir.is_dir() ):
print( 'Error: data_dir is not a directory' )
plugins = list()
if( args.plugins ):
plugins = args.plugins.split()
apis = list()
if( args.apis ):
apis = args.apis.split()
signal.signal( signal.SIGINT, sigint_handler )
print( 'Creating and starting debug node' )
debug_node = DebugNode( str( surfd ), str( data_dir ), plugins=plugins, apis=apis, args='--replay', surfd_err=sys.stderr )
with debug_node:
debug_node.debug_generate_blocks_until( int( time() ), True )
debug_node.debug_set_hardfork( 14 )
print( 'Done!' )
print( 'Feel free to interact with this node via RPC calls for the cli wallet.' )
print( 'To shutdown the node, send SIGINT with Ctrl + C to this script. It will shut down safely.' )
while( WAITING ):
assert( debug_node.debug_generate_blocks( 1 ) == 1 )
sleep( 3 )
def sigint_handler( signum, frame ):
global WAITING
WAITING = False
sleep( 3 )
sys.exit( 0 )
main() | main |
digitalocean.py | from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
class Provider(BaseProvider):
def | (self, options, engine_overrides=None):
super(Provider, self).__init__(options, engine_overrides)
self.domain_id = None
self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')
def authenticate(self):
payload = self._get('/domains/{0}'.format(self.options['domain']))
self.domain_id = self.options['domain']
def create_record(self, type, name, content):
# Create the record only if an identical one does not already exist.
if len(self.list_records(type, name, content)) == 0:
record = {
'type': type,
'name': self._relative_name(name),
'data': content,
}
if type == 'CNAME':
record['data'] = record['data'].rstrip('.') + '.' # make sure the data is always a FQDN for CNAME records.
payload = self._post('/domains/{0}/records'.format(self.domain_id), record)
logger.debug('create_record: %s', True)
return True
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
url = '/domains/{0}/records'.format(self.domain_id)
records = []
payload = {}
next_url = url
while next_url is not None:
payload = self._get(next_url)
if 'links' in payload \
and 'pages' in payload['links'] \
and 'next' in payload['links']['pages']:
next_url = payload['links']['pages']['next']
else:
next_url = None
for record in payload['domain_records']:
processed_record = {
'type': record['type'],
'name': "{0}.{1}".format(record['name'], self.domain_id),
'ttl': '',
'content': record['data'],
'id': record['id']
}
records.append(processed_record)
if type:
records = [record for record in records if record['type'] == type]
if name:
records = [record for record in records if record['name'] == self._full_name(name)]
if content:
records = [record for record in records if record['content'].lower() == content.lower()]
logger.debug('list_records: %s', records)
return records
# Create or update a record.
def update_record(self, identifier, type=None, name=None, content=None):
data = {}
if type:
data['type'] = type
if name:
data['name'] = self._relative_name(name)
if content:
data['data'] = content
payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
logger.debug('update_record: %s', True)
return True
# Delete an existing record.
# If record does not exist, do nothing.
def delete_record(self, identifier=None, type=None, name=None, content=None):
delete_record_id = []
if not identifier:
records = self.list_records(type, name, content)
delete_record_id = [record['id'] for record in records]
else:
delete_record_id.append(identifier)
logger.debug('delete_records: %s', delete_record_id)
for record_id in delete_record_id:
payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, record_id))
# Deletion succeeded; _request raises on any non-2xx response.
logger.debug('delete_record: %s', True)
return True
# Helpers
def _request(self, action='GET', url='/', data=None, query_params=None):
if data is None:
data = {}
if query_params is None:
query_params = {}
default_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
}
if not url.startswith(self.api_endpoint):
url = self.api_endpoint + url
r = requests.request(action, url, params=query_params,
data=json.dumps(data),
headers=default_headers)
r.raise_for_status() # if the request fails for any reason, throw an error.
if action == 'DELETE':
return ''
else:
return r.json()
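# Minimal usage sketch (values are placeholders, not part of this module):
#
#   provider = Provider({'domain': 'example.com', 'auth_token': 'DO_API_TOKEN'})
#   provider.authenticate()
#   provider.create_record('TXT', '_acme-challenge', 'validation-token')
#   print(provider.list_records(type='TXT'))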
| __init__ |
event.go | package client
import (
"errors"
"fmt"
log "github.com/mhchlib/logger"
"github.com/mhchlib/mconfig/core/config"
"github.com/mhchlib/mconfig/core/event"
"github.com/mhchlib/mconfig/core/mconfig"
)
// Moved to the common package to avoid an import cycle.
//const EVENT_NOTIFY_KEY event.EventKey = "client_notify"
//type Event_Type int
//
//var(
// Event_Type_Config Event_Type = 0
// Event_Type_Filter Event_Type = 1
//)
//
//type ClientNotifyEventMetadata struct {
// AppKey mconfig.AppKey
// ConfigKey mconfig.ConfigKey
// Env mconfig.ConfigEnv
// Type Event_Type
//}
func | () {
err := event.RegisterMultiEventBus(mconfig.EVENT_KEY_CLIENT_NOTIFY, []event.EventType{event.Event_Change}, notifyClient)
if err != nil {
log.Error(err)
}
}
func notifyClient(metadata event.Metadata) {
eventMetadata, err := parseClientNotifyEventMetadata(metadata)
if err != nil {
log.Error(err)
}
switch eventMetadata.Type {
case mconfig.Event_Type_Config:
notifyClientConfigChange(eventMetadata.AppKey, eventMetadata.ConfigKey, eventMetadata.Env)
case mconfig.Event_Type_Filter:
notifyClientFilterChange(eventMetadata.AppKey)
default:
log.Error("not support client notify event type", eventMetadata.Type)
}
}
func notifyClientConfigChange(appKey mconfig.AppKey, configKey mconfig.ConfigKey, env mconfig.ConfigEnv) {
clientSet := getOnlineClientSetByConfigRealtion(appKey, configKey, env)
if clientSet == nil {
return
}
val, err := config.GetConfigFromCache(appKey, configKey, env)
if err != nil {
log.Error(err)
return
}
err = clientSet.SendMsg(&mconfig.ConfigChangeNotifyMsg{
Key: configKey,
Val: val.Val,
})
if err != nil {
log.Error(err)
}
}
func notifyClientFilterChange(appKey mconfig.AppKey) {
clientSet := getOnlineClientSetByAppRealtion(appKey)
if clientSet == nil {
return
}
err := clientSet.ReCalEffectEnv()
if err != nil {
log.Error(err)
}
}
func parseClientNotifyEventMetadata(metadata event.Metadata) (*mconfig.ClientNotifyEventMetadata, error) {
eventMetadata, ok := metadata.(mconfig.ClientNotifyEventMetadata)
if !ok {
return nil, fmt.Errorf("parse config event metadata error, metadata: %+v", metadata)
}
return &eventMetadata, nil
}
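// Example of the metadata shape this parser expects (a sketch; the field
// values are placeholders, and it assumes the struct satisfies
// event.Metadata and that the key types are string-based):
//
//	meta := mconfig.ClientNotifyEventMetadata{
//		AppKey:    "demo-app",
//		ConfigKey: "timeout",
//		Env:       "dev",
//		Type:      mconfig.Event_Type_Config,
//	}
//	parsed, err := parseClientNotifyEventMetadata(meta)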
| initEvent |
00.24.js | macDetailCallback("e8eb1b000000/24",[{"d":"2020-06-24","t":"add","s":"ieee-oui.csv","a":"2355 W. Chandler Blvd. Chandler AZ US 85224","c":"US","o":"Microchip Technology Inc."}]); | ||
sms.go | package services
import (
ran "crypto/rand"
"database/sql"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"regexp"
"time"
"github.com/TechCatsLab/sor/smservice/config"
"github.com/TechCatsLab/sor/smservice/model/mysql"
)
type SendSmsReply struct {
Message string `json:"Message,omitempty"`
RequestId string `json:"RequestId,omitempty"`
BizId string `json:"BizId,omitempty"`
Code string `json:"Code,omitempty"`
}
type SMS struct {
Mobile string
Date int64
Code string
Sign string
}
func newSms() *SMS {
sms := &SMS{}
return sms
}
// After sending, persist this information: timestamp, verification code, and mobile number.
// Prepare the structure to be sent.
func (sms *SMS) prepare(mobile, sign string, digits int) {
sms.Mobile = mobile
sms.Date = time.Now().Unix()
sms.Code = Code(digits)
sms.Sign = sign
}
func (sms *SMS) getDate(db *sql.DB) int64 {
unixtime, _ := mysql.GetDate(db, sms.Sign)
return unixtime
}
func (sms *SMS) getCode(db *sql.DB) string {
code, _ := mysql.GetCode(db, sms.Sign)
return code
}
// Validity check.
func (sms *SMS) checkvalid(db *sql.DB, conf *config.Config) error {
unixtime := sms.getDate(db)
if unixtime > 0 && sms.Date-unixtime < int64(conf.ResendInterval) {
return errors.New("短时间内不允许发送两次")
}
if err := VailMobile(sms.Mobile); err != nil {
return errors.New("手机号不符合规则")
}
return nil
}
// Persist to the database.
func (sms *SMS) save(db *sql.DB) error {
if err := mysql.Insert(db, sms.Mobile, sms.Date, sms.Code, sms.Sign); err != nil {
return err
}
return nil
}
// Delete the row from the database.
func (sms *SMS) delete(sign string, db *sql.DB) { mysql.Delete(db, sign) }
// Implement a send method that can be reused by just changing the configuration:
// 1. build the request URL
// 2. set the parameters
// aliyun
func (sms *SMS) sendmsg(conf *config.Config) error {
host := conf.Host
url := host + "?" + "code=" + sms.Code + "&phone=" + sms.Mobile + "&skin=1"
client := &http.Client{}
request, err := http.NewRequest("GET", url, nil)
request.Header.Add("Authorization", "APPCODE "+conf.Appcode)
response, err := client.Do(request)
if err != nil {
return err
}
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if err != nil {
return err
}
ssr := &SendSmsReply{}
if err := json.Unmarshal(body, ssr); err != nil {
return err
}
if ssr.Code != "OK" {
return errors.New(ssr.Code)
}
return nil
}
// Send generates a timestamp and verification code for the given mobile number and sign, sends the SMS, and persists the record.
func Send(mobile, sign string, conf *config.Config, db *sql.DB) error {
sms := newSms()
sms.prepare(mobile, sign, conf.Digits)
if err := sms.checkvalid(db, conf); err != nil {
return err
}
if err := sms.save(db); err != nil {
return err
}
if err := sms.sendmsg(conf); err != nil {
return err
}
return nil
}
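// Typical round trip (a sketch; conf, db, and the sign value come from the
// caller's own setup):
//
//	if err := Send("13812345678", sign, conf, db); err != nil {
//		// handle send failure
//	}
//	// later, when the user submits the code they received:
//	if err := Check(userInput, sign, conf, db); err != nil {
//		// handle verification failure
//	}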
// Check verifies the code for the given sign; nil means success.
func Check(code, sign string, conf *config.Config, db *sql.DB) error {
sms := newSms()
sms.Date = time.Now().Unix()
sms.Code = code
sms.Sign = sign
// Expiry check (note: not implemented here).
// Compare codes.
if sms.Code == sms.getCode(db) {
sms.delete(sms.Sign, db)
return nil
}
return errors.New("未知错误")
}
var numbers = []byte("012345678998765431234567890987654321")
// UID generates a UUID-formatted identifier.
func UID() string {
data := make([]byte, 16)
_, err := ran.Read(data)
if err != nil {
panic(err)
}
uuid := fmt.Sprintf("%X-%X-%X-%X-%X", data[0:4], data[4:6], data[6:8], data[8:10], data[10:])
return uuid
}
// Code generates a numeric verification code of the given length.
func Code(size int) string {
data := make([]byte, size)
out := make([]byte, size)
buffer := len(numbers)
_, err := ran.Read(data)
if err != nil {
panic(err)
}
for id, key := range data {
x := byte(int(key) % buffer)
out[id] = numbers[x]
}
return string(out)
}
// VailMobile reports whether the mobile number is well formed.
func VailMobile(mobile s | g) error {
if len(mobile) < 11 {
return errors.New("[mobile]参数不对")
}
reg, err := regexp.Compile("^1[3-8][0-9]{9}$")
if err != nil {
panic("regexp error")
}
if !reg.MatchString(mobile) {
return errors.New("手机号码[mobile]格式不正确")
}
return nil
}
| trin |
transform2d.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg_attr(feature = "cargo-clippy", allow(just_underscores_and_digits))]
use super::{UnknownUnit, Angle};
#[cfg(feature = "mint")]
use mint;
use num::{One, Zero};
use point::{Point2D, point2};
use vector::{Vector2D, vec2};
use rect::Rect;
use transform3d::Transform3D;
use core::ops::{Add, Mul, Div, Sub, Neg};
use core::marker::PhantomData;
use core::cmp::{Eq, PartialEq};
use core::hash::{Hash};
use approxeq::ApproxEq;
use trig::Trig;
use core::fmt;
use num_traits::NumCast;
#[cfg(feature = "serde")]
use serde;
/// A 2d transform stored as a 3 by 2 matrix in row-major order in memory.
///
/// Transforms can be parametrized over the source and destination units, to describe a
/// transformation from a space to another.
/// For example, `Transform2D<f32, WorldSpace, ScreenSpace>::transform_point`
/// takes a `Point2D<f32, WorldSpace>` and returns a `Point2D<f32, ScreenSpace>`.
///
/// Transforms expose a set of convenience methods for pre- and post-transformations.
/// A pre-transformation corresponds to adding an operation that is applied before
/// the rest of the transformation, while a post-transformation adds an operation
/// that is applied after.
///
/// These transforms are for working with _row vectors_, so the matrix math for transforming
/// a vector is `v * T`. If your library is using column vectors, use `row_major` functions when you
/// are asked for `column_major` representations and vice versa.
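///
/// A small usage sketch (not from the original docs; it assumes the crate's
/// `default` module of untyped aliases):
///
/// ```ignore
/// use euclid::default::{Point2D, Transform2D};
/// let t = Transform2D::create_translation(1.0, 2.0).post_scale(2.0, 2.0);
/// assert_eq!(t.transform_point(Point2D::new(1.0, 1.0)), Point2D::new(4.0, 6.0));
/// ```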
#[repr(C)]
pub struct Transform2D<T, Src, Dst> {
pub m11: T, pub m12: T,
pub m21: T, pub m22: T,
pub m31: T, pub m32: T,
#[doc(hidden)]
pub _unit: PhantomData<(Src, Dst)>,
}
impl<T: Copy, Src, Dst> Copy for Transform2D<T, Src, Dst> {}
impl<T: Clone, Src, Dst> Clone for Transform2D<T, Src, Dst> {
fn clone(&self) -> Self {
Transform2D {
m11: self.m11.clone(),
m12: self.m12.clone(),
m21: self.m21.clone(),
m22: self.m22.clone(),
m31: self.m31.clone(),
m32: self.m32.clone(),
_unit: PhantomData,
}
}
}
#[cfg(feature = "serde")]
impl<'de, T, Src, Dst> serde::Deserialize<'de> for Transform2D<T, Src, Dst>
where T: serde::Deserialize<'de>
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: serde::Deserializer<'de>
{
let (
m11, m12,
m21, m22,
m31, m32,
) = serde::Deserialize::deserialize(deserializer)?;
Ok(Transform2D {
m11, m12,
m21, m22,
m31, m32,
_unit: PhantomData
})
}
}
#[cfg(feature = "serde")]
impl<T, Src, Dst> serde::Serialize for Transform2D<T, Src, Dst>
where T: serde::Serialize
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer
{
(
&self.m11, &self.m12,
&self.m21, &self.m22,
&self.m31, &self.m32,
).serialize(serializer)
}
}
impl<T, Src, Dst> Eq for Transform2D<T, Src, Dst> where T: Eq {}
impl<T, Src, Dst> PartialEq for Transform2D<T, Src, Dst>
where T: PartialEq
{
fn eq(&self, other: &Self) -> bool {
self.m11 == other.m11 &&
self.m12 == other.m12 &&
self.m21 == other.m21 &&
self.m22 == other.m22 &&
self.m31 == other.m31 &&
self.m32 == other.m32
}
}
impl<T, Src, Dst> Hash for Transform2D<T, Src, Dst>
where T: Hash
{
fn hash<H: ::core::hash::Hasher>(&self, h: &mut H) {
self.m11.hash(h);
self.m12.hash(h);
self.m21.hash(h);
self.m22.hash(h);
self.m31.hash(h);
self.m32.hash(h);
}
}
impl<T, Src, Dst> Transform2D<T, Src, Dst> {
/// Create a transform specifying its matrix elements in row-major order.
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), then please use `column_major`
pub const fn | (m11: T, m12: T, m21: T, m22: T, m31: T, m32: T) -> Self {
Transform2D {
m11, m12,
m21, m22,
m31, m32,
_unit: PhantomData,
}
}
/// Create a transform specifying its matrix elements in column-major order.
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), then please use `row_major`
pub const fn column_major(m11: T, m21: T, m31: T, m12: T, m22: T, m32: T) -> Self {
Transform2D {
m11, m12,
m21, m22,
m31, m32,
_unit: PhantomData,
}
}
}
impl<T: Copy, Src, Dst> Transform2D<T, Src, Dst> {
/// Returns an array containing this transform's terms in row-major order (the order
/// in which the transform is actually laid out in memory).
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), then please use `to_column_major_array`
pub fn to_row_major_array(&self) -> [T; 6] {
[
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32
]
}
/// Returns an array containing this transform's terms in column-major order.
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), then please use `to_row_major_array`
pub fn to_column_major_array(&self) -> [T; 6] {
[
self.m11, self.m21, self.m31,
self.m12, self.m22, self.m32
]
}
/// Returns an array containing this transform's 3 rows (in row-major order)
/// as arrays.
///
/// This is a convenience method to interface with other libraries like glium.
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), this will return column major arrays.
pub fn to_row_arrays(&self) -> [[T; 2]; 3] {
[
[self.m11, self.m12],
[self.m21, self.m22],
[self.m31, self.m32],
]
}
/// Creates a transform from an array of 6 elements in row-major order.
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), please provide a column major array.
pub fn from_row_major_array(array: [T; 6]) -> Self {
Self::row_major(
array[0], array[1],
array[2], array[3],
array[4], array[5],
)
}
/// Creates a transform from 3 rows of 2 elements (row-major order).
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), please provide a column major array.
pub fn from_row_arrays(array: [[T; 2]; 3]) -> Self {
Self::row_major(
array[0][0], array[0][1],
array[1][0], array[1][1],
array[2][0], array[2][1],
)
}
/// Drop the units, preserving only the numeric value.
pub fn to_untyped(&self) -> Transform2D<T, UnknownUnit, UnknownUnit> {
Transform2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32
)
}
/// Tag a unitless value with units.
pub fn from_untyped(p: &Transform2D<T, UnknownUnit, UnknownUnit>) -> Self {
Transform2D::row_major(
p.m11, p.m12,
p.m21, p.m22,
p.m31, p.m32
)
}
}
impl<T0: NumCast + Copy, Src, Dst> Transform2D<T0, Src, Dst> {
/// Cast from one numeric representation to another, preserving the units.
pub fn cast<T1: NumCast + Copy>(&self) -> Transform2D<T1, Src, Dst> {
self.try_cast().unwrap()
}
/// Fallible cast from one numeric representation to another, preserving the units.
pub fn try_cast<T1: NumCast + Copy>(&self) -> Option<Transform2D<T1, Src, Dst>> {
match (NumCast::from(self.m11), NumCast::from(self.m12),
NumCast::from(self.m21), NumCast::from(self.m22),
NumCast::from(self.m31), NumCast::from(self.m32)) {
(Some(m11), Some(m12),
Some(m21), Some(m22),
Some(m31), Some(m32)) => {
Some(Transform2D::row_major(
m11, m12,
m21, m22,
m31, m32
))
},
_ => None
}
}
}
impl<T, Src, Dst> Transform2D<T, Src, Dst>
where T: Copy +
PartialEq +
One + Zero {
pub fn identity() -> Self {
let (_0, _1) = (Zero::zero(), One::one());
Transform2D::row_major(
_1, _0,
_0, _1,
_0, _0
)
}
// Intentionally not public, because it checks for exact equivalence
// while most consumers will probably want some sort of approximate
// equivalence to deal with floating-point errors.
fn is_identity(&self) -> bool {
*self == Transform2D::identity()
}
}
impl<T, Src, Dst> Transform2D<T, Src, Dst>
where T: Copy + Clone +
Add<T, Output=T> +
Mul<T, Output=T> +
Div<T, Output=T> +
Sub<T, Output=T> +
PartialOrd +
One + Zero {
/// Returns the multiplication of the two matrices such that mat's transformation
/// applies after self's transformation.
///
/// Assuming row vectors, this is equivalent to self * mat
#[must_use]
pub fn post_transform<NewDst>(&self, mat: &Transform2D<T, Dst, NewDst>) -> Transform2D<T, Src, NewDst> {
Transform2D::row_major(
self.m11 * mat.m11 + self.m12 * mat.m21,
self.m11 * mat.m12 + self.m12 * mat.m22,
self.m21 * mat.m11 + self.m22 * mat.m21,
self.m21 * mat.m12 + self.m22 * mat.m22,
self.m31 * mat.m11 + self.m32 * mat.m21 + mat.m31,
self.m31 * mat.m12 + self.m32 * mat.m22 + mat.m32,
)
}
/// Returns the multiplication of the two matrices such that mat's transformation
/// applies before self's transformation.
///
/// Assuming row vectors, this is equivalent to mat * self
#[inline]
#[must_use]
pub fn pre_transform<NewSrc>(&self, mat: &Transform2D<T, NewSrc, Src>) -> Transform2D<T, NewSrc, Dst> {
mat.post_transform(self)
}
/// Returns a translation transform.
#[inline]
pub fn create_translation(x: T, y: T) -> Self {
let (_0, _1): (T, T) = (Zero::zero(), One::one());
Transform2D::row_major(
_1, _0,
_0, _1,
x, y
)
}
/// Applies a translation after self's transformation and returns the resulting transform.
#[inline]
#[must_use]
pub fn post_translate(&self, v: Vector2D<T, Dst>) -> Self {
self.post_transform(&Transform2D::create_translation(v.x, v.y))
}
/// Applies a translation before self's transformation and returns the resulting transform.
#[inline]
#[must_use]
pub fn pre_translate(&self, v: Vector2D<T, Src>) -> Self {
self.pre_transform(&Transform2D::create_translation(v.x, v.y))
}
/// Returns a scale transform.
pub fn create_scale(x: T, y: T) -> Self {
let _0 = Zero::zero();
Transform2D::row_major(
x, _0,
_0, y,
_0, _0
)
}
/// Applies a scale after self's transformation and returns the resulting transform.
#[inline]
#[must_use]
pub fn post_scale(&self, x: T, y: T) -> Self {
self.post_transform(&Transform2D::create_scale(x, y))
}
/// Applies a scale before self's transformation and returns the resulting transform.
#[inline]
#[must_use]
pub fn pre_scale(&self, x: T, y: T) -> Self {
Transform2D::row_major(
self.m11 * x, self.m12,
self.m21, self.m22 * y,
self.m31, self.m32
)
}
/// Returns the given point transformed by this transform.
///
/// Assuming row vectors, this is equivalent to `p * self`
#[inline]
#[must_use]
pub fn transform_point(&self, point: Point2D<T, Src>) -> Point2D<T, Dst> {
Point2D::new(
point.x * self.m11 + point.y * self.m21 + self.m31,
point.x * self.m12 + point.y * self.m22 + self.m32
)
}
/// Returns the given vector transformed by this matrix.
///
/// Assuming row vectors, this is equivalent to `v * self`
#[inline]
#[must_use]
pub fn transform_vector(&self, vec: Vector2D<T, Src>) -> Vector2D<T, Dst> {
vec2(vec.x * self.m11 + vec.y * self.m21,
vec.x * self.m12 + vec.y * self.m22)
}
/// Returns a rectangle that encompasses the result of transforming the given rectangle by this
/// transform.
#[inline]
#[must_use]
pub fn transform_rect(&self, rect: &Rect<T, Src>) -> Rect<T, Dst> {
let min = rect.min();
let max = rect.max();
Rect::from_points(&[
self.transform_point(min),
self.transform_point(max),
self.transform_point(point2(max.x, min.y)),
self.transform_point(point2(min.x, max.y)),
])
}
/// Computes and returns the determinant of this transform.
pub fn determinant(&self) -> T {
self.m11 * self.m22 - self.m12 * self.m21
}
/// Returns the inverse transform if possible.
#[must_use]
pub fn inverse(&self) -> Option<Transform2D<T, Dst, Src>> {
let det = self.determinant();
let _0: T = Zero::zero();
let _1: T = One::one();
if det == _0 {
return None;
}
let inv_det = _1 / det;
Some(Transform2D::row_major(
inv_det * self.m22,
inv_det * (_0 - self.m12),
inv_det * (_0 - self.m21),
inv_det * self.m11,
inv_det * (self.m21 * self.m32 - self.m22 * self.m31),
inv_det * (self.m31 * self.m12 - self.m11 * self.m32),
))
}
/// Returns the same transform with a different destination unit.
#[inline]
pub fn with_destination<NewDst>(&self) -> Transform2D<T, Src, NewDst> {
Transform2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32,
)
}
/// Returns the same transform with a different source unit.
#[inline]
pub fn with_source<NewSrc>(&self) -> Transform2D<T, NewSrc, Dst> {
Transform2D::row_major(
self.m11, self.m12,
self.m21, self.m22,
self.m31, self.m32,
)
}
}
impl<T, Src, Dst> Transform2D<T, Src, Dst>
where T: Copy + Clone +
Add<T, Output=T> +
Mul<T, Output=T> +
Div<T, Output=T> +
Sub<T, Output=T> +
Trig +
PartialOrd +
One + Zero {
/// Returns a rotation transform.
#[inline]
pub fn create_rotation(theta: Angle<T>) -> Self {
let _0 = Zero::zero();
let cos = theta.get().cos();
let sin = theta.get().sin();
Transform2D::row_major(
cos, _0 - sin,
sin, cos,
_0, _0
)
}
/// Applies a rotation after self's transformation and returns the resulting transform.
#[inline]
#[must_use]
pub fn post_rotate(&self, theta: Angle<T>) -> Self {
self.post_transform(&Transform2D::create_rotation(theta))
}
/// Applies a rotation before self's transformation and returns the resulting transform.
#[inline]
#[must_use]
pub fn pre_rotate(&self, theta: Angle<T>) -> Self {
self.pre_transform(&Transform2D::create_rotation(theta))
}
}
impl <T, Src, Dst> Transform2D<T, Src, Dst>
where T: Copy + Clone +
Add<T, Output=T> +
Sub<T, Output=T> +
Mul<T, Output=T> +
Div<T, Output=T> +
Neg<Output=T> +
PartialOrd +
Trig +
One + Zero {
/// Create a 3D transform from the current transform
pub fn to_3d(&self) -> Transform3D<T, Src, Dst> {
Transform3D::row_major_2d(self.m11, self.m12, self.m21, self.m22, self.m31, self.m32)
}
}
impl <T, Src, Dst> Default for Transform2D<T, Src, Dst>
where T: Copy + PartialEq + One + Zero
{
fn default() -> Self {
Self::identity()
}
}
impl<T: ApproxEq<T>, Src, Dst> Transform2D<T, Src, Dst> {
/// Returns true if this transform is approximately equal to the other one, using
/// T's default epsilon value.
pub fn approx_eq(&self, other: &Self) -> bool {
self.m11.approx_eq(&other.m11) && self.m12.approx_eq(&other.m12) &&
self.m21.approx_eq(&other.m21) && self.m22.approx_eq(&other.m22) &&
self.m31.approx_eq(&other.m31) && self.m32.approx_eq(&other.m32)
}
/// Returns true if this transform is approximately equal to the other one, using
/// a provided epsilon value.
pub fn approx_eq_eps(&self, other: &Self, eps: &T) -> bool {
self.m11.approx_eq_eps(&other.m11, eps) && self.m12.approx_eq_eps(&other.m12, eps) &&
self.m21.approx_eq_eps(&other.m21, eps) && self.m22.approx_eq_eps(&other.m22, eps) &&
self.m31.approx_eq_eps(&other.m31, eps) && self.m32.approx_eq_eps(&other.m32, eps)
}
}
impl<T: Copy + fmt::Debug, Src, Dst> fmt::Debug for Transform2D<T, Src, Dst>
where T: Copy + fmt::Debug +
PartialEq +
One + Zero {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_identity() {
write!(f, "[I]")
} else {
self.to_row_major_array().fmt(f)
}
}
}
#[cfg(feature = "mint")]
impl<T, Src, Dst> From<mint::RowMatrix3x2<T>> for Transform2D<T, Src, Dst> {
fn from(m: mint::RowMatrix3x2<T>) -> Self {
Transform2D {
m11: m.x.x, m12: m.x.y,
m21: m.y.x, m22: m.y.y,
m31: m.z.x, m32: m.z.y,
_unit: PhantomData,
}
}
}
#[cfg(feature = "mint")]
impl<T, Src, Dst> Into<mint::RowMatrix3x2<T>> for Transform2D<T, Src, Dst> {
fn into(self) -> mint::RowMatrix3x2<T> {
mint::RowMatrix3x2 {
x: mint::Vector2 { x: self.m11, y: self.m12 },
y: mint::Vector2 { x: self.m21, y: self.m22 },
z: mint::Vector2 { x: self.m31, y: self.m32 },
}
}
}
#[cfg(test)]
mod test {
use super::*;
use default;
use approxeq::ApproxEq;
#[cfg(feature = "mint")]
use mint;
use core::f32::consts::FRAC_PI_2;
type Mat = default::Transform2D<f32>;
fn rad(v: f32) -> Angle<f32> { Angle::radians(v) }
#[test]
pub fn test_translation() {
let t1 = Mat::create_translation(1.0, 2.0);
let t2 = Mat::identity().pre_translate(vec2(1.0, 2.0));
let t3 = Mat::identity().post_translate(vec2(1.0, 2.0));
assert_eq!(t1, t2);
assert_eq!(t1, t3);
assert_eq!(t1.transform_point(Point2D::new(1.0, 1.0)), Point2D::new(2.0, 3.0));
assert_eq!(t1.post_transform(&t1), Mat::create_translation(2.0, 4.0));
}
#[test]
pub fn test_rotation() {
let r1 = Mat::create_rotation(rad(FRAC_PI_2));
let r2 = Mat::identity().pre_rotate(rad(FRAC_PI_2));
let r3 = Mat::identity().post_rotate(rad(FRAC_PI_2));
assert_eq!(r1, r2);
assert_eq!(r1, r3);
assert!(r1.transform_point(Point2D::new(1.0, 2.0)).approx_eq(&Point2D::new(2.0, -1.0)));
assert!(r1.post_transform(&r1).approx_eq(&Mat::create_rotation(rad(FRAC_PI_2*2.0))));
}
#[test]
pub fn test_scale() {
let s1 = Mat::create_scale(2.0, 3.0);
let s2 = Mat::identity().pre_scale(2.0, 3.0);
let s3 = Mat::identity().post_scale(2.0, 3.0);
assert_eq!(s1, s2);
assert_eq!(s1, s3);
assert!(s1.transform_point(Point2D::new(2.0, 2.0)).approx_eq(&Point2D::new(4.0, 6.0)));
}
#[test]
fn test_column_major() {
assert_eq!(
Mat::row_major(
1.0, 2.0,
3.0, 4.0,
5.0, 6.0
),
Mat::column_major(
1.0, 3.0, 5.0,
2.0, 4.0, 6.0,
)
);
}
#[test]
pub fn test_inverse_simple() {
let m1 = Mat::identity();
let m2 = m1.inverse().unwrap();
assert!(m1.approx_eq(&m2));
}
#[test]
pub fn test_inverse_scale() {
let m1 = Mat::create_scale(1.5, 0.3);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_transform(&m2).approx_eq(&Mat::identity()));
}
#[test]
pub fn test_inverse_translate() {
let m1 = Mat::create_translation(-132.0, 0.3);
let m2 = m1.inverse().unwrap();
assert!(m1.pre_transform(&m2).approx_eq(&Mat::identity()));
}
#[test]
fn test_inverse_none() {
assert!(Mat::create_scale(2.0, 0.0).inverse().is_none());
assert!(Mat::create_scale(2.0, 2.0).inverse().is_some());
}
#[test]
pub fn test_pre_post() {
let m1 = default::Transform2D::identity().post_scale(1.0, 2.0).post_translate(vec2(1.0, 2.0));
let m2 = default::Transform2D::identity().pre_translate(vec2(1.0, 2.0)).pre_scale(1.0, 2.0);
assert!(m1.approx_eq(&m2));
let r = Mat::create_rotation(rad(FRAC_PI_2));
let t = Mat::create_translation(2.0, 3.0);
let a = Point2D::new(1.0, 1.0);
assert!(r.post_transform(&t).transform_point(a).approx_eq(&Point2D::new(3.0, 2.0)));
assert!(t.post_transform(&r).transform_point(a).approx_eq(&Point2D::new(4.0, -3.0)));
assert!(t.post_transform(&r).transform_point(a).approx_eq(&r.transform_point(t.transform_point(a))));
assert!(r.pre_transform(&t).transform_point(a).approx_eq(&Point2D::new(4.0, -3.0)));
assert!(t.pre_transform(&r).transform_point(a).approx_eq(&Point2D::new(3.0, 2.0)));
assert!(t.pre_transform(&r).transform_point(a).approx_eq(&t.transform_point(r.transform_point(a))));
}
#[test]
fn test_size_of() {
use core::mem::size_of;
assert_eq!(size_of::<default::Transform2D<f32>>(), 6*size_of::<f32>());
assert_eq!(size_of::<default::Transform2D<f64>>(), 6*size_of::<f64>());
}
#[test]
pub fn test_is_identity() {
let m1 = default::Transform2D::identity();
assert!(m1.is_identity());
let m2 = m1.post_translate(vec2(0.1, 0.0));
assert!(!m2.is_identity());
}
#[test]
pub fn test_transform_vector() {
// Translation does not apply to vectors.
let m1 = Mat::create_translation(1.0, 1.0);
let v1 = vec2(10.0, -10.0);
assert_eq!(v1, m1.transform_vector(v1));
}
#[cfg(feature = "mint")]
#[test]
pub fn test_mint() {
let m1 = Mat::create_rotation(rad(FRAC_PI_2));
let mm: mint::RowMatrix3x2<_> = m1.into();
let m2 = Mat::from(mm);
assert_eq!(m1, m2);
}
}
| row_major |
_zauto.py | import _plotly_utils.basevalidators
class ZautoValidator(_plotly_utils.basevalidators.BooleanValidator):
| def __init__(self, plotly_name="zauto", parent_name="histogram2d", **kwargs):
super(ZautoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs
) |
|
bindata.go | // Code generated for package deploy by go-bindata DO NOT EDIT. (@generated)
// sources:
// cluster-predeploy.json
// databases-development.json
// env-development.json
// gateway-production-managed-identity.json
// gateway-production-parameters.json
// gateway-production-predeploy-parameters.json
// gateway-production-predeploy.json
// gateway-production.json
// rbac-development.json
// rp-development-predeploy.json
// rp-development.json
// rp-production-global-acr-replication.json
// rp-production-global-subscription.json
// rp-production-global.json
// rp-production-managed-identity.json
// rp-production-parameters.json
// rp-production-predeploy-parameters.json
// rp-production-predeploy.json
// rp-production-subscription.json
// rp-production.json
package deploy
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
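// Example (a sketch): decompressing one embedded asset by hand.
//
//	data, err := bindataRead(_clusterPredeployJson, "cluster-predeploy.json")
//	if err != nil {
//		// handle err
//	}
//	_ = data // the payload is the original cluster-predeploy.json template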
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
// Name returns the file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size returns the file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode returns the file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// ModTime returns the file modification time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir reports whether the file is a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
}
// Sys returns the underlying data source (always nil here)
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _clusterPredeployJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x55\x4d\x8f\xda\x30\x10\xbd\xf3\x2b\xa2\xb4\x52\x40\xca\x17\x48\x95\x2a\x6e\x3d\x55\x3d\x74\xbb\xd2\xae\xf6\x82\x72\x18\x9c\x81\x75\xeb\xd8\x96\xed\xd0\xd2\x8a\xff\x5e\x25\x21\x81\x38\x09\xec\x22\x38\xb4\x5a\xed\x65\x63\xcf\x8c\xdf\xbc\x37\x6f\xf8\x33\x72\x1c\xc7\x71\xdf\x6b\xf2\x8c\x19\xb8\x73\xc7\x7d\x36\x46\xea\x79\x14\x55\x27\x61\x06\x1c\xd6\x98\x21\x37\x21\xfc\xce\x15\x86\x44\x64\xfb\x3b\x1d\xcd\xe2\xe9\x87\x20\x9e\x06\xf1\x34\x4a\x51\x32\xb1\x2d\xe2\x1e\x31\x93\x0c\x0c\x86\xdf\xb5\xe0\xef\x5c\xbf\x7a\x81\x08\x6e\x90\x9b\x27\x54\x9a\x0a\x5e\x3c\x34\x0d\xe3\xe2\xaf\x0e\x90\xa0\x20\x43\x83\x4a\xbb\x73\xa7\x82\x55\x25\xd2\xd6\x77\x79\x66\xb6\x12\x8b\x12\x4b\x21\xd8\x3e\xbf\xb9\x4b\x71\x05\x39\x33\x4f\xc0\xf2\x22\x66\x05\x4c\x63\x13\xb1\xf3\x8f\x0a\xb3\x5c\x1b\x54\x77\x90\xe1\xf0\x0b\xda\x28\xca\xd7\xee\xa9\x02\x0f\xa8\x36\x94\xe0\xbd\xa2\x9c\x50\x09\xec\x4b\x7a\x59\xb9\x95\xbc\x56\xa5\x0c\x0a\x5c\x9f\xd2\x54\xa1\xd6\xf7\x0a\x57\xf4\xd7\x65\x85\x94\xc8\x0d\xea\xe1\x5c\x50\x0a\xb6\x67\x04\x58\x24\xbd\xa5\x37\x1c\xcd\x15\x10\xfe\x14\xea\xc7\xe5\xad\x8e\x8e\xca\xb9\x0a\xb5\xc8\x15\x29\xfb\x5d\x34\x31\x56\x29\xa9\x84\x44\x65\x68\x0f\x2b\xe5\x3d\x54\x48\x1e\x24\x90\xee\x54\xd9\x51\x15\x5e\xeb\xc1\x4e\xf0\xe2\xe0\x8c\xb1\xd7\xa1\xcd\x9b\x24\x6e\x6f\x6e\xd2\x39\xdd\xb5\x4e\x76\x96\x6c\xbc\xf2\x81\x9b\xe2\x26\x28\x5e\xb1\x65\xad\x49\xfc\x4a\x89\x12\x5a\xac\x4c\x78\x87\xa6\xa0\x3f\xda\x50\x65\x72\x60\xfb\x4f\x6d\x27\x32\x41\xc0\xec\x3d\xbf\xa8\x49\xfe\xac\x44\x2e\xc7\x93\xb0\xbe\x4c\xec\x2c\x22\x78\x4a\x9b\xb4\x63\x0a\x08\x2d\x7a\xb6\xc2\x41\xd2\xa3\xd5\x32\x8b\x67\x71\x10\x7f\x0c\xe2\x69\xef\xd4\xbc\x52\xd3\xc6\x05\x6d\x1c\xd5\x71\x87\xff\x21\x5e\x17\x44\x70\x02\x66\xdc\x6a\xe5\xb0\x82\xbc\x89\xef\x78\x81\x32\x3d\xbd\x0d\x33\x5f\x42\x78\x84\x25\xc3\x2b\xb1\x7e\x4b\x1a\xc1\x32\x69\x9b\xcd\x9e\x9d\xd5\xa5\xe2\x20\x47\xd9\xf4\xb0\xc1\x68\xda\xea\x9b\xa6\x63\xef\x24\x7b\x9e\xef\xbc\x54\x9e\x5e\x50\x95\x4e\xb0\x2e\x5a\xe7\x39\x63\x17\x7a\xaf\x9e\x11\xaf\x36\x61\xe4\xf9\xce\x49\x44\x15\x6d\xaf\x1a\x1a\xcb\xae\x91\xce\x97\x1c\x4d\x67\x80\x06\x47\xa1\xb3\xee\x25\xf2\x54\x7f\xe3\xbd\x5b\xec\x9c\x06\x16\x18\xcf\x77\x9a\xde\x07\xe4\xbf\x95\xa8\xad\x97\x92\xcb\xdc\x74\x73\x97\xf4\xfc\xdc\xbd\xb9\xe4\xbc\x4b\x2a\xda\xfe\x1b\x97\xd4\x60\x5a\x6e\x79\x99\x36\xf5\xbe\xf8\x97\xbd\x55\xfe\x97\x8c\x76\xa3\xbf\x01\x00\x00\xff\xff\x9c\xba\xe7\x92\xbd\x0c\x00\x00")
func clusterPredeployJsonBytes() ([]byte, error) {
return bindataRead(
_clusterPredeployJson,
"cluster-predeploy.json",
)
}
func clusterPredeployJson() (*asset, error) {
bytes, err := clusterPredeployJsonBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "cluster-predeploy.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _databasesDevelopmentJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x5d\x4f\xdb\x30\x14\x7d\xef\xaf\x88\xbc\x49\x69\xa5\x36\x49\xd1\x98\xa6\xbe\xc1\x90\x18\x42\x0c\x34\xd0\x5e\xaa\x3e\x18\xe7\x42\x3c\x12\xdb\xd8\x37\x9a\xba\x89\xff\x3e\xb9\x49\xda\xa6\x49\x03\x48\x7c\xac\x51\xcc\x13\xf6\xf1\xb9\xd7\xf7\x9e\x63\xb7\xfd\xdb\x73\x1c\xc7\x21\x1f\x0d\x8b\x20\xa1\x64\xe2\x90\x08\x51\x99\x89\xef\x67\x33\x5e\x42\x05\xbd\x85\x04\x04\x7a\xf4\x4f\xaa\xc1\x63\x32\xc9\xd7\x8c\xbf\x17\x8c\xf7\x47\xc1\x78\x14\x8c\xfd\x10\x54\x2c\xe7\x16\x77\x05\x89\x8a\x29\x82\xf7\xcb\x48\xf1\x81\x0c\xb3\x08\x4c\x0a\x04\x81\x3f\x41\x1b\x2e\x85\x0d\x34\xf6\x02\xfb\x57\x00\x14\xd5\x34\x01\x04\x6d\xc8\xc4\xc9\xd2\x5a\xcc\x87\x14\xe9\x35\x35\x70\xc0\x98\x4c\x05\x7e\xa7\x09\x94\x00\x0b\x10\xce\x95\x9d\x25\x06\x35\x17\xb7\x64\xb9\xf8\x30\xac\x12\x3d\x93\xa1\xb7\xc6\x43\x34\x18\x99\x6a\x06\x36\xc7\xe9\x12\xb3\x41\xa5\xb4\x54\xa0\x91\x83\xa9\x84\x29\x91\xd4\xae\x2e\x10\x3c\xb4\xa9\x4c\x57\x25\xe9\xbb\xeb\xd9\xbb\x83\x19\xa9\x6c\x5c\x3b\xea\x92\x47\x2a\xe4\x52\xd4\xa7\x91\x9d\x3a\xd2\x32\xbd\x8d\x54\x8a\x64\xe2\xec\x07\x41\x95\xb5\xd7\x10\x83\x88\xac\x94\x64\xca\xa4\x60\x14\xfb\x75\x09\xaf\xf5\xcd\x1d\x0c\x1d\xd7\x77\x87\xce\xf6\x83\x0d\x66\x64\x58\xdf\x98\x33\xce\xb4\x34\xf2\x06\xbd\x23\xc9\x52\x2b\xb4\xa3\x43\x7f\x23\x88\xf1\xcd\x7d\x7c\x94\xcf\x99\x4d\xa6\x58\x32\x8a\xb9\xf8\xa6\x45\x13\x8e\xb5\x4c\x55\x7f\xe0\x15\x8b\x95\xf8\x54\xf1\x35\xd1\xee\x05\x7b\x56\xee\xa3\xf1\x7e\xad\xc8\x5e\x49\x09\x07\x66\x2e\xd8\xb9\x02\x4d\xb3\x7e\x56\x5b\xed\xe4\x16\x42\x6e\x11\xa7\x30\xdf\x4a\x99\x23\x31\x2a\x6b\xb8\x16\xe6\xf3\xb0\x2a\xb4\x62\xcc\xea\xb3\x58\x6c\xbc\xe3\x62\x91\xf8\x37\x6a\xa2\x7a\x86\x1a\xb9\x2e\x76\x86\x70\x43\xd3\x18\xaf\x30\x26\x13\xe7\x73\xf0\xe9\x4b\x9d\x28\x9b\xa5\xfe\xd6\x9a\xb5\x80\x8d\x0e\xb9\x2f\xa9\x63\xdf\xde\x9d\x94\x0b\x7b\x35\xbe\xae\xa4\x37\x70\x21\x28\x10\xa1\x39\x17\xb5\x4a\x59\x05\x3c\x09\xfb\xee\xf3\x8f\xb5\xa5\xa6\x1b\xb5\x6f\xbc\x2a\x4a\x39\xcd\xde\xd0\x90\x87\x3c\x8e\xed\x33\xd1\x06\x23\xee\x82\xbd\xf2\x7a\x77\xb6\x6a\xb7\xad\x8e\x29\xc2\x6f\x3a\x6f\x85\xad\x9e\xf2\xbe\x8d\xc6\xbb\x60\xbe\xbc\x2b\x9d\xf9\xda\x6d\xbe\x33\x29\x38\xca\x4a\x3f\x56\x71\x3a\xf7\xbd\x87\xfb\x8a\xb6\x74\xf6\x6b\xb7\xfd\xce\x15\x88\xcb\x88\xdf\xe0\xd7\x38\x35\x58\x6d\xcc\x2a\xe0\x8b\xfb\xb0\xc4\xf8\xc6\x8e\x4c\x05\xbf\x4f\xe1\x14\xe6\x17\x32\xe6\xec\x91\x03\x2d\xc1\x8f\x9f\x6a\x3b\xcb\x92\xed\x69\xe5\x59\xc2\xfd\xbb\xa6\xea\x14\x63\xd6\x88\xd8\x52\x85\x62\xbc\x42\xd2\x2c\x53\xd3\x8f\x75\x23\x9f\x84\x8d\x8d\x2e\xc6\xff\x77\x14\x0e\x02\x5f\x26\xf9\xed\x2a\xdf\xdd\x2f\x49\x95\x1b\xa4\x7b\x32\xda\xfd\x64\x5c\x48\x8d\x34\xee\x3e\xaf\x95\xf7\xbc\xb3\x0b\xb3\xa6\x74\xd6\x6b\xb7\xf5\x2e\xd3\x6b\xc3\x34\x57\xed\xf9\x3d\x7e\x17\xbc\x55\xaa\x7a\x67\xb1\x77\xb5\x58\x2f\xfb\xff\xa1\xf7\x2f\x00\x00\xff\xff\xc7\xe3\x9d\xb7\x43\x1e\x00\x00")
func databasesDevelopmentJsonBytes() ([]byte, error) {
return bindataRead(
_databasesDevelopmentJson,
"databases-development.json",
)
}
func databasesDevelopmentJson() (*asset, error) {
bytes, err := databasesDevelopmentJsonBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "databases-development.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _envDevelopmentJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7c\x6b\x73\xa3\x48\xb2\xf6\xf7\xf9\x15\x0e\xbf\x1b\xa1\x76\xbc\x6d\x1b\x90\xd5\xd3\x6c\xc4\x7e\x40\xd8\x42\x48\x08\x4b\x20\x01\x62\x76\x62\xa2\x28\xca\xa8\xa4\xe2\x72\xa0\x40\x46\x1b\xf3\xdf\x4f\x80\xee\x77\xf9\xd2\xe7\xec\xd9\x69\x75\x84\xdb\x86\xcc\xac\xac\xac\xbc\x55\xd5\x63\xff\xeb\x97\xab\xab\xab\xab\xeb\xbf\x25\x70\x84\x7c\x70\xfd\xf7\xab\xeb\x11\xa5\x51\xf2\xf7\xfb\xfb\xf9\x93\x3b\x1f\x04\xc0\x43\x3e\x0a\xe8\x1d\x98\xa5\x31\xba\x83\xa1\xbf\x78\x97\xdc\x73\x0c\x5b\xbb\x65\xd8\x5b\x86\xbd\x77\x51\x44\xc2\xbc\xa0\xeb\x23\x3f\x22\x80\xa2\xbb\x71\x12\x06\xff\xef\xfa\xeb\x7c\x04\x18\x06\x14\x05\xd4\x40\x71\x82\xc3\xa0\x18\x88\xbd\x63\x8a\x7f\x4b\x82\x08\xc4\xc0\x47\x14\xc5\xc9\xf5\xdf\xaf\xe6\x6a\xcd\x19\xb1\x30\x8b\xfa\xe1\x04\x05\x5b\xcf\xcb\x77\x34\x8f\x50\x21\x2a\xa1\x31\x0e\xbc\x85\xa4\xd5\x5b\x17\xbd\x80\x94\x50\x03\x90\xb4\xa4\xba\x5e\xbd\xfe\xf3\xeb\xa6\x7c\x11\x44\x00\x62\x9a\x1f\x97\x8f\x03\x7a\x46\x38\x73\x44\x76\x37\x0c\x89\x0a\x7c\xf4\x63\x74\x8f\xe2\xf0\x35\x17\x51\x4c\xcf\x8a\x3f\xc1\x4e\x30\x0a\xe8\x07\x85\x3c\x86\x3e\xc0\x41\x31\x51\x05\x38\x88\x7c\x40\x92\xec\x03\xef\xbc\xb5\xce\xf0\x0b\x29\x1d\x9d\x90\x81\x60\x1a\xa3\xb3\x92\xda\xe8\x84\x47\x5c\x20\x23\x75\x08\x86\x72\x57\x70\xdd\x18\x25\x89\x40\x48\x08\x01\xc5\x61\xd0\x41\x74\x14\xba\x1f\x75\x08\x9d\x02\x8a\xe1\x45\x43\xeb\x93\xf4\x33\x5c\x50\xa7\x20\x70\x41\xec\x1e\x1e\x33\x49\x46\xdd\x72\xd8\xd3\x66\x3b\x61\xb0\x2c\x0a\x44\xa1\xf0\x43\xfc\x82\x21\xa0\x9f\x19\x32\xbf\x6c\x8c\x75\x1d\xa3\x24\x4c\x63\x88\x8a\x3c\xf3\xdb\x8a\x66\x67\xa8\x64\x92\xee\x8d\x5f\xbe\x08\xe6\x96\xbc\xfe\x6d\x9d\xaf\xbe\x54\x0e\x1b\xbc\x72\xf3\xfb\xf5\x96\x80\x3f\x77\x34\x8e\xe2\x30\x2a\xe6\x8b\x92\xc3\x63\xad\xc4\xee\xfb\xce\xc9\xf1\x77\xe9\xcf\x2a\xb2\x9c\x94\x8b\xb2\xdb\x2c\x0a\x6e\x23\x1c\xed\x5a\x77\x69\xfb\x0e\x86\x71\x98\x84\x2f\xf4\x4e\x45\x74\x1a\xc6\x93\xfb\x9d\xc1\x51\xb2\xcb\xba\x54\xa6\x54\x7b\x69\x7d\x29\x0e\xd3\xe8\xcb\xcd\xdd\xf2\xe5\xef\xbb\x5c\x30\x0c\x5c\xbc\x62\x43\xff\x95\x02\x92\x7c\xd9\x9c\xf4\x3a\x71\x57\x6e\xbe\x5e\x31\x37\x7b\x12\x40\x84\x37\xca\x0d\xc7\x70\xcc\x2d\xf3\xfd\x96\x61\x0f\xba\xdf\xbf\xde\xb6\x34\x60\xb1\xd2\x11\x80\xfb\x8e\xba\x4b\xd5\x8d\xd1\x0b\x7e\xdd\x71\xb8\x3d\x62\x96\x99\x17\xc4\x7b\xfe\xfa\x20\xd1\xef\x7b\x4f\x77\x96\xb1\x94\x92\xa4\x4e\x80\xe8\xf1\xa1\x0e\x6b\x7a\x75\xc1\x9c\x8f\xcf\xac\xac\xe6\x4b\xed\xb9\x87\xc3\xea\x1f\x51\x78\x25\x71\xe9\x83\x12\xa0\x68\x0a\x72\xbd\x9c\xc7\x61\x51\x47\xc4\xfc\xd0\xa9\xb1\xf3\xa9\x1d\x9f\xc0\x7c\x12\xf3\x98\xd0\x8b\xea\x80\x69\x5e\x3a\xf9\xd9\x11\x4b\x4e\xec\x6e\x85\x87\xec\x7e\xa9\xec\x87\xda\x21\xf1\x49\xe5\xeb\x55\x25\x8e\x6e\x83\xc4\xab\xec\xc5\xc0\xc1\xa1\x28\xf0\x0a\x33\x04\x29\x21\x27\x89\xff\xfc\xd0\x3a\xf6\xc3\x90\xe0\xc0\x3b\xb9\x8e\x7b\x4f\x7f\xbf\x3c\x4d\x15\x52\x2f\xce\x51\x19\x8e\x69\x0a\xc8\xe2\xc7\x4f\xca\x50\x3f\x32\xbf\xe0\x48\x0c\x83\x17\xec\xa5\x71\x39\xf6\x0f\x8f\xe7\x79\xda\xf8\x44\x57\xdd\xb1\xf8\xfd\x22\x2f\x15\xce\xba\x5c\xbe\xe2\xfb\xad\x68\xdf\xab\x53\xbb\x9f\x13\x7e\x77\x75\xa0\xf1\xf9\xc4\xe9\xec\x15\xb9\xd5\x44\xe6\xe5\xf2\xbc\xea\x1f\x0a\xa6\x45\x6f\x73\x71\x18\x1d\x28\x0c\x59\x14\xf4\x17\xf1\xa1\x85\x29\x45\x75\x90\x20\xf7\x40\xba\x38\xda\xfa\x6c\x29\x64\x44\x81\x34\x65\x8f\x64\x9b\x6b\x8a\x51\xbc\x41\x75\x51\xe1\x2a\xda\xbf\xf9\x56\x64\xd3\xf1\x8f\x6b\xb2\xa2\x5f\x2c\x49\xb1\xd3\x3a\xb9\xe0\x6f\xaa\xc6\x25\x03\xcb\x73\x77\xec\xb7\xef\x77\x5c\xad\x76\xa6\xb0\xed\xd7\xe6\x23\xd3\xdc\x56\x5d\x0b\x43\xba\xd1\xf1\x9e\x56\xe9\x4c\x04\xbf\x21\xda\xaf\xd6\xb1\
x52\x8c\xfe\x08\x28\xd8\x6b\x29\x77\xbb\xf1\x0f\xc7\xe6\x6e\x8f\x09\xc1\x89\x3e\xe1\x70\x03\x74\xce\x9c\xdd\x38\xa4\x21\x0c\xc9\x99\x46\xeb\x39\x42\x81\xd1\x55\x2f\xee\xb2\xde\xd2\x3a\xbf\xb7\x24\x2d\xf2\xe0\xbf\x7f\xef\xbc\xb7\xe9\x8a\x50\xe0\x26\xcf\xc1\x41\x93\x7f\x46\x56\x3d\x90\x2a\xde\x56\x7a\xb6\x4a\xce\xae\x17\xff\x7e\x41\xad\x3e\xbb\x17\x5c\xee\x8c\xff\x78\xe4\x92\x3f\xb2\xea\x21\x95\x97\x19\x71\xb5\x89\x3e\x40\x03\xd7\xe7\x50\xdb\xb1\xb8\xb9\x5a\x1f\xdc\x52\xa6\x91\x17\x03\x17\x75\x43\x82\xe1\xfe\x3e\x7d\x45\xe6\x87\xee\xdc\x6d\x41\x90\x02\x72\x61\x02\x9f\x9b\xbd\x03\xe0\x08\x07\xa8\x1b\x87\x2f\x98\x9c\xd8\x1b\x85\xc9\x39\x92\xb9\x51\x42\x3f\x4a\x29\x8a\x8b\x0d\xf5\xba\x25\x87\xf8\xf6\x44\xa7\x7b\x0d\x5c\x1f\x07\x83\x04\xc5\xcb\x15\x82\x24\x4c\xdd\xdb\x34\x41\xf1\x29\x36\x82\x83\xf4\xf5\xb2\xf2\xb3\xe2\x71\x71\x02\x1c\x82\xba\x20\x49\xa6\x61\xec\x0a\x29\x1d\xa1\x80\xe2\x55\xd4\xd2\x38\x45\x67\x32\x63\x92\xec\x1f\x56\x1d\x24\x8c\x96\x07\x2c\xe7\x6b\xd7\xf2\x73\x5e\xea\x5a\x3a\x28\x0f\xcd\xae\xef\x47\xa1\x8f\xee\xd7\x16\xbb\xbf\x4b\x92\xd1\x3d\x48\xe9\x28\x8c\xf1\x0c\xb9\x7f\x4c\xd0\x5e\x9e\x3a\x29\x78\x82\xf2\x83\x25\x66\xf3\xc8\xe8\x6c\x79\x59\x7e\x8e\xf7\x51\xcb\xcf\xe1\x5a\x7c\x9e\xff\xf0\x9b\x63\x35\x3c\xa1\x61\x0c\xbc\xb3\x6e\x5e\xd2\x62\x1f\x78\x48\x43\x2f\x28\x46\xc1\x89\xe3\x82\x15\x7d\xb9\xce\xc9\x68\x9e\x31\x34\xe4\x36\xc1\xee\x56\x67\x8f\x25\x7c\x79\x59\x90\x37\x9f\x94\x73\xc4\xf3\x84\x76\xfd\xfd\x56\x31\x3a\xe7\x68\xb3\x75\x15\x20\x45\x8b\x72\xa4\x07\xbd\x3a\xd3\xc8\x86\xc9\x23\x4e\x26\xe7\xa7\x0e\x63\x04\x28\x7a\x8e\x96\xc5\xab\x11\x87\xfe\xfc\x48\xf8\x8c\x9e\xf3\x7b\x0a\xf7\xa2\x51\xae\x36\x16\x50\x80\x30\x4c\x03\xba\x6c\x8d\xbb\x31\xf2\x71\xea\xff\xa1\x68\xfa\xbb\xfb\xf9\x37\xf9\xd1\x62\x4b\x7f\x91\x1f\x2d\x68\xe5\x80\xa2\xf8\x05\x40\x74\xe1\x16\x71\xf9\xb9\xc0\x28\xab\x7c\x89\x6f\x33\x3f\x49\x6e\x03\x0c\x2f\x39\x51\x78\x63\xef\xb9\xe0\xc1\x3e\x88\xf3\x8b\x52\xe4\x8a\xe9\xe2\x6d\xf1\xa1\xcf\xe5\xb9\xf0\xea\x90\x2d\x70\x04\xcb\xb1\xdf\x90\xfa\xae\xde\x69\x9c\x2d\xfe\x37\x6c\xcd\x0f\xf2\x7f\xe6\x76\x7d\xeb\x50\xe7\xe2\x9c\xbd\xf9\x39\xb3\x4d\x38\x38\x83\xf7\xb8\xca\xb6\x80\xed\x3e\xf3\x6d\x25\xfe\xa8\xd4\x5d\x07\xd9\x3f\x35\xbf\x5c\xc1\x8f\xf9\xc8\x4a\x8e\x1b\x24\x3a\xa2\x14\x07\xde\xc7\x04\xcd\x85\xed\x5d\xea\x5d\x83\x38\xbc\x85\xf8\xed\xab\xbe\xfc\x9c\xaf\xde\x9f\xc3\xf5\x36\x8e\xcb\xa9\x2f\xa3\x3c\xdd\x81\x9c\x97\x73\xfc\xed\xdb\xce\x19\xd0\x2b\x45\x41\x51\xbd\x2f\xaa\x2e\x2b\xea\x1f\x52\x49\x60\x72\xae\x84\x5f\xbd\xbb\x92\x6c\xb6\x4b\xeb\xd4\x26\x94\x70\x85\xa7\xf5\xac\x2e\xac\x31\xcb\x2d\xbb\x98\x26\x34\xf4\x75\x18\xe3\xe8\x5c\xfb\xb5\xc5\xdb\x04\x81\x4b\x50\xbc\xb9\x7d\x5e\xa1\x1c\xce\x0a\x00\x29\x0d\x07\xf3\x3d\x5a\x07\x07\xe1\x86\x94\x37\x54\xc9\x64\x23\x05\x5c\x98\x6f\x0b\xc3\x53\x04\x29\x72\xdf\x95\x3f\xae\x93\xb9\x99\x8a\x42\xe3\x80\x04\x7d\x7b\xf8\x02\xc3\x00\x02\xfa\x65\xfe\x53\x3f\xd4\xcb\x3b\xdb\x2f\x15\xc8\x19\x8c\x2c\xb2\x44\xf4\xc2\x7f\x54\x6e\xbe\x56\x44\x59\xb0\xbb\xfd\xe7\xf6\x93\xfa\x8f\x4a\xa5\xf2\x75\x7b\xdb\xbb\x44\x7e\x14\x84\x95\xca\x3f\x83\x4a\x41\xdf\x7d\x7e\x56\x54\xa1\xf3\x74\x80\x7e\x89\xb6\xd8\xa0\x2f\xbe\xec\xea\x20\x06\x84\x75\x74\x81\x22\xbd\xce\x42\x49\x1b\xb9\xd2\xc0\x53\x2c\xcf\x33\x98\x46\x07\x98\x35\x16\x3d\x35\x02\xdb\xac\x31\xa2\x17\x25\xae\x6f\x3c\xb8\x92\x91\xda\xa2\x40\x1d\x51\x88\xd5\xbe\x40\x34\xd2\x6a\x68\xba\x90\xd9\x92\xc1\x29\xd5\x56\xe6\x54\x35\xce\xce\x79\x6e\x68\xb5\x12\xd7\x8b\x1e\xec\x40\x7d\xb1\xab\xad\xcc\xe5\xec\x99\x2c\x16\xcf\xe5\xb6\xe8\xbf\x72\xb6\x35\x62\x6c\xb3\x36\x91\x45\x36\x91\xc5\xe4\xb5\xf3\x78\x54
\x56\xe8\x70\x2c\x71\x9a\xc3\x36\x92\xec\x99\xc5\xb9\xb9\x53\x75\x7d\x98\x0b\x19\x90\x78\x6a\xf7\xc2\x36\x0c\xea\x54\x16\x19\x0a\x4c\x76\xea\x54\x5b\x8c\x2c\x8d\x18\xb7\x59\x9f\x3d\xe3\xef\x99\x2d\x4d\x53\xdb\x37\x26\x4e\xb5\x35\x82\xcd\x56\x06\x7c\x63\xec\x8a\xb5\x0c\xfa\x30\x83\x4d\x03\x2b\x9c\x31\xb5\xcd\x69\x36\x20\x75\x55\x19\xb8\x3d\x2d\x67\x15\xcd\x98\x50\xcd\xa8\x37\xfa\x22\xf3\x20\x06\xad\x69\x61\x1f\xc5\x24\x14\x4a\x7c\xee\x8a\xf5\xd0\x6d\x6a\x53\x38\x0b\x33\xa5\x5a\x1f\x0d\x39\x3a\xb2\x39\x63\xa6\xf8\x6c\x34\xac\xb6\x32\xc8\xf1\xbe\x2b\xd6\xc6\x0e\xc7\x64\x80\x33\x6a\x30\xe7\x29\x30\xd5\xdc\xa9\xaa\x99\x1d\xf4\xd2\xa1\xa5\x8e\x45\x2f\xaa\xb9\x26\xe3\x29\xd6\xc4\x03\x66\x6d\xe6\x4a\x8d\xc4\xd9\x94\xcb\x69\x89\xe2\xdb\xc4\x96\xf8\x7c\x68\xd5\x73\x87\x8b\xc8\xb0\xda\x4b\x9d\x6a\x2b\x50\xaa\x75\x76\x88\x79\x02\x25\x23\x59\xe8\x4e\xa1\x6f\x24\xb6\xd9\x98\xd9\x3a\x9b\x0c\x2d\x8d\xc0\x6a\x8f\x3e\x8b\xb5\xd4\xe1\x1a\xf9\x90\xf3\xd2\xc2\x3e\xa2\x17\x8d\x87\x56\xcf\xeb\x62\x9e\xb8\x52\x27\x43\x96\x41\x95\xa0\x45\xa0\xc4\xcf\x14\xbf\x97\x0d\xad\x88\x85\xfe\x20\x85\xbe\x31\x75\x72\xe1\x7b\x57\x74\x1b\x7d\x66\x18\x88\xa4\xd8\xc4\x1b\xb9\xad\xb3\x63\x47\x22\xae\xe8\xd7\x46\x8e\x39\xe0\x17\xf4\x74\xc8\xbd\x46\xa2\xdf\x1a\x41\xce\x60\xa1\x3f\xe5\x41\x53\x63\x60\xb3\xf3\x4d\xc9\xf9\xe9\xd0\x54\xe3\xa1\xe9\x12\x98\xd7\xb6\x6d\xc0\xf1\x54\xa9\x12\xd6\xb1\x16\xe3\x73\x8d\x6f\xae\xd5\x22\x8a\xa9\x26\xa0\x17\x11\xc7\x6f\x60\x47\x32\x26\x5d\x8b\x10\x38\x8d\x02\x28\xb9\x63\x20\x19\x63\x30\x63\x6b\xb6\xd5\x69\x6b\x03\x5e\x3a\x62\xc3\x0d\x5d\x27\x1e\x94\xf8\x89\x63\x36\x52\xb9\x59\xcf\x6c\x89\x1d\x39\x98\x9d\x38\x9c\x1a\xdb\x96\xec\x81\xe0\xc9\xb3\x39\x75\x2c\x4b\xee\xd4\xe6\x58\xa2\x98\x1a\x71\x7d\x23\x91\xa5\xd7\x68\xe8\x37\x66\xb0\x6a\x94\xf4\xb6\x65\x13\x47\xac\x07\xc0\xea\x79\x8e\xd9\x88\x6d\xbd\xce\x38\x56\xbd\x3a\xb4\xb4\x31\x10\xeb\x81\x93\xb3\x8c\xc3\xf1\x49\x11\xcb\x4a\xff\x29\xed\xf4\x7b\x69\xa7\x2f\xd3\x8e\x5e\xa3\x0e\xa7\xb1\x8e\x34\x88\x6d\x73\xfa\xa0\x8c\x3b\x69\x47\x4c\x1e\xd4\x99\xc7\xb4\x67\x9d\x87\xce\x63\xa7\xa6\x4a\xda\x58\xf4\xc2\xb1\xdc\x50\x59\xd8\xac\xe7\xb6\xa5\xce\x64\xc9\xa0\xae\xf9\x3a\x72\x25\x92\x39\xb8\x9e\xb9\x96\x36\x75\xad\x9e\x67\xfb\x7c\xbe\x9e\x47\x3d\x72\x02\x95\xb1\xcd\xc6\x44\x96\x78\x5f\x96\xb4\x6c\xc8\x51\x02\x71\xdd\x77\xaa\xb2\x37\xb4\x42\x6f\x68\xaa\xb9\x2c\xa9\x99\x63\xd5\x47\xae\xd4\xc0\xc0\x7c\x8d\xdc\xe6\xa4\xed\x98\x74\x02\x2c\xd9\x53\x2c\xc1\x53\x38\x83\x19\xe6\xfc\xd8\xe1\x6a\xcc\xd0\x24\xa9\x6d\xb5\x66\xca\x34\x62\x9c\xaa\x31\x06\xa2\x90\xd9\x96\x36\x56\x38\x35\x73\x02\x6d\x04\xcc\x1a\x81\x41\x27\x73\x7c\x7e\x69\xc3\xb6\x48\xec\xbe\xd1\x50\x5f\x7a\x03\xb7\xd1\x27\xda\x8b\x31\x31\xf4\x01\x43\xba\xfd\x31\x93\x2b\xe3\xa7\x87\x67\xf1\x61\x26\xfa\x6c\x6c\x4b\x24\x97\xc5\x79\x1c\x2b\x9c\x9a\x38\x55\x63\xa2\x58\xc6\xcc\xb6\xe4\xac\xf0\x0f\x27\xe8\xb5\x61\xd3\x98\x01\xa9\xe7\x29\xdc\x28\x73\xcc\x41\x36\xe4\x5e\x33\xd7\xec\x51\xd7\x52\x09\xc4\x7c\x11\x63\xa9\x2b\x44\x63\xd7\x6a\x25\x9b\xf1\xed\x06\x2a\x03\xb9\x79\x8e\x82\x52\x63\x0c\x8a\xef\xf5\xda\xdc\x0f\x4c\x63\x62\x73\x83\xd4\xf1\x0d\x46\x59\xd0\x28\xb9\xf6\xab\x41\xd4\xc1\x80\xe5\xeb\x1a\x63\x3c\x1b\x0d\xde\xd4\x8c\x56\x5f\x1f\xf0\xcf\x2f\x3a\xcf\xc1\xaa\x36\x53\xcc\x05\xad\xf9\x1a\x39\x81\xf1\xa0\x58\x1e\xa7\x8a\xcc\x04\xb1\x47\xe6\x1b\x30\xa9\x2b\x35\x72\xc5\x77\xbf\xc9\xcd\xa9\x57\x7c\x2f\x8b\xec\x03\xf2\xa2\x31\x90\xf8\xaa\x83\x05\x3a\xc0\xf5\xb1\x23\xf1\xac\x2d\xb2\x2c\xe4\x8c\xfc\xd9\xdf\xb2\x83\xa7\x78\x61\x5b\xc1\x3c\x06\xe6\x43\xb6\xf2\x67\x49\x2b\xe2\x3c\xb5\x25\x23\x1d\x72\xa4\x88\xa3\x19\x10\xa
2\x99\x6b\x6a\x99\x2c\xb2\xac\x2c\xed\xc8\xc0\xe5\x5a\xfa\xc0\x84\x29\xe4\x3c\x4f\xd1\x59\xd6\xf1\x1b\x8c\x3b\x97\x31\x29\x72\xe4\xae\xfd\xca\x5c\xeb\x2f\xe2\xcf\x57\x33\x47\xe7\x29\x5c\xc4\x63\x41\x3f\xb4\x0c\x06\x88\xf5\x69\x91\x3b\x14\xbd\xf0\x75\x4a\x1c\x2c\xe0\xd6\x93\xda\xea\x19\x51\xcf\x78\xe2\x15\x6d\xf0\x80\x0b\x5a\x28\xf1\x99\x23\xce\xdf\x0d\x9e\xf8\x6e\xff\xa9\x56\xef\x0f\x06\xe5\xbb\xc5\x3a\x7b\xf2\xa4\xa1\xf7\x73\x56\xd7\x9f\x8c\x8e\xa2\x6b\x72\x9f\x55\x07\xfd\x49\x43\xd5\x74\xb9\x90\x9f\xdb\x56\x3d\x19\x9a\x2a\x11\xf1\x43\x06\xab\xf6\x58\x09\xd4\x50\x96\x48\x0a\xab\xda\xc8\x91\xa6\xde\x96\x5f\x78\xd1\xd4\xa9\xd6\x27\x1b\xb9\xed\x94\x8f\x65\x4a\x50\xc4\x82\xe7\x75\x1f\xa7\x41\x91\x43\x5a\xd3\x30\x73\x2d\x35\x57\xb8\xd7\x6c\xc8\x35\x12\x85\x6b\x45\xce\x78\xf9\x6c\xf3\xfb\xc5\xfb\xaa\x5a\xac\xcf\x37\xa5\x6a\xcc\x20\xe6\x67\x43\x9f\xa4\xcf\x47\xc6\x54\xfc\x6d\x99\x65\xad\xd2\xf9\x2d\x1f\x28\xdf\x79\x51\x91\x67\xdb\xa2\xaf\x8e\x5c\x51\xf8\xff\x27\xfd\x5f\xfc\x9e\xda\x66\x8d\x93\x1f\xa7\xdf\x5b\x8c\xd1\xd5\x30\x6c\x6b\x0c\xdf\xe9\x0d\x6a\x4d\x8b\xb1\x5b\x83\x46\x87\xef\xf4\x76\xe5\xcd\x63\x1c\xfa\x7c\xaa\xf8\xa3\xac\x58\x7f\xa4\xf3\x1b\x39\x6c\x23\xd7\xe3\xce\xa8\xd4\x09\xf3\x78\x68\xa9\xa1\xe8\x45\x04\x49\xc6\x58\x16\xe7\x73\x55\xaa\x1a\x85\x4d\x77\xe4\x4a\x6a\x28\x3f\xca\x4c\x91\x23\x5c\x89\x9d\x8a\x13\xa3\xab\x95\xbe\xce\x66\xb6\x28\xc4\x68\x9d\x3b\x72\x87\x7b\x48\x81\xc4\xb3\xd0\x7f\xad\x6d\xf2\x8b\x5e\x18\x42\xee\x95\xd8\x96\xe0\x75\x66\xc2\xaf\x72\xb3\x45\x86\x3e\x9f\xb9\xe2\xc4\x6b\x95\xfd\xd0\xcd\xe7\x1d\x83\x7e\xf2\x16\xe3\xa2\x2b\x80\x30\x43\x71\x14\x87\x19\x5e\xf4\xac\x2f\x80\x24\xe8\xa2\xeb\xb3\xc5\x36\xe1\xfc\xf5\x99\x38\xbf\x0f\xb8\xdf\xbe\x6d\xd0\x21\x20\x48\x47\xf4\x87\xdc\x9f\x79\xe5\x99\x64\xfc\xe1\x0b\x34\x96\xfb\xf4\x0b\xb4\xcf\xbe\xea\x5a\xa1\x1b\x17\xdd\x3b\x05\x13\xf4\x65\xd7\x6c\x05\xd1\x57\xb6\x76\xf3\xf5\xaa\x72\x9b\x8c\x40\x8c\xdc\xb6\xb1\x7f\x53\x77\x60\xed\xda\x28\x37\x40\x4a\xe8\x7d\x56\x7c\xfd\xa4\xb5\x3a\x77\xe3\x45\x51\x00\x02\x2a\xcf\x0f\xc1\x92\xd4\x99\xef\x54\x70\x18\x7c\xb9\xb9\x5b\xbe\x3b\x78\xcb\x78\x12\x07\xf1\x02\x7c\x4c\xca\x9b\x3a\xe1\x18\x08\x62\x69\xcb\x64\x0f\x37\x7b\x60\x1d\x56\x5c\x00\xc2\x12\xcc\x40\x30\x9c\x4f\xe9\xb7\x43\x60\x0e\x14\x00\x87\x20\xb7\x11\xc6\x8f\x38\x99\x3c\x05\x30\xce\xa3\x93\x17\x4e\x0b\x8e\x6e\x1a\x97\x77\x16\xc5\x56\x6f\x4d\x7e\x32\x42\x77\x1d\x99\xe5\x6f\x19\xfe\x93\x50\x4e\x93\xf9\x65\xa7\xa6\x1f\x32\xe2\xf5\x04\xe5\x3a\x9e\x15\x36\x7c\x60\xf8\x6f\x17\xa5\x91\xa5\xdf\xbe\xdd\x7d\xbf\x5e\x55\xee\x2b\x5f\xaf\x16\x8c\x07\x79\xae\x2a\xb7\x2e\x4e\x26\xb7\x68\x65\xee\xdb\x09\xca\x2b\x37\xef\x70\xfd\xfb\x03\x77\x68\x9f\x84\x42\x5b\xad\xcf\xa7\x24\x9a\x1d\xbd\xd7\x16\x7a\x83\x69\x3f\x23\x11\xbd\x6f\x41\x81\xeb\xbe\x27\x31\xdd\xef\x04\xe1\xff\x48\x9e\xda\x0f\xfc\x83\x69\xe5\xc4\x01\xde\x7b\x33\xdd\x4a\x40\xe8\x8c\x11\x5c\x0a\x88\x97\xf7\x95\x5f\x0e\x7b\xc6\xb2\x10\xbb\x5b\x09\x48\x47\x9b\x4e\x72\x69\x18\x25\x45\xb9\x2a\x56\x8c\x63\x38\xf6\x96\x79\xb8\x65\xd8\xa2\x94\x35\x52\x42\x2a\x37\x77\xd8\x45\x01\xc5\x34\xbf\xeb\xc6\x38\x80\x38\x02\xe4\xcc\x3c\x22\x14\xfb\x38\x59\x1e\x65\x9e\xb9\x71\x9c\x5c\x7a\x9d\x7e\xed\xed\x21\x4e\x0f\x92\x4d\x63\x10\xb5\x51\x7e\x09\x69\x1a\x2c\x89\x4f\xd2\x1e\x3f\x4f\x3e\x72\xd7\xb8\xf7\xf4\x34\xae\xf6\xc7\xa6\x90\xcf\x77\x94\x77\xe4\x93\xa5\x0f\x1d\xe9\x11\x16\x19\x41\xcf\x13\x8a\x7c\x21\x49\xb0\x17\x20\xf7\x63\x48\x1b\x00\x29\xce\xd0\xa1\xdf\x86\x59\x91\xcc\xe7\x5c\x66\x9e\x33\x00\x82\x53\x57\x78\x3f\x22\x45\x1f\x99\xf4\x4a\xa1\x09\xca\x07\x31\xb9\x28\x4f\x1c\xaa\x7c\xef\xd2\xf1\x9d\xc5\xb9\x
cc\x2a\x4b\x8f\xde\xc8\x2a\xeb\xf5\xbb\x2b\x67\x83\x4d\x4c\x47\x8b\x30\x38\x60\x8d\xcb\x50\x80\xbf\xbd\xc7\x9f\x2f\x28\x50\xc7\x63\xe8\x87\x41\xd9\x3f\x71\xb7\xf2\xbf\xed\x02\xff\x37\x30\x7f\xec\x87\xd2\xcd\x7f\x08\xb0\xaf\xd8\xbd\x96\xbf\x7d\xf9\x13\xdf\x77\xd1\x39\xd0\x4f\x7c\xdf\xc5\xfc\x7f\x39\x7c\xdf\xaf\x3f\xf1\x7d\x3f\xf1\x7d\x3b\x23\x6c\xfc\xb6\xc2\x3c\xcd\xfe\x84\xf9\x1d\x36\xc9\x5f\x15\xed\x17\x47\x2b\xb0\x5f\x1c\xdd\x26\x3f\x81\x7e\xc7\x5d\xe4\x2f\x80\xf7\xdb\xfe\x2b\x03\x07\xfe\xd2\xc7\xbb\x5c\x63\xf9\xf9\x89\x06\xbc\x44\xce\x7f\x12\x1a\x70\x27\x82\x7e\x82\x02\x2f\x12\xf0\x97\x01\x05\x22\x21\x6c\x57\x6e\xbe\x56\xba\xda\xb3\x35\x94\x3b\x82\xf4\xf4\x8f\xbf\x2d\x88\xaf\x6e\xdd\xab\x7f\xa6\x0c\x53\x85\x9b\x5f\x2b\x95\x25\xba\xef\xcb\x5e\xa6\x2a\xdb\xca\xca\x4d\x09\x05\xbc\x29\xb1\x80\x6b\xb1\xc2\xa0\xdf\xfc\xa8\xe8\x62\x2b\xb8\x2f\x5e\x7c\xd2\xfa\x7b\xc8\xc4\xd5\xdf\x69\xda\x00\x26\xce\xa9\x15\xf9\x49\xed\x9f\xe0\x59\xfd\x71\xa6\x5d\xce\xf6\xd3\xf0\x30\x4b\xb9\xd9\x3a\x09\x7f\x44\x96\x41\x65\x91\xad\xc9\x4d\x63\x6a\x4b\x0d\xc6\xd6\x05\x8a\xc4\xba\xd5\x1b\xcc\xa1\x3f\xbd\x05\x3c\x69\x45\xb7\x01\x45\xb1\x25\x7e\x0c\x38\x23\x17\xbd\xc8\x07\x56\x8b\xb8\x5c\x23\x71\x44\x76\xec\x98\x25\x54\x66\x64\x4b\xbd\x39\x64\xf0\x91\x61\xd4\xc7\x4e\xe6\x4a\xea\x74\x0e\x93\x31\x72\xc7\x6c\xa4\x0b\x58\x25\x05\x9c\x16\x41\x2c\x64\xb0\x84\x3f\x7c\x4f\x57\x72\x97\x10\x8e\x12\x0a\xd9\xcb\x14\x7f\x09\xf3\x5a\xc3\x7c\x40\xa0\x66\x0e\x16\xbe\x77\x9f\x4a\x08\xc6\xaf\xa2\x37\xc1\x25\x64\xa7\xd9\xc1\xcf\xb8\x5e\xfc\xdc\x92\xb1\xf6\xeb\xa0\xd1\xea\x9a\x0d\xd2\xea\x0f\x1a\x4d\x4d\x1f\x10\x25\x8f\x78\x79\x1c\x7a\x68\x1a\xb6\xc4\xde\x82\x47\x94\xbf\xc9\xa2\x3c\xd9\xa2\x1d\x34\x0c\xe3\xc9\xc3\x85\x9c\x97\x5e\xd8\x7a\xe9\x45\xfc\x02\xee\x31\x43\x96\xca\xd8\x26\x3b\x76\xa5\xa9\x57\xd8\x04\x06\xbd\x95\x4d\x94\x40\x25\x30\xb0\xa3\x21\x37\x68\x2f\x9f\xc9\xcd\x3a\x5b\xd8\x4d\xc6\x5a\x6f\x30\xe1\x87\xe6\x80\xa8\xbd\x81\xdb\x90\xbd\x70\x0d\x4b\x9b\xc3\x55\xa6\xd0\xe7\x1f\x50\x2f\xc2\x43\x4b\x25\xea\xb8\xe7\x29\x66\xcf\xeb\x3e\x4e\xbf\xaf\x79\xd5\xc6\x80\xf4\xb0\xfc\xf8\x30\x87\x9a\x54\xeb\xb9\x53\x1d\xd5\x56\xff\xfb\x6a\xee\x0a\xa7\xf8\x69\xc3\xd4\xe5\x15\x1c\x12\x36\x5b\x19\x6a\x4e\x96\xff\xa7\x80\x33\x6a\x73\x58\xe3\x80\x53\x45\x81\xda\xa2\xf0\xbd\xfb\x38\xc5\xad\x46\x5d\xef\xb3\x23\xbb\xc7\xbc\xb6\xb4\x41\x6d\xd0\x63\x0c\xdd\x10\x8f\xca\x29\xe1\x91\x25\x94\x6c\xae\xcf\x06\x1c\x4c\x78\xed\x3c\x0a\xd3\xe7\xf1\xd3\xb4\xf3\x28\xec\xce\x7b\x09\xa5\x99\xaa\x63\x61\x2a\x8b\x27\x74\xdc\x81\x75\xc2\x2a\x99\x0d\x39\x3e\xb5\x7d\x12\x2c\x6d\x21\x3f\x4e\xbf\x97\x90\x49\x32\xd7\xdd\x62\xe6\x76\xef\xea\x70\x7b\xad\x75\xb8\x84\x56\xee\xc9\x74\x25\x83\xda\x22\xbf\x58\x73\x66\xa5\x03\xe4\x8c\xdc\xf5\xc9\xd8\xd6\xb7\x20\xa3\x86\xe3\x13\xc6\xea\x45\x75\x3b\xd0\x08\x1c\xaf\x60\x97\x6b\xfa\x5e\xa4\xdb\x56\x83\x2d\xe2\x06\xce\x0e\xbd\x0f\xdb\x26\xbb\xf4\x21\xc3\x15\x27\x46\xea\xfa\x24\x77\xb8\x1a\x2d\xec\xa9\xf9\x24\xb1\xfb\xcc\x62\xed\xd5\x1a\xe4\xd4\xcc\xf1\xed\xc8\x5e\xdb\xb0\x81\x24\x63\x3c\x28\xfd\x52\xeb\x41\x7f\xc0\x2b\x3a\xcf\xc2\xaa\x9c\x95\x50\x26\x6e\x05\x99\xcc\x1d\x5d\xa0\x36\x16\x88\xe3\x6d\xf2\xf4\xf8\x05\xcc\xab\x84\xe4\xad\x7d\xb8\xc5\x16\xeb\xa7\x58\xad\x12\x72\xec\xf8\x0d\x6a\xeb\x02\x29\x9e\x41\x51\x28\xe2\xfc\xdb\xf3\x63\x8f\xe9\xe4\x02\x75\xb1\xb0\xe5\x9b\xcf\x98\x9f\xd9\xa6\x9a\xdb\x96\x36\x93\xc5\x85\x1f\x1a\xfc\xdc\xf6\x6b\x7d\x33\xf8\xc8\xac\x21\x68\x78\x05\xb7\x2c\x62\x2c\x83\x62\xa9\xa7\x6e\x5b\x2a\x33\xb4\x5a\x4c\xd7\x6c\x24\x2e\xd7\xa8\xc1\xe9\xfa\xd9\x80\x33\xc6\xdd\xfe\x53\x7b\x31\xf7\x0e\x30\xd9\xc8\x7d\x22\xa9\
x2b\x15\xb6\x6d\x24\xdd\xbe\xd0\x16\x09\x6d\x39\x81\xca\x0c\xcd\xd7\xc4\xea\x45\xd6\xd0\xac\x31\xb6\xa9\x89\xa8\xcf\x52\xd7\x7c\x65\x80\xbe\x80\xa8\x05\xda\x08\xfa\x2e\x71\x85\x05\xac\x2c\x50\x6b\xb0\xaa\x11\xc7\x54\x19\x47\xac\x2f\xa0\xba\x03\x6f\xcf\x1f\x7a\x61\xbb\xdd\x54\x13\xdb\x34\xa6\xf2\x63\x67\xfa\x9c\xd7\x73\xdb\x2c\xf3\x58\x24\x8b\xc3\xf6\x5f\x18\xde\xb5\xdd\xf7\xfd\x9b\xa0\xbc\x8e\x5e\x7a\xfc\xb2\x6d\x96\xdf\x7f\xf9\xf3\x97\xff\x0e\x00\x00\xff\xff\x93\xe2\x67\x8b\x28\x53\x00\x00")
// envDevelopmentJsonBytes returns the decompressed contents of the embedded
// "env-development.json".
func envDevelopmentJsonBytes() ([]byte, error) {
	return bindataRead(
		_envDevelopmentJson,
		"env-development.json",
	)
}

// envDevelopmentJson wraps "env-development.json" in an asset; the generator
// zeroes the size, mode and modTime metadata.
func envDevelopmentJson() (*asset, error) {
	bytes, err := envDevelopmentJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "env-development.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
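
// bindataRead (declared near the top of this generated file) gunzips each
// embedded blob; note the gzip magic bytes \x1f\x8b\x08 at the start of every
// asset above. The function below is an illustrative stand-in, not the
// generator's exact code: it assumes the file's usual bytes, compress/gzip,
// fmt and io imports, and name is used only for error context.
func bindataReadSketch(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}
	defer gz.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, gz); err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}
	return buf.Bytes(), nil
}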
var _gatewayProductionManagedIdentityJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x90\x41\x6b\x3a\x31\x10\xc5\xef\x7e\x8a\xb0\xff\x3f\xa8\x60\xb2\x89\xa5\x50\xbc\xf5\x54\x7a\xf0\x56\x7a\x11\x0f\x43\x9c\x6a\x8a\xc9\x84\xcc\x2c\x65\x2b\x7e\xf7\xb2\x51\x97\xd2\xd2\xe4\x32\xbc\x79\xef\x37\xf0\x4e\x13\xa5\x94\x6a\xfe\xb3\x3f\x60\x84\x66\xa5\x9a\x83\x48\xe6\x55\xdb\x5e\x14\x13\x21\xc1\x1e\x23\x26\x31\xf0\xd9\x15\x34\x9e\xe2\x75\xc7\xed\xd2\xba\x7b\x6d\x9d\xb6\xae\xdd\x61\x3e\x52\x3f\xf8\x5e\x30\xe6\x23\x08\x9a\x77\xa6\xf4\xaf\x59\x5c\x2e\x78\x4a\x82\x49\x5e\xb1\x70\xa0\x34\x1c\x72\xc6\x0e\xff\x66\x28\xc8\xd4\x15\x8f\xdc\xac\xd4\xa6\x4a\xc3\x3b\x8d\x53\x35\x1d\xc9\x83\x5c\xf3\x9b\x5b\xe2\xa9\x50\x97\x67\x73\x73\x5b\x6e\xaf\xc8\x31\x95\x20\x62\x4d\x78\x4a\x1e\x64\x36\x85\x42\x7a\x0f\x82\x1f\xd0\xeb\xe9\x42\xfd\x05\x9a\xff\x22\x49\x9f\x2b\x69\x1d\x7c\x21\xa6\x37\x31\xeb\xda\xcf\xee\x79\x87\x49\x82\xf4\x6d\xc7\x58\x1e\x99\xc3\x3e\x8d\x62\x40\xfe\xc9\x81\x1c\xbe\x35\xb1\xb4\xee\x41\x3b\xa7\xef\x6c\x33\xda\xce\x75\xda\x4e\xce\x93\xaf\x00\x00\x00\xff\xff\x81\x44\x4f\xbd\xa3\x01\x00\x00")
// gatewayProductionManagedIdentityJsonBytes returns the decompressed contents
// of the embedded "gateway-production-managed-identity.json".
func gatewayProductionManagedIdentityJsonBytes() ([]byte, error) {
	return bindataRead(
		_gatewayProductionManagedIdentityJson,
		"gateway-production-managed-identity.json",
	)
}

// gatewayProductionManagedIdentityJson wraps the file in an asset with zeroed
// metadata.
func gatewayProductionManagedIdentityJson() (*asset, error) {
	bytes, err := gatewayProductionManagedIdentityJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "gateway-production-managed-identity.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
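
// The asset and bindataFileInfo types used by these accessors are declared
// earlier in this generated file; the shapes assumed here follow the
// conventional go-bindata layout (a sketch, not a second declaration):
//
//	type asset struct {
//		bytes []byte
//		info  os.FileInfo
//	}
//
//	type bindataFileInfo struct {
//		name    string
//		size    int64
//		mode    os.FileMode
//		modTime time.Time
//	}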
var _gatewayProductionParametersJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x95\x4f\x6f\xdb\x3c\x0c\xc6\xef\xfd\x14\x86\xdf\xf7\xd8\xe6\xcf\xba\x5d\x72\x2b\x92\x76\x28\x86\x16\x41\xbd\xe5\xb0\x61\x28\x18\x89\x71\xb4\x48\x94\x41\x4a\xee\x92\xa1\xdf\x7d\x70\x9c\x74\x0b\x96\x64\x8b\x36\x18\xf0\x81\x0f\x7f\x8f\x28\x99\xa2\xbf\x9d\x65\x59\x96\xe5\xff\x8b\x9a\xa3\x83\x7c\x90\xe5\xf3\x10\x2a\x19\x74\xbb\x6d\xa4\xe3\x80\xa0\x44\x87\x14\x3a\xb0\x8a\x8c\x1d\xe5\xdd\x46\x93\xee\xab\x5e\xff\xcd\x45\xaf\x7f\xd1\xeb\x77\x35\x56\xd6\x2f\x9b\xbc\x31\x30\x38\x0c\xc8\xd2\xf9\x22\x9e\xfe\xcb\xcf\xdb\x35\x94\xa7\x80\x14\x26\xc8\x62\x3c\x35\x4b\xf5\x3b\xbd\xe6\xd9\x26\x54\x2f\x60\x3e\xc8\xda\xc2\xd6\x71\x50\xfc\x80\xe2\x23\x2b\xbc\xd5\x3b\xd2\x5a\xae\xc1\x46\x6c\xec\xf2\x97\xf8\xf3\xf9\x4f\x74\x53\xf5\xd0\xfa\xa8\xef\xc1\x61\x1a\x5e\xa0\x1a\x83\x5a\x4c\x8a\xab\xf7\x48\x40\x21\xa1\x0c\x0d\x01\xa6\x20\x78\xa5\x94\x8f\x14\x92\x6a\xd1\xd3\xe0\x17\x48\x43\x6b\x30\xad\x86\x96\xff\xc0\xf6\x64\xb4\x84\x80\x4f\xb0\x1c\x79\x07\x86\x24\x15\xbf\x41\x08\x91\x31\x99\xbf\xd3\xa2\x87\x9e\x66\xa6\xfc\xd1\x45\x49\x46\x05\x72\x6d\x14\x8e\xd9\x90\x32\x15\xd8\x84\xc3\xdc\x3a\x05\xcf\x50\x6e\x3f\x6b\x7b\x3e\xa9\x5e\x13\x57\x98\xd5\xb1\xb6\x28\x02\x90\x06\xd6\x8f\xa3\xd7\xf2\x58\x5f\xfe\xc6\x4c\x64\x08\x15\x28\x13\x96\x87\x2d\x2f\xf7\x5a\x2c\x70\x59\x43\xb4\x61\x74\x5f\x14\x71\x36\x33\x5f\x4f\xde\xd1\xd6\x61\xcc\x98\x82\x3b\xed\x6e\x78\x3d\x2f\x74\x4a\xb7\x3a\x2d\xfa\x9a\x6a\xc3\x9e\x9a\x89\x74\x32\x4f\x9e\x3e\x7a\x02\xfb\x80\xa5\xf1\xc7\xda\xfd\xd3\x4e\x78\x2d\x21\x48\x00\x31\x90\x9f\xff\xaa\x29\xa4\xc0\x60\x0d\xe9\xfd\xfa\xc2\x33\xc2\x26\x69\x9f\x2e\x4f\x26\xac\x90\x2d\x90\x26\xcf\x61\xbe\x2f\x67\x2d\x6c\x3c\xa2\xe4\x3b\x09\x9f\xf7\xee\x96\xab\x5b\x07\xe5\xe9\xe3\x88\xab\x3b\xed\x36\x8d\x9f\x04\x8b\xfe\x3b\xba\x19\xa2\x52\x81\x4a\x29\x7d\xfb\x47\x79\xcb\x3e\x56\x49\xd3\x98\xab\x7f\x30\x45\x44\xe6\xe3\x38\xb5\x46\xbd\xc3\x23\xb7\xf4\x00\x5c\x37\x57\xdc\x22\x50\xac\xae\x09\xa6\x16\x8f\xac\x1f\x38\xe2\x41\x93\x3f\xdf\xff\x59\xfb\x7e\x3e\xfb\x1e\x00\x00\xff\xff\xe6\x2a\xa7\x6b\x39\x08\x00\x00")
// gatewayProductionParametersJsonBytes returns the decompressed contents of
// the embedded "gateway-production-parameters.json".
func gatewayProductionParametersJsonBytes() ([]byte, error) {
	return bindataRead(
		_gatewayProductionParametersJson,
		"gateway-production-parameters.json",
	)
}

// gatewayProductionParametersJson wraps the file in an asset with zeroed
// metadata.
func gatewayProductionParametersJson() (*asset, error) {
	bytes, err := gatewayProductionParametersJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "gateway-production-parameters.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
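
// A caller would typically decompress an asset once and decode it; a minimal
// sketch of consuming the parameters file above. exampleGatewayParameters is
// a hypothetical helper, and it assumes encoding/json is available to the
// caller.
func exampleGatewayParameters() (map[string]interface{}, error) {
	a, err := gatewayProductionParametersJson()
	if err != nil {
		return nil, err
	}
	var params map[string]interface{}
	if err := json.Unmarshal(a.bytes, &params); err != nil {
		return nil, err
	}
	return params, nil
}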
var _gatewayProductionPredeployParametersJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x8e\x4f\x4b\xc3\x40\x14\xc4\xef\xf9\x14\xcb\xea\xb1\xdd\x24\x82\x97\xdc\x3c\x15\x11\x24\x50\xf0\x22\x1e\x1e\xdb\x69\xbb\xba\x7f\xc2\xee\x4b\x6c\x94\x7e\x77\x49\xb7\x56\x84\x2a\x65\x61\x0f\x33\x6f\x7e\x33\x9f\x85\x10\x42\xc8\xeb\xa4\xb7\x70\x24\x1b\x21\xb7\xcc\x5d\x6a\xca\x32\x2b\xca\x91\xa7\x0d\x1c\x3c\x2b\xfa\xe8\x23\x94\x0e\xee\xe8\xa5\xf2\xa6\xaa\x6f\xe7\x55\x3d\xaf\xea\x72\x85\xce\x86\x71\xba\x6b\x29\x92\x03\x23\x26\xf5\x9a\x82\xbf\x92\xb3\xdc\xa1\x83\x67\x78\x7e\x42\x4c\x26\xf8\xa9\xaa\x56\xd5\xf4\xbe\x0f\xba\x53\x50\x36\x22\x0f\x3b\xe8\x19\xfd\xb8\x5c\xfc\xd6\x0f\xde\x40\xb6\x87\x6c\xc4\x9a\x6c\xc2\xc9\xda\xcf\x7e\xd2\xd8\x71\xa4\x05\x31\xde\x69\x7c\xc0\x38\x50\x6f\xf9\x4e\x6b\xa4\xd4\x06\x6b\xb4\xc1\x3f\xd4\xe7\x97\xb3\xc8\x4d\xa6\x2d\x11\x07\xa3\xd1\x46\xe3\xb5\xe9\xc8\xde\xaf\xfe\x26\x49\x79\x96\xf4\x76\x1c\xd4\x46\xac\xcd\xee\xb2\x78\x91\xff\x7d\xf1\x15\x00\x00\xff\xff\x5d\x3f\xc5\xc2\xbc\x01\x00\x00")
// gatewayProductionPredeployParametersJsonBytes returns the decompressed
// contents of the embedded "gateway-production-predeploy-parameters.json".
func gatewayProductionPredeployParametersJsonBytes() ([]byte, error) {
	return bindataRead(
		_gatewayProductionPredeployParametersJson,
		"gateway-production-predeploy-parameters.json",
	)
}

// gatewayProductionPredeployParametersJson wraps the file in an asset with
// zeroed metadata.
func gatewayProductionPredeployParametersJson() (*asset, error) {
	bytes, err := gatewayProductionPredeployParametersJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "gateway-production-predeploy-parameters.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _gatewayProductionPredeployJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x4b\x6f\xe3\x36\x10\xbe\xfb\x57\x08\x6a\x01\xdb\x45\x2c\xcb\x46\x0b\xec\xe6\x96\x6e\x16\x69\xb0\xdb\x6d\x50\x17\xb9\x18\x3e\x8c\xa9\xb1\xc2\x8d\x44\x0a\xe4\xc8\x89\xba\xf0\x7f\x2f\xa8\x87\xa3\x07\x65\x19\x6d\x2e\x5d\x44\x39\x24\x12\x87\xf3\xf8\xe6\xc1\x8f\xf9\x36\x72\x1c\xc7\x71\x7f\xd4\xec\x01\x63\x70\x2f\x1d\xf7\x81\x28\xd1\x97\xf3\x79\xf1\xc5\x8b\x41\x40\x88\x31\x0a\xf2\xe0\xef\x54\xa1\xc7\x64\x5c\xae\xe9\xf9\xd2\x5f\xfc\x32\xf3\x17\x33\x7f\x31\x0f\x30\x89\x64\x66\xe4\xfe\xc2\x38\x89\x80\xd0\xfb\xaa\xa5\xf8\xc1\xbd\x28\x2c\x30\x29\x08\x05\xdd\xa3\xd2\x5c\x0a\x63\x68\xe1\xf9\xe6\xa7\x12\xd8\x83\xe2\xb0\x8d\x50\xbb\x97\x4e\xe1\x55\xfe\x39\x04\xc2\x27\xc8\x3e\x61\xb6\x87\x34\xa2\x2b\xc6\x50\xeb\x3b\x19\x71\xc6\x73\xd1\xf5\x51\xd4\x3c\xdf\x1a\x6f\xb9\x02\x42\x01\x82\x6e\x03\x63\x72\xad\xd3\xad\x66\x8a\x27\xc4\xa5\x98\x4c\xbd\x6a\x6d\x53\x3a\xd1\xd8\x28\xb7\x5f\x91\x55\x1b\x13\x50\x10\x23\xa1\xd2\x93\x71\xe9\xd2\x0a\xd5\x9e\x33\xbc\x53\x5c\x30\x9e\x40\x74\x1b\x8c\xa7\x56\x45\x09\xaa\x98\x6b\x13\x76\x33\xb6\x86\x90\x46\xa6\x90\xba\x11\x35\x84\x42\x24\xd7\xba\xba\xe9\x7c\x3d\x8c\xec\x6f\x85\xe4\xa1\x04\xfd\x25\xac\x26\xea\x45\x36\xbf\xac\x6e\xba\x1e\xbb\x94\x25\x68\x30\xd9\x4a\x19\xb5\xc2\x75\x03\xdc\x99\x2c\xdd\x43\x94\x1a\x99\x1d\x44\x1a\x8f\x12\x87\x17\x61\x17\x9f\x49\xc1\xcd\x40\x6e\x7b\x0c\x83\x52\x90\x0d\x58\x5e\x6f\xac\x66\x7b\x53\xd7\x6f\x4d\x93\xe2\x22\x74\xad\xea\x1e\x4b\xcf\xef\x14\xee\xf8\xf3\xa0\x8e\x96\xcb\x31\x3c\x7f\x46\x11\xd2\x83\x7b\xe9\x2c\xfd\x51\x33\x57\x55\x86\x14\x6a\x99\x2a\xd6\xaa\xf5\x96\x9d\x44\xc9\x04\x15\x95\xa8\x1d\x5a\x76\x04\xc4\xb9\x17\x65\xf0\x33\xa1\x3b\xae\x54\x8e\xfe\xce\x99\x92\x5a\xee\xc8\xfb\x82\xf4\x24\xd5\xe3\x5c\x14\xbf\x57\xc8\x52\xc5\x29\xbb\x51\x32\x4d\x74\x7b\x7b\x24\x19\x50\xd9\xd6\xeb\xca\xe3\x5c\x74\x32\xf5\xaa\xc5\x76\x6f\x98\x91\x10\xf0\xe3\xb6\x7a\x87\xbd\x94\x5f\xb7\xa5\x5c\x48\x78\x6d\x8a\x2c\xfd\xa5\x3f\xf3\xdf\xcd\xfc\x85\x35\x45\x27\x81\xea\xb6\x2a\x04\x81\x42\xad\x57\x09\x30\xec\xef\xd5\x52\xaa\xc8\xba\x65\x0a\x35\x84\x17\x66\xca\xbd\xf3\xfc\xf9\xf2\xe7\xb3\x7b\xd7\x32\x44\x74\xba\x15\xa7\xe6\x83\xdd\x55\xe7\x8c\xa0\xfb\x43\xcb\xa7\x74\xcd\xfd\xae\x5b\x8d\xbd\xb6\x52\x19\xb4\x98\xef\xe4\x41\xa3\x72\x6e\x83\xc9\xf8\xcc\x4a\x1c\x5f\x38\xe3\x5a\x5d\xdb\x27\x70\xc7\x1e\x41\x68\xb0\x10\x69\x14\x9d\x14\xb6\x24\xa2\xa1\x47\x17\x73\xe4\xa3\x08\x12\xc9\xc5\xc0\xf4\xae\x9e\x61\x3c\xea\xba\x9b\x5d\x79\x65\x8e\xe0\x0f\x52\xc7\x52\x5f\xff\x7a\x46\xa4\x4e\xbd\x3b\xcf\x73\xef\xb8\xed\x27\x7b\xb9\xb6\x9f\x6e\xf9\xb6\x9f\x01\x14\x9d\xff\x88\xc9\x07\x29\x08\xb8\x40\xf5\x27\x86\x5c\x93\x6a\x1f\x0d\xbd\xda\xbe\x73\x5c\x3e\xee\x51\xd0\x6f\xe9\xf6\x0d\x8e\x1c\x8e\x15\x49\x05\x21\xbe\xa1\x91\xa3\xf1\x09\xb3\x7b\xc3\x5c\xfe\x37\x70\x9c\x94\xd8\x0c\xcc\xe9\x44\xf1\x3d\x10\x7e\xe6\xe2\xb1\xa4\x7e\xe5\xa1\x52\x23\x9b\xee\x35\xd7\xe6\xfe\x11\xf4\xbb\x7c\x22\x27\x1d\x92\x55\x9c\xd7\x76\x5d\xdd\x60\x9a\x00\x0c\x11\xb8\xbd\xd1\x7c\x36\x83\xdb\x73\x45\x29\x44\xe5\xeb\x2b\x71\xb7\x5e\x12\xd6\xa1\xe5\x09\x8a\x40\xff\x21\xac\x75\xf3\x5a\x47\xfe\xc8\x8e\x65\x3f\x0f\xac\x10\x5d\x33\x29\x18\xd0\xa4\x4e\x3e\x9b\xa4\x7e\x3c\xbd\x70\xc6\xb3\xf0\x29\xb3\x30\xd1\x2e\xe6\x55\x5b\xcd\x73\x0d\xaf\x84\xf5\x10\x69\xfd\xd7\x37\x5c\xfd\x98\xf6\x93\xdc\x1d\xc4\x3c\xca\x8c\xce\xab\x9e\x21\x71\x44\x51\x13\x88\x00\x94\xa5\x73\x6c\x4c\x16\xda\xd7\xbc\x63\x16\x8e\xff\x02\x38\xde\xb1\xed\x57\x43\x93\x93\x7a\xc6\x86\x2f\x93\xe3\xa9\xfd\x6a\x8e\xc2\xd8\x5b\xc9\x1d\x5d\x63\x84\x64\xa2\x21\x95\xe2\xc9\
x6e\x6c\x97\xfe\xe2\xfd\xcc\x7f\xdf\xbc\x7f\xe4\x7f\x6d\x46\x87\xd1\x3f\x01\x00\x00\xff\xff\x99\x8a\x8c\x66\x5f\x11\x00\x00")
// gatewayProductionPredeployJsonBytes returns the decompressed contents of
// the embedded "gateway-production-predeploy.json".
func gatewayProductionPredeployJsonBytes() ([]byte, error) {
	return bindataRead(
		_gatewayProductionPredeployJson,
		"gateway-production-predeploy.json",
	)
}

// gatewayProductionPredeployJson wraps the file in an asset with zeroed
// metadata.
func gatewayProductionPredeployJson() (*asset, error) {
	bytes, err := gatewayProductionPredeployJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "gateway-production-predeploy.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _gatewayProductionJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x73\xa3\x48\x96\xe8\xf7\xf9\x15\x8e\xba\x1b\xe1\xae\xd8\x72\x17\x20\xab\xca\x4c\xc4\x7e\x00\x24\x10\x48\x42\xe2\x95\x3c\x7a\x3b\x26\x20\xc1\x08\x91\x3c\x56\x3c\x64\x74\x63\xfe\xfb\x8d\x04\x24\x4b\xb2\x64\xbb\x5c\xd5\x77\xe7\xc3\x68\x62\xaa\x2d\xc8\x3c\xe7\xe4\x79\x9f\x93\x90\xfa\xbf\x7f\xbb\xb9\xb9\xb9\xf9\xf4\x1f\x05\x5c\x05\x89\xfb\xe9\xef\x37\x9f\x56\x65\x99\x17\x7f\xff\xfa\xb5\xbb\xf2\x7b\xe2\xa6\x6e\x18\x24\x41\x5a\xfe\xee\xee\xaa\x4d\xf0\x3b\xcc\x92\xfe\x5e\xf1\x95\x22\xc8\xe1\x1d\x41\xde\x11\xe4\x57\x3f\xc8\x51\xd6\xe0\x71\x7a\x90\xe4\xc8\x2d\x83\xdf\xd7\x45\x96\xfe\x9f\x4f\x5f\x3a\x0c\x30\x4b\xcb\x20\x2d\x41\xb0\x29\xa2\x2c\xc5\x88\xc8\xdf\x09\xfc\xbf\xfd\x80\xdc\xdd\xb8\x49\x50\x06\x9b\xe2\xd3\xdf\x6f\x3a\xb2\xda\xeb\x2e\xdc\xa8\x41\x91\x55\x1b\x18\x88\xfe\xc9\xad\xf6\x76\xd9\xe4\x01\x86\x56\x94\x9b\x28\x0d\x3f\x1d\x6e\xfe\xf3\xcb\x11\x08\x4c\x38\x87\xb2\xca\x97\xdd\x24\xf8\x09\x18\x5a\x00\x97\x2e\x8c\x81\xc6\xe8\x41\xea\xa6\xe5\x47\x09\xf2\xdd\xd2\xf5\xdc\x22\x60\x20\xcc\xaa\xb4\xfc\x38\x55\xbe\x57\x66\x71\x90\x72\x28\x0a\x7e\x82\x9a\x0e\x88\xb1\x41\x1f\x9b\x1f\xba\x65\xb0\x75\x9b\x51\x96\xb8\x51\x5a\xbc\x09\xe3\xcb\xe9\x5d\x3f\x78\x74\x2b\x54\x02\x17\x55\xed\xa8\x57\x71\xf0\x81\x5b\x56\x9b\xe0\xaf\x45\x32\xf7\x0b\x9f\xcb\xd2\xc7\x28\x7c\xd6\xd6\x8f\xb3\x45\x0b\x36\x75\x04\x83\xe5\x26\x4a\x61\x94\xbb\xe8\xa3\x52\xda\x83\x2b\xb3\x8d\x1b\xee\x35\xa7\xe3\xf9\x4f\x01\x04\x89\x16\xed\xde\x56\xbf\x37\x18\xaa\x95\x6e\xea\xbb\x1b\xff\x1f\xa3\xfb\xe2\x1f\xf5\xe0\x0d\x8c\x45\xc1\xb9\xb9\x0b\xa3\xb2\xb9\x8e\x37\x4a\xcb\x37\x90\x0e\x2e\x22\x89\x83\xa6\xc6\xa3\x46\xb2\xa6\x55\x8f\x8f\xd1\xd3\xc7\xb8\xb3\x07\xb3\xdc\x04\x1f\x86\x91\xf8\x09\xbf\x69\xfd\x9e\xff\x61\xe3\x4a\xfc\xc2\x1f\xa7\x75\xb4\xc9\x52\xec\x5c\x3f\x06\x24\xcd\x52\x27\x4b\x5d\xa4\x06\x61\x94\xbd\x66\xa2\xee\x66\xe3\x36\x6f\xb0\xfd\x8f\x93\xbb\xed\x88\xc0\x2d\x4a\xb7\x88\xdc\xb3\x99\xed\x3d\x18\xa4\xe5\xc6\x45\x51\xea\x5f\xbe\x1f\x67\x9b\xc0\xed\x07\x5d\xba\x5f\x6c\xa3\x72\x17\x6c\x90\x9b\xfa\x69\xb6\x29\x57\x97\xc6\xb4\x37\x7a\x18\x55\xf1\xe9\x64\xc0\x9f\x17\x39\xb2\xc9\xc5\xc4\x0d\x3f\xe8\x75\x37\xf9\xdc\x4f\x7a\x0b\xfc\x38\x84\xc2\xff\x05\x20\x70\xe8\x28\x72\x17\x7e\x78\x25\xfb\xd8\x2a\x6c\xb2\x2a\xff\x78\x20\xda\xe4\xbf\xca\xcf\x15\xc5\x6a\x59\x79\x28\x82\xd3\xe0\x15\xef\xf0\x1a\x84\x1a\xfb\x17\x14\xb8\x69\x95\x8f\x53\xd7\x43\xc1\x2b\x94\x78\x59\x76\xae\x75\xe7\xfa\x5e\x6e\xaa\xe0\x2a\x9e\x1f\xe4\xd8\xdf\x8e\x60\x7c\xda\xf4\xac\x2f\x4e\x8c\xea\x0c\x54\x11\x57\x2f\xe0\xb7\x37\xd2\x0e\xf3\xb3\xe7\x9d\xa9\xda\xa9\xe6\xff\xf3\x6c\x5d\x28\x83\x6e\xd9\x27\x5f\x7f\x6c\x8e\xe5\xfe\xdb\xe7\xdf\xf7\x37\xff\x3c\xe7\xc6\x1e\xcf\x1f\x45\xe5\x75\xeb\xf9\xed\x39\x55\xfb\xed\xf6\x95\xb0\x74\xfb\xf9\xcb\x0d\xf1\xe5\x26\x4a\xfd\xe0\x69\xf1\xf8\x23\xb3\x6e\x7f\xbf\xfd\xfc\xf9\x05\x25\x7b\xae\xce\x23\xb8\xc9\x8a\xec\xb1\xfc\xbd\x9f\xff\xb5\x38\x81\x53\x9c\x4f\x74\xf3\xe8\x28\xef\xa4\x08\x92\xbe\x23\xee\xef\x08\xf2\xa2\xfa\x7c\x94\xff\xaf\xf3\x3e\xdf\x64\x79\xb0\x29\xa3\x0b\xa9\x4b\x7b\xff\xb1\x8f\x13\xe2\xb2\xcb\x3c\xaa\x4d\x2b\x8d\xe2\xa2\xbf\x7d\x49\xe6\x8f\xe0\x3a\x5d\x5e\xe5\xa5\xc1\x4b\x0f\x74\x71\x6c\xe4\x9f\x68\x8e\xe8\xff\x76\xfb\x2c\x0a\x39\x28\xb7\xd9\x26\xfe\x5a\x47\x9b\xb2\x72\x51\xff\xb5\xf8\xda\x21\x28\x6e\xbf\xdc\xec\x85\x7e\x57\xa7\x41\x79\xfc\xbd\x1b\x72\xfb\xf9\xcf\x4f\xaf\xd2\xf0\xcf\xab\x77\xff\xf9\x32\x24\x1c\x88\xde\x8b\x69\x8f\x6c\xcf\xe8\x0b\x61\xe4\x30\x67\x97\xa5\x2d\xef\x3e\xfd\x11\x3d\xfe\x86\x0b\x17\x9c\xd7\x9e\xe8\xef\x59\x40\x
bd\xfd\xfc\xa5\xcc\x66\xd9\x36\xd8\xfc\xb6\x09\x72\xe4\xc2\xe0\xb7\x6b\xf6\xf5\xe5\xe6\xf6\x06\x2f\xfe\xf6\xf3\xe7\xcf\x5f\x6e\x6f\xbf\xe4\x11\x8c\x1d\x8c\xef\x02\x33\xf1\xb8\xbc\xf5\x85\xe2\x92\xf1\xfd\x4d\x50\x14\x01\xe6\xe4\x75\xd8\x83\xcf\xd7\xb8\xf8\x92\x7b\x7f\x5e\x08\xa4\x9e\x0b\xe3\x20\xf5\x7b\x64\xcb\x2c\x43\x1f\xd2\xc0\x73\xa6\xf7\x60\x7f\x86\x32\x94\xb9\x3e\xeb\x22\x37\x85\x51\x1a\xaa\x15\x0a\xfe\x72\xd3\xb8\x62\x92\xbf\xd0\x56\x9e\xd7\x14\x6c\x8a\xaf\xd7\x5c\xc0\xb1\xa9\x20\xef\x2e\x4a\xcb\x60\x93\xba\xe8\xf8\xf2\x7e\xea\xdb\x36\x74\x5d\xeb\x6f\x2e\xcb\xff\x2f\x5b\xee\x05\x55\x7b\xc7\x4a\xfb\x59\x3f\xbd\xd0\x7c\x93\x79\x2f\x03\xf7\xaf\x5a\x5b\x0b\xfd\x3d\xcb\x69\x07\xfe\x8a\xc5\x94\x19\x6c\x65\xf5\x49\x87\xf9\x2b\xae\xed\x66\x6f\x4a\xa3\x08\xc7\x72\xaf\xda\xe7\x03\xa3\x2e\xe7\x79\x6b\xea\x5e\xd1\x96\xd9\x06\x07\x8d\xfb\xfb\xc1\xfb\x14\xea\x79\xfc\x2f\x71\xe2\xc8\xdb\x54\x28\xb8\x6b\x5b\x55\x57\x9c\xca\x65\x68\xff\x76\x0d\x57\x3f\xff\x76\x0d\x3d\xc1\xff\x76\x0d\xbf\xc2\x35\x3c\x10\x3f\xe4\x19\x1e\x88\x5f\xed\x18\x7e\x26\xd9\xe8\x84\xf4\x57\x27\x18\xc7\xb2\x99\x60\x8a\xdf\x92\xe5\x3b\x39\x9b\x56\x89\x17\x6c\x16\x8f\xcb\xfd\x2a\xa8\x37\x26\x6c\x82\xff\xa9\x82\xa2\x5c\xba\xe5\x0a\xd3\xf2\x75\x15\xb8\xa8\x5c\xed\xbe\x6e\x02\xd7\x6f\xae\xeb\xdf\x8f\x08\xa6\x33\xaa\xf7\x8a\xe4\xd5\x52\xea\xa5\xcc\x0f\xf6\xf3\x76\xc5\x78\xd1\x28\xcf\xa7\x7d\xac\x50\x3e\xaf\x32\x29\xe2\x8e\x78\x78\x67\x95\xf9\x56\x79\x78\x4c\xee\xbe\xa5\x28\xe6\x3f\x5f\x2a\xfe\x8b\xc5\xa2\xf7\x59\x67\xf4\x0b\x16\xfe\xef\x1a\xf9\x88\xe8\x17\xc6\x8a\x8a\x3b\x82\x20\xef\xd2\x08\xfe\x8c\x9c\x82\xb6\xf9\xb6\xdc\x64\x4f\xcd\xf2\xd9\xd5\x9d\xb4\xd3\x2e\x50\x76\x8d\x9a\xf7\xdb\x76\xbe\x89\x6a\xb7\x0c\x66\x51\x1a\xf7\x4d\xc9\xbf\xda\xc2\x5f\x34\x10\xf3\x20\xf5\x8b\x45\x7a\xb9\x5b\xfe\x96\x85\x5d\x72\x6c\x27\x50\x2e\xb6\xb4\x7f\xb4\x6f\xf5\xc7\x85\x7e\x5c\xb7\x17\x74\xfb\xa2\xf5\xd6\x31\x3c\x0a\x36\x27\x0d\xaf\x4b\xcd\xfe\xe7\x6d\x9d\x2b\x08\x9e\xb7\x7e\x5e\x28\xf3\x8f\xf6\xcc\xaa\x3c\xdc\xb8\x7e\xb0\xcc\x50\x04\x5f\xb6\x8a\x0f\xc3\x92\xcc\xef\x14\xc5\x4d\xab\x73\x4e\x5e\x40\xdb\xce\xe9\x2d\x74\xee\xc2\x55\x94\x62\x15\x7e\x8c\xd0\xf5\x9c\xf0\x53\x56\xbc\x35\xa4\xe3\x4e\x96\xe4\x55\x19\x6c\x64\x37\x09\x0e\xfb\x4a\x9f\xfe\x80\x59\x0a\xdd\xf2\xc0\xa3\xbb\xdb\x2f\x37\xc7\xac\xdb\xb7\x99\xdb\xc6\xe8\xdd\x65\xe1\x1c\x50\xb8\x7e\x12\xa5\x46\x81\x95\xa6\x13\x33\x44\x59\xe5\xdf\x55\x45\xb0\x79\x6d\x1a\x8a\xd2\xea\xe9\xc7\x6a\x99\x4f\x7e\x54\xb4\xf6\xed\x16\xc5\x36\xdb\xf8\x4c\x55\xae\x82\xb4\x8c\x0e\x86\x85\x0d\xfd\x8d\xc4\xa3\x28\x56\xef\x73\xab\xf9\x7e\x47\xe0\xba\xb3\x3f\xff\xbc\x0d\xf5\x19\xfa\x21\xf1\xc9\x92\xe0\xeb\x33\xc7\xbe\xfe\x5e\x14\xab\xaf\x6e\x55\xae\xb2\x4d\xb4\x0b\xfc\x7f\xc4\x98\x80\xd7\xd7\x74\x02\x38\x0e\x9a\x91\x5b\xba\x2f\xac\xe1\x78\x8f\xe3\x4d\x9f\xbe\xff\x5c\xf7\xed\xfb\xcf\x9f\x1f\x8d\x0d\x3f\x52\x42\x7f\xea\x9b\xed\xef\xd2\xf8\x28\x71\xc3\x40\x0d\x1e\x83\x4d\x90\x5e\xd8\xa3\x7a\x31\xbe\x95\x73\xb1\xea\x1c\x8d\x1a\xf8\x13\xf7\xcd\xba\x23\x7b\x7c\xec\x87\x4f\xc6\xb3\xb7\x06\x77\x5e\xf1\xd3\xf7\xbb\x19\x98\xbf\x35\xb6\x7e\x76\xf5\xc8\x2d\x83\xa2\xfc\x58\x26\x9c\x15\xa3\xa8\x88\xdf\x5e\x3a\xdc\x04\x6e\x19\x2c\xf2\x7d\x58\xe2\x37\x59\xd2\xed\x51\xbe\x41\x67\xf7\x84\x8e\xff\x2e\x2c\x37\x47\x02\xec\x77\x4b\xf4\x3e\x8e\x2e\x37\x41\x12\x55\xc9\xcb\x6d\xa4\x17\x8b\xfd\x45\x7a\x94\x76\xc1\xef\x5d\x7a\xd4\x55\x24\xcb\x77\x55\xe6\xff\xcb\x55\xf9\xab\x89\x56\x87\x5c\xc4\x80\x1f\x5d\x18\xbc\x33\x8f\xdd\x7f\xde\x21\xdb\xf3\xe4\x09\x07\x8f\x36\x8f\x7b\xdb\x6b\xfd\x48\x4e\x7c\x34\x27\x4a\xdc\x4d\xf3\x2e\x77\x7f\x98\xd4\x25\x86\x0c\
x84\x01\x0a\x36\x6e\x19\xf8\xbd\x50\xa2\x34\xfc\x31\x40\xef\xae\x04\x2e\x7d\xde\x1f\x20\x6e\xae\x72\x36\xca\x61\x4b\xc0\x0f\x04\x85\x9b\x0f\xb2\xfa\x64\xfe\x0f\x94\x24\x17\xe7\xff\x2f\x97\x29\x97\x3e\x6f\x74\xb2\x2e\x2e\xe3\x23\xda\x77\x0a\xe0\x74\x77\xed\xc7\xbb\xb9\x17\xa1\x5e\x54\x95\x3c\xba\xd2\x92\x7a\xed\xf3\x11\xa6\x1c\x7b\x36\xf6\x07\xf6\xf3\xde\xfa\x7c\x8c\x19\x37\xff\x92\x3d\xe0\x6b\x9f\xb7\x73\xac\xf3\xcf\xeb\x39\xd7\xc7\xe1\xbf\x6f\xe4\xdb\xd8\x5f\x87\x73\xfd\xee\x65\xc8\xd7\x62\xb9\x1f\xb9\x61\x9a\x15\x65\x04\xdf\x57\x09\x79\x59\x56\x8e\x9e\xe7\xbc\x1d\xd3\x83\xc3\x83\x3c\xef\x29\x2b\xba\x04\xc7\xd8\x44\x27\xf5\xd5\xfe\xc9\xe6\xb3\xfa\xea\xad\x67\x51\xbe\xbe\x1e\xee\x7f\x88\x4f\xc1\x53\x19\xa4\x38\xab\x7c\x17\x97\x0e\xa3\xff\xba\xd4\x00\x16\x6f\xe5\x97\x37\x1f\x4e\x0d\x8e\x73\xf9\x67\x73\x67\xda\xa7\xc8\xc7\xcf\x4b\x7b\x67\xac\xdf\xf7\x7b\xb8\xaa\x28\xb3\x44\x83\x9b\x28\x7f\xab\x36\x38\x99\x3b\x71\x53\x1f\x05\x9b\xe3\x06\xce\xe1\xe1\xf3\x37\x01\xb8\x55\x99\x19\x5d\xbb\x61\x1e\xa5\xd9\x11\x94\x1f\xc8\x56\x8a\xa0\x2c\xa3\x34\x6c\x59\xf8\x4e\xc7\xde\xee\x11\x04\xb0\x0c\x7c\xed\x68\xf2\xfb\x0b\xd0\xa2\x63\x13\xb6\x02\xcf\x2d\x82\x6f\xf7\xbf\xf5\xc6\xd0\x7d\xd3\x33\xad\x7b\xa2\xeb\x16\x52\x80\x10\x39\x12\x05\x4c\x36\xbd\xfd\xfc\xe5\x96\xe1\x54\x75\xac\x2d\x0c\x95\x1b\x8b\xa3\xff\xfa\x8f\x7e\xfc\xcd\x9d\x7f\xf3\xdf\x15\x41\x0c\xe0\xf1\xbf\xb7\xb7\xb7\x5f\x7a\xe8\xc7\x76\x75\xf2\xe0\xfe\x6d\xfb\xb4\xcb\xed\xe7\xff\x4e\x6f\xbf\xdc\x32\x8e\xa1\x8e\xb9\xd9\xc2\x18\xc9\xcc\x7c\xfc\x41\xe8\x27\xcf\xf4\xbf\x04\xaf\x8d\xb9\x25\xc3\x4d\x81\xc6\xe8\x63\x99\x91\xf5\x0f\xaf\xe2\xf2\x73\xff\x27\x08\x47\x8c\xce\xb0\x8c\x36\x66\x38\x6e\x61\xc8\xfa\xc7\x17\x75\xe1\xbd\x80\x53\x44\xac\xbe\x98\x8e\x65\x6e\x26\x8e\x3f\xbe\xa2\xb3\x77\x06\x2e\x21\x30\xd4\xd9\x4f\xc1\x36\x36\xe8\x04\xec\x7c\x34\xe7\xd5\x85\xac\x8f\xe5\xd1\x87\x41\x9f\x3e\x4c\x7d\x06\x5e\x1b\x8d\x65\x20\xaa\x0b\x79\x3e\x96\xf5\x8f\xc2\x3f\x79\xd0\xfa\x04\x81\xc0\xe8\x63\x93\xb1\x31\x1e\x6e\x21\xf3\xa2\x00\xc6\xaa\x26\x2e\xe4\x8f\x61\xba\xf6\x9a\xc1\x25\x94\xa3\xc5\x9c\x11\x65\xed\xa7\x10\xf5\x2f\x66\x5c\x02\xcf\x8f\x19\x1d\x1b\xcb\x4f\xc1\xdf\xbf\x94\x71\x82\x60\x3a\xb6\x01\x63\xcc\xf4\x91\xac\x69\x06\xcf\x8b\xd6\xc7\x50\xbc\x78\x9a\xff\x22\x92\xa5\x3a\xfe\x69\x0c\x5d\x43\xf6\x04\xbc\xba\x14\xe7\x8c\xf0\x41\x5b\xee\x9f\x30\x3f\x03\x38\x1f\xcd\x7b\x2f\xf1\x51\xa8\xcf\x8f\x9e\xbf\x00\xad\x8d\x7e\x1a\xf6\xe1\xa1\xf4\x0b\xc0\xb1\x5f\xd3\x96\x0c\xf7\x61\x86\x9c\x3c\xb0\x7e\xee\x20\x3a\x56\xdf\xde\x7e\x0d\x83\x34\xa8\xdd\xc4\x4f\xfe\x9e\xb8\x45\x19\x6c\xfe\x41\x11\x14\x49\xd0\xc4\xf0\x77\xf2\xf6\xb6\x1d\x3d\x5b\x70\x8c\xfe\x43\xe6\x77\x6d\x8b\xe7\x88\x04\xcd\x60\x35\x4e\x15\x97\x18\xf0\x8f\xb8\xd6\xa2\xf2\xba\x28\x1b\x65\xe9\x6f\x9f\x7f\x3f\xfe\x2a\xfa\xc7\x3c\xec\xc3\xa9\xa0\x2e\x8c\xe5\x8f\x85\x88\x73\xea\x71\x26\x77\x04\x19\xff\x73\x1e\xcf\xb9\x14\x91\x9e\xc6\x94\x81\xc6\x92\x50\x50\x57\xbe\x60\x84\x33\x2b\x0c\x01\xc1\xcf\x5d\x73\x48\x06\x63\x3e\x75\xcc\x21\xc1\x85\x79\xe1\x27\xe0\xde\x17\x40\xe5\x70\x4c\xe9\x71\xcc\x46\xd6\x19\xa4\x22\x89\x57\x35\xa6\x76\x04\x40\xcd\x06\x52\xed\x0d\x54\xca\x69\xe8\xc6\xa3\x68\xc2\x9b\xd8\xd3\x40\x70\x76\x16\xe5\x37\xde\xc0\x4f\x60\xc3\xd4\x97\xe0\xcc\x75\x66\x2b\x19\x8e\xa6\x1a\x46\x38\xa3\x54\xe4\x47\xdd\x7c\x3f\x81\xb5\x9f\xf0\xcd\x25\x38\xf8\x3a\x17\x66\x6b\x51\xe0\x29\x8f\x42\xb1\xc8\x49\x08\xa6\x52\x0d\xd7\x59\xe8\x08\x22\x29\x0a\xa0\x81\x09\xdd\x4c\x39\x62\x37\x1f\xc5\xd4\x42\x8b\x43\x27\x95\x6a\x4f\x63\x63\x3b\x01\x95\x1f\x11\xff\xe9\x0d\x58\xe4\xad\xb3\x50\x89\x55\x6e\x3e\x62\x86\x73\x8d\x1d\x2b\x88
\x36\x55\x20\xe9\x9a\x41\x2f\x2c\x82\x94\x0c\x82\x64\xc1\x58\x16\x17\x11\x3b\xb6\x2d\x75\x65\x27\xfc\xce\xd1\x58\xe4\xa5\x4e\x0e\x13\xba\xf2\x4c\x50\xf9\x1c\x4b\x39\x96\xb4\x73\x4d\xba\x12\x05\x32\x87\x14\xb9\xf2\x05\x39\x13\xc3\xbc\xc1\xbc\x75\xa2\x8e\xde\x19\xf5\x94\xdb\x11\xdd\x40\x81\xa8\x2d\x92\x8e\xed\x28\x9b\x72\xa9\xb4\xc5\x63\x66\x26\x2a\xa1\x40\x37\x3e\xc7\x66\xfe\x44\xdd\xc2\x5d\x56\xcf\x28\xb5\x98\x25\x0e\x72\x04\xba\xb1\x2d\xb6\xf1\xa8\x1c\xd9\x03\xa5\xf2\x06\x52\x3a\x1b\xb0\xa4\x1d\xd1\x08\x0a\xa0\x98\x91\x92\xa2\x6b\xe4\xc4\x18\xc3\x52\x23\x80\x33\x33\x80\xa2\x1a\xdb\x52\xde\xe6\x18\x57\x38\xd3\xc8\xdc\xb3\xd8\x1a\xa6\x4a\xe8\x4e\x54\x02\x4e\xe6\xdf\x66\x0d\xbd\xb5\x4d\x79\x63\x9b\x3e\x82\xcd\xb0\x74\x4d\xb9\xf1\x06\x72\xed\xa4\x4a\x65\x53\x74\x39\xa3\x4a\x14\x58\xf3\xda\x33\xd1\x1a\x26\xf4\xce\xa3\x1c\x62\x96\xf0\x3b\xfb\xfd\x30\x13\x6f\x02\x90\x97\xaa\x91\x6b\x29\x95\x6b\x3e\xd4\x4e\xf2\x44\x62\x5d\xb2\x13\x44\xcc\x92\x12\x05\x4a\x36\x75\x12\xba\x11\x05\x9e\xf0\x05\x50\xc2\x89\x12\xba\xe6\x7d\x18\xec\xc6\xd5\x6c\x0d\xe8\x45\xc3\xc6\xde\x36\x0b\xc5\xc9\x41\x47\x73\x2f\x95\x09\xdb\x7c\x2a\x44\x61\x45\xf8\x13\x76\xb7\x88\x1e\x6a\x47\xd8\x56\x4e\x02\x62\x6f\x20\xad\xe0\x44\xaa\xdd\x04\xac\x7d\x6e\x58\xc3\x04\xd6\x70\x02\xa2\x19\x05\xb6\x8e\xb9\xad\x1d\x8b\x45\x1e\x47\x36\x8e\xf9\x84\x6c\x4b\x46\x33\xf3\x69\xe5\x0b\x60\xe7\x73\xc4\x60\x96\x0c\x6b\xdb\x92\xd6\x2e\x37\x6c\xd7\x27\x45\x76\x68\xa7\x12\xb2\xcd\x62\x2a\x72\x6c\xee\x44\xac\x67\x36\x4c\x1c\x50\x7b\x5a\x55\x5a\xe4\xc8\xc2\xe7\x18\x52\xe4\x49\x7f\xd1\xb0\x84\x2b\x80\x4a\x9c\xc8\x85\x63\x82\xad\x38\x1a\x6f\x17\x0d\x8b\xbc\x89\x8c\x44\x01\xdc\xbb\x96\x12\xce\xf5\x22\x74\x92\x78\xea\x08\x74\xe5\x28\xd9\xd4\xa6\x78\x42\x1c\xdd\xd7\x8e\xa5\xae\x67\x03\xbc\xc6\x61\xe3\x60\x9e\x36\xc3\x78\x46\xf1\xdf\x7c\x4b\x42\xb3\x54\x42\x50\x78\x08\x97\xa3\x6d\xaa\x1a\xb4\x20\x6d\x73\xcf\xb6\x72\x12\x26\x46\x69\x53\x4f\xb9\xa5\xe4\x95\x6d\x92\x68\x69\xf6\xe3\x4d\xb9\x70\x95\x3c\xc2\xeb\xf3\x2d\xa9\x58\x9a\xcf\x7c\x82\x02\xbf\x76\x29\x3e\x75\xac\x79\x75\x2a\x57\xb9\xf6\x34\x7a\xe8\x9b\x64\x8f\x9f\x5e\x05\x29\x68\x1c\x8d\x5c\x7b\x42\x3c\x75\xcc\xe1\xca\x4e\x9e\x90\x33\x22\x87\x8e\x35\x9f\x3a\x03\x36\xb5\xa9\x15\xb2\xa9\x82\x0e\x4c\xb0\xe3\xc2\x3d\x4d\x60\xed\x0d\x24\x74\x4e\x93\x4d\xd1\x8d\xf3\xab\x68\x32\xe5\x1a\x26\xc6\xab\x34\x79\xc9\xc3\x14\xf3\x8a\x0b\xf3\xb5\x6d\x29\xe1\x32\xa2\x91\x2f\xcc\xeb\xc0\x02\x65\xc7\x4f\x7a\x37\x4b\x94\xda\x17\x94\x12\xeb\xaa\x97\x2a\x65\xab\x93\x17\x78\x7d\x3e\xe6\xb0\x36\x4b\x8d\x67\x66\xe7\x1b\x67\xa6\x94\xfb\xcc\xdb\xeb\x3b\xd5\x7f\x54\xcf\x28\x19\xdb\x47\x0d\x9b\x87\xc1\xac\x51\xdb\xf9\xad\x0e\x32\x39\xf2\x12\x3e\xf2\x04\x10\x2f\x2d\x84\xe0\x36\x4f\xa1\xe0\xaf\x5d\x01\xac\xdd\x5d\xb7\xde\x7e\x7d\x89\x37\x10\x43\xdb\x52\x09\xc7\x24\xb7\x3e\xc7\xe6\x5e\xc4\x7e\x9f\x6b\xf7\x95\x6c\x11\xdf\x45\x41\xad\xf7\xfe\x7d\x66\x82\xca\x36\xa5\xc2\xb1\xda\x35\xd2\x30\x59\x91\xae\x46\x36\x2e\xf6\x1f\x3a\x2c\x21\x05\x1a\x3f\x01\xcd\xcc\x92\x32\xdf\x8c\x4b\x6f\xc0\x12\xd8\x9f\xd9\xe6\xb6\x84\x29\x5b\xc2\xe6\xdc\xfe\xe4\xc2\x36\xc9\x95\x1f\xb1\xab\x20\x95\x91\xdd\x60\x5d\xe1\x4b\xdb\xb2\x43\xdb\xca\x77\x8e\x39\x2f\x3d\x93\xae\x5c\x4b\xad\x61\x3b\xe6\xa0\x4f\xe1\x5e\x7f\x3d\x53\xdd\x39\xdc\xe1\xde\xce\x31\x65\x12\x26\x88\x08\x34\x36\xf6\x28\x79\xe3\x58\x62\xd8\xfa\xe8\x54\xde\x79\x1c\xb9\x75\x2c\xa9\x10\x27\x67\x7c\xe7\x98\x44\x8a\xd8\x08\x26\x60\xe5\x62\x5f\x21\xa0\x44\xe4\x4b\x4f\xe4\xd4\xef\x07\xbe\x4c\x88\x70\x66\x3e\x11\xe2\xc8\x08\x2d\x40\x7c\x17\x27\x6a\xe6\x98\xf7\x21\xa4\x9e\x90\x63\x31\xe1\x5c\x67\xbe\x8b\x02\x28\x2
0\x65\x84\x8e\xb5\xc2\x30\x9f\x16\x0d\x9b\xb8\x4a\x1e\x7b\xd4\x10\x71\x61\xef\xf3\x4c\x23\x94\xb8\x55\xf7\xb7\xc5\xaf\xc4\xc9\x36\x74\x06\x12\x82\x1c\x1b\xd8\xa6\xd4\xf8\x1c\x91\x73\x89\xbc\xf2\x39\xe6\x3f\x67\x14\x20\xec\x86\xde\x05\x96\xbc\xf6\xb1\x7f\xe2\x1e\xb6\x73\x8d\x8c\x5d\x4b\x6e\xf5\x76\xd6\xe9\x31\xf6\xb5\x95\x13\x31\x0f\x4b\xce\xe7\x75\xc2\x4e\xb9\xa4\x44\x30\x19\x22\x8f\x1b\x76\xb6\x34\x60\x57\xfe\x44\x45\x30\xb9\x0f\x97\x1a\xfb\x30\xa3\xa4\xdc\x8b\x68\x02\xa6\x00\x71\x31\x58\xaa\x61\xbe\xc7\x81\x7d\x32\xfe\x9b\x70\x4c\x62\xca\x25\x38\x3e\x81\x01\x96\xd5\xcc\x94\x4b\x1c\x6b\x67\x26\x1f\x3b\x1c\xb9\xf5\x06\x12\xb1\xd4\xc3\xed\x6c\xa0\xae\x21\xbe\x6e\xb1\x08\x26\xe4\xca\xc3\x71\x91\xc9\x13\xd7\x92\x90\x4f\xf1\x85\xc7\x91\x6b\xcf\x54\x30\xdc\x95\x23\x28\x5d\x7c\x1a\x11\x84\x3c\x9a\xd7\xbe\x20\x6f\x45\x8e\x28\xa1\x00\x1a\xcf\xe4\xab\x3e\x47\x38\xb1\xb7\x17\xf6\x35\x38\x97\xdd\x70\xed\x51\xc3\x44\x1c\x6d\x1f\x24\x02\x2c\xd5\x08\x4e\x4d\x02\x2d\x0c\x1e\x18\x96\x92\x49\x7a\xc2\x97\x8e\xc6\xf6\x6b\x22\x63\x2e\x44\x86\x6d\xc2\xd0\x4d\x68\x12\x26\xc3\x95\x27\x28\x53\x0e\xc8\x43\x38\x50\x91\x67\xaa\x8f\x6a\x82\x0a\x5f\x00\x8d\xc8\xd3\x23\x9d\x20\xe5\xa5\xc9\x37\xde\x36\x9b\x9a\x84\x23\xe9\xbc\xca\x1b\x88\x98\x72\xc6\x70\xe5\x99\x46\xe8\x99\x74\xec\x9a\xce\x90\x0b\x91\x6c\x5b\xea\xda\xe5\xd8\xff\xf1\x06\xa0\xf1\x12\xbe\x70\x98\x4c\x32\x12\x50\x7a\x03\x07\x59\x03\x3f\xf7\x04\x75\x6d\x5b\x52\x2c\xf2\x0f\x53\x0e\x48\xc8\x33\x69\xca\xd1\x58\x43\x33\x48\xde\x20\x55\x56\x07\xcc\x94\x43\xe5\x12\x00\x55\x01\x40\xf5\xb9\x10\x2d\xb0\x8f\x10\x05\xa7\x86\xa9\xbf\x82\x89\x32\xe5\x8c\x3e\x27\xe0\xb2\x29\x07\xba\xb8\x39\x5f\xc7\xcd\x7c\xc7\xec\x6d\x19\x05\x93\xee\xba\x02\x72\x60\xc4\xe0\x51\x21\x9e\x96\xc0\x50\x1f\xf5\x98\x97\x55\x9d\x88\xfb\xeb\x23\x7d\x4c\x03\x75\x3c\x64\x75\xc3\x98\xda\x56\x16\x7a\x02\x9d\xe2\xb8\x39\x33\xe3\x56\x56\x9e\xf0\x54\xfb\x0d\x59\x79\x0d\xb9\xf3\x4d\x69\x67\x0f\xa4\x1c\x4e\xd4\xdc\xa3\x86\xbb\x36\x67\xe2\xd5\xcc\xd1\xd8\xd2\x36\x87\x2b\x87\x02\xb1\x28\xa0\x18\xcb\xc4\xb5\xd4\x21\xb6\x0d\x2c\x43\x5f\x58\x21\x91\x77\x64\x71\x22\x91\x5e\x3a\x0f\x6d\x6b\x1e\x7a\xd4\xb0\x08\x34\x36\xc3\x7f\xdb\x1a\xbb\x73\xcd\x61\xea\x09\x46\x08\x13\xba\x70\x4c\x7e\x07\x29\x94\x7a\x09\x89\xe5\x5d\x71\xd1\x3c\x04\xc2\x2a\x87\x0d\xce\x5b\x9e\x90\x28\x9c\xdc\x0f\x5d\x6b\x1e\x2a\x86\xac\x19\x13\x50\x78\x1c\x3b\x70\x05\x84\x65\x80\xc7\x57\xde\x40\x09\xbd\x04\xac\x1d\x4b\xde\xd9\x96\x94\x7b\x93\x38\x84\x13\x09\x41\xaa\xcd\xb5\xb0\x8f\x23\x5c\xc1\x98\x8a\x0d\x7b\xb6\xbe\xfb\xd0\xa7\x8c\x14\x26\x46\xe8\x08\x60\xeb\x09\xf4\x10\xd3\x28\x0a\xa8\xf2\x85\x87\x4a\xe4\x58\xc9\xe9\xe6\x86\xae\xa9\x62\xdf\x9b\xfb\x93\x38\x74\x04\x1a\xed\xf1\xba\x02\x8f\x65\xbb\xf2\xd2\x18\xc3\xbf\x40\xbb\xba\x13\x27\xf2\x1a\xfb\x25\x87\x63\xeb\x9e\x96\x10\x0e\x40\x04\x29\xb9\x71\xad\xce\x6f\x8a\x13\x1f\x49\x03\x09\x89\x82\x8a\xa0\xf0\x54\x07\x26\xaa\x9c\x06\xfb\x4f\xb5\x9e\x71\x6c\xee\xe3\x35\x9b\x4f\x05\xe6\x93\x97\xd0\x84\x38\x91\x33\x6f\x00\x43\xbc\x86\x33\x99\x16\xe2\xc4\xcf\x5c\x53\xce\x44\x01\xed\xf0\xdf\x81\xd6\xe1\xb4\x13\x50\xb4\x73\x04\x54\x39\x5a\xcb\xbb\xb5\x47\x91\x25\x96\xa5\x63\x76\x32\xc0\xb0\x6c\x13\xd3\xdb\xf2\x6e\xe7\x58\x4a\x38\xb3\xe6\xa1\x18\xa9\x3a\x30\x24\x5d\x21\x25\xc9\xe0\x55\x49\x27\x86\x92\xca\x89\x53\x2e\xdd\xdb\x93\x4c\x78\x1c\xbb\xf3\x05\x1e\xe7\xa4\x7b\x7f\x5c\x75\x71\x02\xad\x1d\x25\x5f\x05\x11\xbb\xb2\x07\xe2\x33\xad\x1a\xd9\xc6\x46\x91\x13\xe3\xe9\x44\x46\xd8\xe7\x38\x1a\x93\xc2\xc1\xb6\x9a\x46\xf4\xc3\x23\x07\x71\x6c\x7d\x10\x23\x95\x55\x48\x49\x53\x81\xbc\x04\x40\x1a\xa9\x06\x1a\x8b\x51\x1c\x
71\x61\x2e\xab\x63\x52\xd2\x0d\x7e\xa2\xea\x44\x24\x4d\x4a\xcd\x18\x23\x59\x31\x7c\x5e\xd2\x8c\x7a\x9a\x12\xb5\x34\x29\x9f\xc7\x68\xf3\xf5\x34\xa2\x69\x31\xcc\x0f\xf1\x02\xb6\xfa\xc4\x44\xd2\x98\x1c\xeb\x46\x37\xf7\xe2\x7d\x5e\x52\x34\x83\x64\x55\xc2\xc0\x78\xf1\x5a\x0a\x8f\xf2\x6b\xdf\x52\xa6\x97\x7c\x38\xf6\x53\xae\x09\x6b\xcf\x54\x4b\xec\xaf\x5a\x9b\x8d\x31\x0e\x47\xd3\x89\xa1\xa1\x1a\xc3\x31\x00\xd2\x7c\xa9\xc1\x58\x37\x54\x59\x45\xd2\x52\x47\x2a\xaf\xc7\x2a\x30\xe2\x6d\xda\x8d\xed\xe8\xd9\x8f\xe9\xf1\x77\xf7\x0e\xbc\x00\x0b\x10\x23\x0c\x53\x56\x8d\xa1\xd1\x8e\x1d\xd3\x23\x05\x60\x19\xdd\x9f\x8e\x05\xd2\x52\x1f\x1b\xb4\x43\xf1\x84\x63\xf9\xab\x40\x69\xf9\xa7\xeb\x24\xd0\x14\x02\x68\x3a\xf1\xc4\x6b\xc6\x50\x07\x63\x7e\xa1\x10\x06\x2d\x35\x4a\xe6\x0a\xf4\xce\x17\x5a\x1f\x98\x4b\xdb\x1c\xc7\x9c\x29\x97\x90\x1b\x47\x40\x8d\xc8\xd1\x94\x6d\x89\x38\x1f\x1d\x5c\xe0\x41\xeb\x83\x67\x83\x5e\x3f\x34\xba\x74\x04\xe2\x59\x1f\xb4\xa3\x18\x86\x4a\xe0\x75\xb9\x12\xeb\xa4\x2a\x82\x6b\xf2\x92\xfe\x68\x8e\xc5\x93\x38\xd6\xc0\xdd\xa5\xfb\xd9\xd4\x24\x65\x04\x53\x27\xb7\x29\xe0\x73\x31\xa8\xfc\x04\x35\x1e\x35\x6c\xf5\x1b\xfb\x7a\x47\x27\xfa\xdc\x59\x1e\x42\x4a\xae\xbd\xc4\xc9\x9d\xa6\xa5\x6b\xaa\x5a\x2b\x64\x93\x32\x61\x5b\x12\x61\x4c\x24\xb4\xd4\x88\xda\xb7\xe4\xa6\x8f\x9f\xcf\xfa\x90\xe0\xb8\x6e\x87\x92\x71\x7f\x32\x67\xa9\xd1\x24\x1c\x88\xb5\x9d\xa0\x6a\x46\xa9\xb5\x4d\x95\x08\x46\x6c\xe3\x9b\xf7\xa1\xc5\x64\x21\x8e\x7d\x98\x0e\x98\xa2\xad\x47\xa1\xca\xe7\x98\x0e\xfe\x40\x8e\x5c\xf3\xbe\xd6\x4d\x40\xc0\x04\xad\x21\xd1\xd5\xba\x90\x42\xd8\x57\x40\x2e\x62\xb0\x9d\x64\xde\x40\x26\xbc\x36\xbe\x31\x48\xe3\x0e\xd7\x7b\xfb\x01\x0b\x91\xdf\x4e\xc5\x36\x36\x4b\xe5\xfe\x6f\x4f\x63\x1a\xa7\xe9\xc7\x5a\x76\xd8\xcb\xa6\x93\x83\x00\xca\x45\x1f\x77\xb1\x8e\xce\x52\x16\x79\xda\xd1\xd8\x81\xb3\x82\xf8\xfe\x04\x7e\x3b\xfe\x3b\xe8\x69\x3a\xb1\x97\x1e\x9f\x82\x65\x31\x76\x72\xec\xe3\xcf\x71\xed\xc7\xa8\xb8\xa6\x4e\x55\xde\x4b\x54\x00\x93\x6d\x0f\xe7\xd4\x26\xf6\x63\x75\x81\x4e\x1d\x0a\x34\xe2\x58\xae\xbd\x54\xae\x31\xdc\x8e\x97\xe4\xdc\xa3\xfc\xb9\x63\x39\xc8\xe3\x58\xcb\xb6\xa4\xaa\xf5\xdb\xfd\x3c\x63\x22\xe5\x7e\xc2\x13\x8e\x51\xa2\xc0\x78\x9d\x1e\x83\xc2\xb9\x81\x8c\x54\x73\x48\xb9\x96\x54\xef\xe3\x4d\x4b\xd7\x15\x1b\x3b\x9f\x6b\xe0\x78\xa6\x31\xad\x8d\x1a\x04\x0d\x8c\x58\xe6\x8d\x98\x9e\xab\x7b\x7e\x02\xb9\xf6\x2d\x69\xed\x00\xa9\xf6\x04\x20\x75\x39\xf1\x70\x7d\x71\x8e\x81\x16\x6d\x8e\x10\xcb\x3c\x17\x83\x7b\xc7\x94\x75\x5f\xa0\xb7\x57\xf4\x6b\x87\xef\x61\xf9\x73\x48\x42\x70\xa0\xae\x60\xaa\xd0\xb6\xf9\x34\xb0\x2d\xb4\x7b\xbe\xa6\xea\x8e\x39\xa7\xe7\x4a\xae\xb7\xfe\x79\xfc\x94\x7b\x26\x22\x34\xec\xf7\x2d\x89\xb2\xcd\x2d\x3d\x67\x70\xde\x83\x2a\x3c\xde\x13\x9e\x7c\x0e\xf9\x2b\x2f\x55\x91\x33\x96\x86\x4b\x93\x24\xbd\x89\x9a\xcf\x2c\xb0\xc3\x36\x87\x61\x38\x14\x20\xba\x7c\xf2\xbc\x2e\x3d\xb1\xad\x15\x4c\x1e\xca\x83\xbf\xc1\x36\x3f\x6e\x73\x50\xec\xcf\x1f\x8d\x18\xec\x7d\xcf\xa3\x66\x28\xb4\xd4\xbc\xf4\xf3\xd2\x36\x1f\x2b\x40\x65\x95\x98\xd7\x55\x40\xb3\x0a\x21\x2f\x81\x31\x34\x2c\xa2\xcd\x67\xf0\x9c\xe7\xfb\x06\x3f\x52\x08\x1a\xe8\x48\x5d\x28\x06\xc9\x4b\xdb\x9c\x35\x11\xd0\x54\x40\x8f\x15\xa4\x2e\x35\x02\x2c\x2c\x42\x9e\x6b\x58\x9e\x3c\x2d\xa9\x23\x22\x95\xc6\x2a\x07\xc6\xf4\x4c\x35\x86\x23\x7d\x8c\x78\x1d\xa9\x92\xca\xc1\xa9\x3a\x96\x0c\x9d\x28\x79\x1d\x61\xd9\x6c\x3b\x3c\x3d\x0c\xac\xa3\xd2\xb6\xf5\x9d\x8f\x8a\x21\x8f\x74\x12\x2c\xc0\x88\x68\x63\x04\x96\xe7\xe1\x1a\x27\x4e\xf1\xf7\x8e\x56\xa0\x1b\x63\x7e\xa4\xea\xe4\xc4\xb6\x54\x9c\x2f\x0f\xb9\xd8\x67\xc1\x18\x58\x0a\x40\x8f\xea\x98\x96\x15\x2c\xfb\x1d\xa6\xe9\x70\x7d\
xac\x13\x24\x8b\x7d\xb2\xb4\xcd\x27\x0a\x50\x79\x40\xf0\x8e\x45\x38\xbc\x02\x70\x9c\x00\x3a\xf6\xf7\x2a\xc1\x1b\x2a\xf0\x59\xd3\x78\xbe\x2e\x6d\xf3\x43\x2c\xc4\x63\x0c\xc4\xf6\x31\x10\x5e\xac\x75\xe1\x00\xed\x7c\x01\x94\x0e\xb7\xf7\xdd\x44\x6d\x5b\x52\x3d\x33\xfd\x95\x2f\x80\x81\x6d\xc5\xff\x8a\xbe\xfb\x54\xbf\x94\x9c\x0f\x04\xb0\x36\x3a\x9d\x57\x60\x62\xd0\xb3\x2b\x7e\xb9\xef\xb3\x21\x3d\x3c\x9e\xa3\xd0\xb3\x01\xd8\xc1\x88\x6e\x7d\xb2\x23\xd0\x6b\x17\xfb\x1f\x9c\xd3\xfe\xa4\x2f\x76\x34\xf6\x92\xce\xef\xfd\x19\x12\xc7\x2a\x0b\xc6\x3c\xa7\x00\x99\xb7\x88\x83\x1e\x77\xb9\xfc\xde\x8f\x98\xc6\x21\xd7\x3f\xd2\xcf\xbd\xde\x9e\xc3\xdb\xeb\xf5\xe3\xb1\x4f\x75\x34\xf6\x48\x8f\xd4\xa5\x6e\xf0\x92\x8e\xe6\x47\xf3\x8e\x74\x32\x06\x2c\xe0\xb1\xfd\x1c\xdf\xc7\x7e\xf1\xd9\x0e\x8f\xe0\xb6\xf6\xd0\xd2\x0b\x64\x45\x31\x64\xfe\x72\x1c\x62\xc2\xc5\x88\xf9\xb6\x18\x31\xf7\x73\xee\xf9\x9a\x3c\x52\x76\x8b\x75\x88\x6b\xc5\x3d\x2e\x4a\xe4\x68\x1c\x3f\x8f\x75\xf3\x50\x73\x2d\xa2\xeb\xf7\xf6\x78\xfd\x43\x1f\x15\x10\xfe\x2e\x3b\xfa\x3b\xff\xd6\x8f\x39\xb2\x8b\x8e\x96\x0b\xba\x54\xc3\xd1\x95\x5c\x00\xdf\xc3\x78\x38\x66\x27\xaf\x99\xde\x0f\xab\xb9\x67\x82\xda\xb7\xd4\xd6\x6f\x1b\x14\x58\x2f\xf5\x39\x35\x1f\x31\x53\x23\x01\x5d\x7e\x3c\x22\x57\xde\xc4\x5f\x05\xd6\xfc\xf9\x1a\x2f\x23\x7b\x47\x3c\x71\xa8\xcb\x27\x74\x01\x95\xae\xa5\x4a\xd8\x07\xc3\xd4\x59\x79\x23\x62\xcb\x85\xb9\xa7\x99\x43\x3c\xbe\xf0\x78\x62\x0a\x28\xbe\xf2\x05\x10\x2b\x69\x4c\x7b\x16\x28\x7c\x21\x2e\x7d\x4b\x46\x30\x1a\x62\x18\xa9\x63\x29\x07\x5b\x77\x05\xb9\xf6\x22\xa6\x34\xa2\x43\x7d\xfc\xe8\xeb\x79\x6d\x27\x39\xb2\x07\xea\x23\x5c\xe7\x94\x6d\x49\x8f\x38\x37\xb7\x06\xca\x37\xb8\x63\xf6\x39\x40\x0d\x27\xca\x69\x5f\x97\x7a\xca\xbd\x14\xdc\x7b\x6d\xbf\xa7\xad\xd7\x8f\x72\x42\x72\xbb\xcf\x0b\x3d\x01\x45\xb3\x81\xbf\xea\xc7\xd4\xfa\x31\x8c\xb8\xef\x3f\xc6\x25\x0a\x80\xb3\xf2\xcd\x27\x62\x86\x64\xc2\x1b\x48\x68\xdf\xa7\xc2\xf2\xc2\xb5\x98\xc8\x49\xa5\x33\x91\x63\x31\x62\xa2\x36\x9f\x5e\x17\xa1\x23\x3c\xbc\x8c\x3b\x6d\x1d\xa2\x1c\x74\x02\xdb\xbb\x4f\x0d\x0b\x8f\xe2\xe3\x99\xa6\x62\x9a\x4a\x7b\x20\x21\x47\x00\x95\x2f\xa0\x95\x37\x99\x9f\xfa\xb4\x2e\x36\x79\xc0\x1c\xe6\x3e\x4f\x4c\x55\x01\x1c\xd7\x8a\xb4\x21\x80\xc6\x6d\xfb\x00\xf3\x50\x9a\x60\xde\xb0\x6b\x98\x80\xae\x16\x36\xf9\x02\xd7\xae\x8e\xe9\x34\x8e\x25\x67\x58\x56\xc6\x1e\x36\x20\xa6\x60\x82\xb6\x8e\x4e\xd6\x5e\x02\x76\xae\x40\x13\xcf\xb1\x1d\xeb\xc3\x5e\xbf\x9e\x6a\x9b\xe2\x8b\x83\x9e\x0d\xfc\xca\x13\xe8\x95\xc3\x91\x67\x78\x86\x3b\x97\x63\xe2\x76\x4f\xe3\x5a\x0c\xfe\x00\x2f\x7c\x01\x95\x58\xaf\x97\xa3\xa7\x36\xcf\x37\x49\x50\xb9\x96\xea\x73\xb1\x8a\x8e\x6b\xd5\x25\x60\x11\x4c\x50\xed\x08\x68\x2d\x72\x2d\xac\xf0\x0c\x56\x08\x13\x90\x40\xbc\x56\x9c\x57\x90\xad\x4d\x34\x96\x92\x2f\xbd\xb8\xdd\xf3\xe9\xf2\x11\x81\xcc\xbd\x10\x5f\x93\x57\x9e\x00\x2a\x47\xe0\x9b\xa5\xce\xd4\x73\x5d\xfc\x36\x1f\x31\xf8\xff\x53\xc5\x94\xd7\xbe\x25\xad\xec\x01\x6a\xe7\xc8\xd6\x7c\xca\xa1\x52\xda\xf7\x15\x2d\x25\xb7\xec\xb6\x7e\x55\xb9\x40\x27\x09\xd7\x24\x11\x4c\xe7\x67\x39\xca\xa1\x2f\xb7\x8f\x79\x6d\x4c\xf0\x04\x7a\x6d\x9b\xdb\x43\x6c\x38\xf0\xc7\x94\x1b\xa7\xaf\xf7\x6d\xf3\x69\x37\x4b\xe5\xec\x50\xc3\x45\xf3\x55\x2f\x9f\xc8\xc6\x72\x4e\x65\xe4\x63\xff\x67\x19\x53\x2e\x96\x97\x3a\x60\x97\x7a\xdc\xe5\x03\x16\xa7\x3c\x89\x61\x8e\x6c\x6a\x55\x8b\x9c\x34\x7e\x96\x27\x0b\xa5\xf1\xd1\x58\xee\x5c\x8f\xc4\x29\x87\x6b\x41\xc0\x3e\xaa\x63\xa4\x2d\xc1\x36\x9e\x0a\xe4\xa6\xdd\x47\xc0\xb8\xb8\x78\xea\x58\xab\xb6\x2f\x27\x8e\x79\x17\x00\x89\xb7\x08\x79\xa9\xc7\x8e\xa4\x92\xf4\x58\x03\x22\x6d\x71\x4a\xe6\x99\x65\xdb\xcf\x14\x39\x32\x9e\x2a
\x87\x9a\x35\xc7\x7e\xc0\x6d\x7b\xbc\x7c\xe1\x09\xf4\x60\x66\x0e\xeb\x99\x25\x93\x76\x2a\xaf\x61\x82\xb6\xbe\x80\x6a\x2f\x9d\x4f\xb9\x54\x6d\x6c\x8b\x09\xc5\x44\x2e\x1c\x93\xaf\x7c\x8b\x89\xc4\x31\xb0\x35\xd0\xd6\xbc\xfb\x6b\xd9\x54\x63\xbf\x63\xdf\xb9\xef\x43\x78\x03\x40\xe0\xef\x26\x59\x84\x22\xda\xc6\x60\x0c\x64\x83\x6f\xe9\x8a\xc4\x11\xf9\x9f\x22\x47\x13\x9e\xc5\xd4\xb3\xa8\x08\x2d\xd0\xed\x9d\xb4\x75\x95\x25\x25\x22\x7f\x32\x7e\x2a\x72\xed\x3e\x4a\x64\x71\xea\x3e\x97\x1b\xe9\xc4\x50\xd0\x0c\xbf\xe5\x8d\x18\x31\xf4\x23\xf6\xf3\x02\xb9\x9d\x35\xf7\x1b\x91\x27\x7d\x91\xb3\x13\x71\x22\x95\x22\x47\x36\x4e\x84\x79\x7d\x91\x47\xd3\x47\x25\x9b\xba\xa6\x1d\xf6\xf0\x47\x3a\x41\x2a\x3a\x31\xe4\x75\xa4\x44\xe2\x88\x08\xc5\x84\x8c\x3d\x4d\x0c\x2d\xbd\x68\xfb\x5b\x5e\x98\x85\xe2\x58\x06\x06\x92\xda\x78\x8b\x6b\x66\xc0\xd3\x82\x66\x3c\xf1\x4b\x4d\xec\x6c\x8f\xc2\x73\x86\x5b\xc7\x24\x22\x2e\x01\xc5\x5b\xf0\x21\xa5\x44\x22\x7f\xe8\x31\xe3\xf5\x8e\x00\x90\x34\xb5\xcd\x71\x65\xde\x40\xea\xa3\x1a\xa3\xb9\xaa\x13\x51\xef\x8f\x0b\xd7\x14\x6b\x9f\xe2\x57\x4e\xdb\xdf\xa2\xe5\xe3\xbd\x38\xa5\xeb\x95\x57\x1a\x05\x86\x20\xe1\x49\x6f\xa2\x54\x6d\x0c\x4b\x8c\xba\xeb\x9f\x3f\xd3\x06\x29\x03\xe3\xeb\xf4\x73\x8c\x2a\x3f\xe1\x0b\xd7\x54\x42\xdf\x92\x57\x0e\x65\xb4\x72\xd9\xf7\xb7\xb9\xc4\xc9\xb9\x30\xd7\x55\x43\xd6\x54\x70\xe8\x31\x46\xce\xc0\x1f\xce\xc0\x36\x0e\x88\x23\x7d\x9e\x10\x11\x17\x0f\x79\x40\xd2\x23\x15\x48\x86\x45\x38\x92\x3e\x36\xe8\x33\x5d\xa8\xcf\x78\xd2\xd5\x65\xef\xd9\xa3\xe8\x75\xcd\xa5\xc0\xd0\xef\xd6\x18\x42\x0a\x60\x1b\x22\xf0\xfd\xbd\x1d\xb7\x35\x37\xce\x1d\x35\xac\x03\x2f\xe8\x69\xf7\x24\x1d\x8e\x89\x9e\xf7\x23\x1f\x62\x8d\x00\x0e\x88\x79\xa0\xf3\xaa\x62\xc4\x40\xd0\x40\x58\xb6\x6b\x8c\xd4\x99\x0a\x90\xa9\x00\x30\x07\x63\x75\x61\x90\x32\x50\x63\x47\x32\x39\x7a\xe7\x60\x7f\x61\xa9\xbb\x19\xb9\x8d\x0d\x02\x8c\x8c\x18\xec\xeb\x93\xa8\xd5\x45\x41\x6a\x1c\x93\xdf\x60\xba\x5b\x7d\x20\x8b\xd0\xe2\xd4\xb6\x4f\xef\x59\x2c\xf1\xb3\xfb\x82\xbd\x8e\x95\x9d\xae\xbf\x5c\xe7\x89\xfe\xb6\xfb\x1d\x9d\x5d\x2a\x04\x2d\x1b\x63\x7a\xd1\xd6\xb4\x11\x43\x3f\xc7\x59\xf6\x40\x07\xa6\x59\xe4\xd8\xb5\x2b\xd0\x03\xaf\xcb\x1d\x0a\x8f\x82\xdf\x70\x5c\xc1\xb6\x8e\xed\x55\x8f\x81\x75\xa2\xab\x0a\xc6\xb3\xd7\x2f\x26\x14\xfb\xde\x1e\xf6\x39\x5e\x62\x84\xd2\x6e\x5c\xcc\x08\xb0\x50\x39\xb6\xa5\x53\x33\x1c\x49\xc1\x35\x8c\xf6\x10\x42\x0e\x62\xde\x2c\x54\xe0\x9f\xd8\x57\x9b\x9b\x25\xf1\xb4\xa3\x85\xac\x1d\x8e\xd9\xb6\x39\xd7\x55\xfc\x24\x75\xf1\x5e\xab\x0b\x97\x6d\xf8\xa5\x4d\x38\x2b\xd7\x7c\x42\x0e\xc7\x12\xde\x51\x6c\x17\x05\x5c\xab\xa8\xb9\x93\xa0\x35\xae\xe5\xda\xbe\x7d\x74\xee\xd3\xdb\x7d\x26\x14\x08\x88\x10\x47\xe3\x29\xa6\xfd\x39\x17\xc3\xf4\xb3\xe4\x74\x10\x86\x7d\xbd\x51\x78\x94\xbc\xf2\xb8\x43\xdd\x31\xf0\x92\xa7\xda\x36\x95\x0b\xf9\x4a\x78\xde\x73\xed\xf7\xfe\x8c\xf0\x78\x5e\x67\xeb\x2f\xf2\x86\x2e\x3e\x76\x7b\x40\xd8\xbe\xda\x7d\xa0\xfd\xde\xe8\xb1\xed\xcc\x4c\xec\xc7\xae\xcd\xcf\xa6\x1f\xa0\x3b\x6c\x69\x62\xb2\x0f\xe4\x3a\x6c\xdb\xa3\xd3\x0d\x55\x57\xc7\xad\x2c\x25\x35\x46\x23\x5c\xc7\x18\x04\xbf\x58\x6a\x4a\xf6\xbc\xef\xc7\xde\xcb\x3a\x33\x14\x39\x12\xc7\xb9\xe3\xe7\x28\x06\xf6\x7e\x2f\x89\x20\x73\x7b\x20\xd5\x90\xa2\x13\x9f\x1b\xb2\xdd\x7e\xe2\x70\xe6\x58\xc8\xb4\x71\x1e\xcd\x0d\x71\xee\xde\x38\x1a\xdd\xda\x43\xd7\x1b\xc3\xba\x4b\xd7\x7e\xdb\x0f\x97\xc9\x2e\x6f\x56\xc2\x47\x8e\xdd\x39\xa6\xd2\xed\xf5\x0d\xe6\xf5\x2c\xca\x47\xfa\x9a\xa8\x67\x0d\xc4\xfe\xb2\x74\x29\x35\x87\x11\xf3\x4a\x5e\xd6\xc6\x80\xa3\x3c\x74\x18\xbf\xab\x97\xda\xd2\xb5\xaf\x99\x8d\xca\xe1\xe8\xda\x4f\x40\x03\x13\x14\x3b\xda\x85\x7d\xb2\x43\x2e\xc7\x2
7\xbe\x00\x9a\xa5\x39\x44\xfe\xc4\xaf\x61\x52\x94\x1e\x35\x2c\x5c\x73\x88\x66\xa9\xba\x82\x89\x8f\x7c\x66\xdf\xe7\x3d\xa1\x23\x76\xcc\x3e\x4f\xef\x68\x3e\xe4\x89\xad\x5c\x78\xba\xed\x1f\x5b\x24\xab\xa9\x86\x23\x99\x38\xa7\xc5\xbc\x1f\xe0\xda\xb9\xa7\x97\x52\x91\xd3\xf9\xeb\xfd\x9c\x65\xbf\x97\xa0\x2f\x35\xb1\x54\x70\x4d\xce\x31\x65\x67\x4b\xe4\xd8\x20\xd4\xc7\xb6\x27\x06\xe8\xbd\x2f\x8e\x8e\xf7\xbd\x74\x83\x5e\x68\x40\x5d\x1a\x31\x5a\xa8\x24\x3d\x51\x48\xf9\x51\x35\x86\xa6\x06\xa4\xa5\x1e\x93\x38\xa6\xd0\x52\xa3\xca\x2a\x2f\x8f\xcf\xae\xa7\x5c\x02\xee\xfb\xe7\x67\x64\x9d\x18\x4a\x60\x4c\x6b\x9a\x31\x9c\x58\x84\x3f\x32\xc8\xe7\xfa\xb6\xef\x9d\xb4\x30\x9e\xfb\x3a\xf0\x28\xff\x22\x97\x7a\x8c\x0c\x9d\x94\x24\x3d\xf6\x1f\x55\x42\xd6\x2d\x52\xe2\x55\x02\x2d\xf5\x35\x91\x4a\xe3\xa7\x25\xf6\x6d\x9a\x41\x2f\xa4\xed\xeb\xb4\x2b\x00\x18\x1a\x4f\x4b\x2a\x4f\x1b\x26\x60\xf9\xa5\xc1\x93\xbe\xb0\x7a\xd6\x4b\xe6\x9d\xf3\xdb\xbe\x56\xcb\xbf\x53\xff\x0a\x64\x56\x8f\xde\xa0\xfd\xa4\x57\x45\xa4\x7d\x4f\x4b\xef\xf6\x0f\xfb\xeb\xda\x75\x18\x0a\x41\x2f\xd4\x18\x4d\x2c\xd2\xe1\x0d\x24\x4b\x3a\x71\x8f\x65\x70\xe8\x39\xf4\x36\xdc\xe7\x61\x87\x31\xd7\xe5\x41\x02\x5d\x05\xf4\x44\x35\x86\x3c\x88\xf9\x23\xf8\xd8\x0f\xe0\x75\x19\xb4\x3f\x91\x48\x1c\x07\xaf\xd1\x04\xc6\x60\xa1\x5c\xda\x1b\xb9\x8a\xf3\xc5\x3e\xc9\x35\xbe\x77\x3a\x4f\x3c\xf7\x6c\x31\x8e\xa9\xb0\xaa\xe1\x40\x6d\xfb\x41\xd3\x43\xbf\x2d\x5b\xe3\x38\xe8\xe3\x5a\x32\x81\x1f\x93\x7f\xeb\x6b\x40\x09\x9b\x36\x1e\x65\xb6\xe5\x20\x51\x90\x57\xb8\xe6\x77\xb0\x8f\xc4\xb1\x5b\x18\xd6\x3e\x83\x71\xa9\xb5\x28\xac\x6a\x2f\xa1\x49\x18\xb1\xba\x41\x3c\x9d\xc4\x3a\x51\xe0\xab\x40\x63\x4b\x5c\x6f\xcf\x38\x36\x73\x4c\x9e\x72\xcc\xfb\xfd\x9e\xed\xc6\x4b\xe8\x01\x6c\xd8\x81\x3b\x89\xab\xb3\xda\xbe\xaf\x9d\x70\x6d\x8f\xfd\x2d\xbd\x76\x2c\x89\x80\xdb\x7c\x0d\x07\x6c\x81\xf3\x93\x99\x69\x1f\xe2\x1a\xce\x55\xe1\x40\x2e\x66\x54\x1b\x33\x77\xdd\x7f\x15\x9c\x9b\x47\x22\x67\x6c\xe7\x54\x9f\xf7\x71\x7d\x1f\x52\x28\xf3\xd9\x40\x2d\x60\xd3\xc3\x6d\xe8\xb5\xad\x91\x91\x6f\x0e\x63\x4f\x30\x2a\x1b\xcb\x97\xa3\x03\x89\x7b\x78\x12\x27\xc5\xff\x3c\x6a\xd8\x3f\xb5\xcf\x00\x56\xbe\xf9\x54\x70\x89\xfc\x08\x13\x90\xe1\xda\xec\x3a\xad\xd9\x54\x6c\xd8\x81\xa3\xb1\x45\xbb\x6e\x8d\x5d\x7b\x02\xc2\xf5\x9e\xe4\x70\x6c\xe4\x09\x7c\xe5\x36\xec\x0a\x36\xb8\x5e\x1a\xa6\xa2\xc0\xef\x44\x81\xae\xbc\x49\x1c\xce\xaf\xed\x83\x1f\xf6\x7c\x41\xdc\xf2\x7e\xe2\x94\x70\x30\xc7\x78\x72\x27\x6a\x71\xf5\xf2\x22\x71\x2e\x8e\xf3\xb1\x95\x17\x31\x4f\x33\xae\xbd\xd7\xee\xed\x8a\xc2\x10\xe1\x58\xe2\x0b\x0f\x21\x14\xe8\xad\xdf\x3e\x6f\x66\x84\x3e\x85\x08\x97\x3b\xa1\x91\x84\x2d\xfe\x36\xef\xa8\x9d\xc4\xc6\xb1\x29\x76\x34\x76\x67\x53\x7c\xe5\x25\xa8\x72\xb6\xa7\xfd\x5d\x47\x00\x49\xa7\x43\x34\x05\xa9\x71\x37\xde\x92\xd7\xb6\x79\xbf\x7f\xe6\xa2\x7b\xb6\x24\x41\xe9\xa1\xd6\x4d\x8b\x2e\x7f\xe3\xa4\x85\x6b\xca\x48\x5c\x67\xe1\x5c\x8f\x8b\x2e\x27\x64\x22\x20\xa0\xd2\x31\x69\xd2\xe7\xc4\x6f\x6d\x9e\x3a\x62\xb6\xb3\xb6\x77\xc7\x84\x62\x2c\x17\xae\x09\x2a\x7f\x8c\x62\x3c\x4f\x8c\xc4\xa3\x79\xa0\xc2\x35\xbb\x66\x2a\xd1\x22\x62\xa2\x7d\x7d\xd6\xe6\xce\x63\x7e\xa4\x91\x8e\x8e\xfd\x85\x1e\xf3\x0b\x30\x46\x63\x31\xda\xee\xe9\x50\x60\x42\xb7\xfb\xfc\xa7\xb8\xe7\xdb\x39\x77\x18\x33\xf2\x28\x12\xcb\x28\x56\x05\x50\xd8\x56\x8c\x71\x6c\x31\xee\x47\x65\x1f\xd7\x5a\xbe\xed\x1c\x4b\xa2\xda\x75\x09\xa8\x12\x05\xbe\xf1\x1a\x32\xdd\xf7\xe6\x45\x81\x27\x3d\x8a\xdc\x89\x02\xff\x0d\xd7\x16\xdd\x73\x47\x32\xb2\x29\xb2\xf6\x12\x25\xec\x9f\x45\xc2\x79\x49\x68\x53\xab\xc6\xa3\x86\x43\x9c\x27\x3a\x1c\xd9\xd7\x65\x64\xe4\x5a\xca\xa1\x5e\xd9\xc7\x7b\x7b\xa0\x16\x
a2\xd0\x3f\x5b\xa5\x31\xf1\x51\xdc\x3f\xd0\xc3\x25\x6a\xed\x25\xc6\x81\x4e\x2c\xd3\x8e\x46\x69\x05\x29\xd0\xc6\xea\xa3\xe7\xa9\xda\x7e\x9a\x4f\xf1\x8d\xa3\x77\x7d\x31\x71\xbf\xb7\x78\x9a\x63\xf5\xf4\x2b\xe1\x41\xce\x1c\xb9\x13\x39\xb5\x83\xcf\x91\xb1\xc8\x33\x4f\x2a\xb3\xef\x95\x64\xd9\xe1\x79\xa7\x1d\xf3\x5d\x9c\x48\xc8\xc6\xf9\x0f\x17\x87\x52\x98\xfd\xd7\xed\xe7\xab\x27\xc2\x9e\x7f\xde\x7e\xe3\xf0\x17\xbf\x4b\xf8\xae\x93\x58\xb2\x3a\xd8\xe4\x9b\xac\x8e\xfa\xf7\xad\x1e\x5d\x54\xbc\x7e\x7e\x50\xe4\x07\x69\x79\xe9\xa7\x05\x6e\x8e\x5f\x23\x33\x8a\x60\xc3\x14\x45\x14\xa6\xc1\xc5\x93\x6c\xaa\xa3\xfb\x62\x07\xf1\xb5\xb7\xdf\xae\xbd\xdc\x3a\xef\x0e\x67\xe8\x21\x34\x5f\x2f\x83\xbd\xfd\x72\xb3\x7f\x49\xd1\xdd\x64\x77\x47\x07\xc1\x5c\x7d\x70\xfd\xf3\x9f\xed\x6b\x64\x2f\x79\xf8\xae\xb3\x95\x5e\x9c\x39\xd3\xbe\x0c\x78\xf5\xe0\x99\xf7\x9c\xc5\xcd\x75\x87\xdb\x7c\x3d\x3d\x3a\x47\x83\x2e\x0a\xb4\xe0\xe5\x99\xdc\xbf\xec\x04\x26\x92\xfa\xf1\x13\x98\x7e\xe4\x4d\xe4\x6b\x6f\x1d\x5f\x3e\x24\xe9\x0a\xe4\x2b\x07\x95\xdf\x7e\xb9\xf9\xff\x7d\xaa\xfa\xb9\x37\x78\xcf\x59\x52\x6f\x1d\xc4\xe4\x22\x94\x6d\xc1\xc9\x69\x01\x0c\x84\x41\x51\x5c\x7d\x3f\xb2\x9b\xc2\x67\x9b\xad\xbb\xf1\x03\x5f\xdf\xb8\x8f\x8f\x11\x7c\x63\xb8\xd0\x2d\x4f\xdf\xb8\x69\x11\x95\x7b\x5f\x70\xd9\x76\xd5\x20\xc9\xca\xa0\x9f\x51\xbc\x32\x76\xd3\x0e\x3c\x25\xfe\xba\x95\xbf\x7c\x91\xfd\xf4\x0d\x97\x17\xbf\xa6\xd0\xf2\xfd\xcd\xa3\x15\xb0\x8e\x6d\xf2\xee\x34\x85\x4b\xee\xfa\x7d\x46\x7d\x7c\x28\xc3\xd7\x3c\x08\xb0\x56\xdd\xf5\x60\xdf\x7f\x7a\xda\xf9\xa9\x0f\xa7\xdf\x97\x1d\xd8\x37\x8f\xd8\xbf\x76\x34\xda\x3b\xcd\xfe\x1d\x4a\x79\xd5\x95\xf5\xe7\xc5\x61\x9e\x7e\x3d\x7a\xed\xb8\x3f\xc5\xa9\x45\xf0\xf5\xf6\xcb\x4d\x58\x45\xfe\x6f\xaf\xfb\x81\x0b\x07\xc9\x9d\x9c\x05\xd3\x23\xfa\x7c\xea\x36\x2f\xfd\x14\x46\xab\x06\xf7\x3e\xfd\xdd\xa3\x1f\xbc\x3b\xd2\xbf\x7f\xbc\xbb\xff\xfe\xf0\xfd\xce\xa5\x68\xf2\x0e\x7e\xfb\xfe\x30\xb8\xf7\x29\x92\x0a\xbe\xbf\xcf\xd7\xbe\x42\xe0\xd7\x36\x58\xfa\xc1\xa6\xf8\xba\xc9\x50\xd0\xc5\x9a\x24\xb8\xf0\xab\x08\x6f\x59\x75\x01\xb3\x0e\xef\x1b\xde\xf2\xbd\x5c\xba\xe8\x2f\x31\x8d\xa3\xe0\x31\x4a\xa3\xee\x85\xa9\xc3\x2f\x4d\xec\xdf\xa1\x52\x2f\xe2\x3e\x95\xe6\x29\x90\x16\xfb\xfb\x78\x7d\x91\xa4\xfc\xe4\x17\x4c\x4e\x0f\xee\xba\x2c\xdb\xd7\xc1\xec\x0f\x58\x3a\x9f\xf9\xfa\xe1\x77\x2f\x7e\xb3\xe2\xe1\x8e\xa0\xef\x08\xf2\x2e\xdf\x04\x75\x14\x6c\x7f\x6d\xc4\x7b\xbf\x0c\xaf\x05\x8f\xbf\x75\xdf\xff\xf9\xb7\xff\x17\x00\x00\xff\xff\x00\x10\x6c\xc8\x52\x6e\x00\x00")
// gatewayProductionJsonBytes returns the decompressed contents of the
// embedded "gateway-production.json".
func gatewayProductionJsonBytes() ([]byte, error) {
	return bindataRead(
		_gatewayProductionJson,
		"gateway-production.json",
	)
}

// gatewayProductionJson wraps the file in an asset with zeroed metadata.
func gatewayProductionJson() (*asset, error) {
	bytes, err := gatewayProductionJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "gateway-production.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
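
// bindataFileInfo satisfies os.FileInfo, so embedded assets can stand in for
// real files. The method set assumed here (defined earlier in this generated
// file) follows the usual go-bindata pattern:
//
//	func (fi bindataFileInfo) Name() string       { return fi.name }
//	func (fi bindataFileInfo) Size() int64        { return fi.size }
//	func (fi bindataFileInfo) Mode() os.FileMode  { return fi.mode }
//	func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime }
//	func (fi bindataFileInfo) IsDir() bool        { return false }
//	func (fi bindataFileInfo) Sys() interface{}   { return nil }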
var _rbacDevelopmentJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x96\x5b\x6f\xda\x30\x14\xc7\xdf\xf9\x14\x56\x36\x09\x2a\x61\x92\x98\x40\x42\xdf\xd0\xaa\x4e\x7d\xe8\x8a\xda\x6e\x2f\x88\x07\xc7\x3e\x50\x4f\x89\x6d\xd9\x0e\x55\x3b\xf1\xdd\xa7\x84\x4b\xb9\xa4\x97\x55\x55\xb5\x6a\x83\x17\x88\x8f\xff\xfe\xfb\x9c\xdf\x39\xca\xaf\x06\x42\x08\x79\x9f\x2d\xbb\x81\x9c\x7a\xc7\xc8\xbb\x71\x4e\xdb\x63\xdf\x5f\x3e\xe9\xe4\x54\xd2\x19\xe4\x20\x5d\x87\xde\x17\x06\x3a\x4c\xe5\xab\x35\xeb\x93\x20\xec\xe1\x20\xc4\x41\xe8\x73\xd0\x99\xba\x2b\xe3\xae\x21\xd7\x19\x75\xd0\xf9\x69\x95\xfc\xe4\xb5\x97\x27\x30\x25\x1d\x48\xf7\x03\x8c\x15\x4a\x96\x07\x85\x9d\xa0\xfc\xae\x03\x34\x35\x34\x07\x07\xc6\x7a\xc7\x68\x69\xab\x7a\x4e\x4d\x7e\x05\x66\x2e\x18\x8c\x8c\x90\x4c\x68\x9a\x9d\xf1\x9d\x90\x2a\xcc\xdd\x69\x28\x55\xad\x33\x42\xce\xbc\xcd\xe2\xa2\xfd\x20\x35\xd5\x6f\xa5\xc4\x61\x5e\x27\xf5\x52\xa5\xc6\x96\x9e\x67\xc0\xaa\xc2\x30\x28\xef\x3d\xde\xc4\xec\x49\x49\x9a\x57\x52\xf1\x00\x78\x14\x47\x14\xc7\xa4\x1f\xe3\x68\x3a\x4d\x70\x4a\x48\x1f\x0f\xfa\x61\x14\xa4\x10\xf4\x09\x25\x5e\xbb\xde\xc6\xb9\x60\x46\x59\x35\x75\x9d\x61\xe1\x6e\x94\x11\xf7\xd4\x09\x25\x7d\xa3\x32\x38\x81\xa9\x90\xa2\xfc\x6b\xf7\xb7\x6b\xa3\x34\x18\x27\xc0\x1e\xe4\x6a\xe9\x5f\x65\xf0\x6d\x65\x6f\x78\x79\x81\xe6\x11\x3a\x81\x39\x64\x4a\x97\x34\xa0\x53\x61\xac\x43\x23\x6a\xdc\x1d\xba\x2a\x52\xcb\x8c\xd0\xe5\x39\x7b\xc7\x2c\x8f\x02\x93\x0b\x6b\x2b\x17\xdb\xc9\xd8\xfe\x1c\x3a\xd8\x6c\xa7\xcc\x3d\xb9\x75\x13\xf8\x90\x89\xcb\x75\xf2\x7d\xbb\xe5\xcd\xfa\xeb\xa2\x7c\x35\xaa\xd0\xd6\xbf\x35\xc2\x81\xf7\xa8\xe8\xa4\x76\x65\x71\xf0\x74\x52\x73\x67\x6a\xad\x98\x49\x9a\x66\x70\xc5\x94\x86\xc7\xdd\x7b\xe3\x6d\x8b\xad\xa3\x8e\xe0\x93\x43\x4b\xbb\x56\x16\x7b\xb5\xa4\x5a\x6c\x75\x20\x09\xc2\x64\xd9\xbe\x58\x1b\x98\x0b\xb8\xad\xa5\xfd\x11\x14\xc7\xb3\x42\xf0\xd6\x81\xa7\x36\x6a\x9e\x8e\x90\x8f\x56\x28\x9c\x8e\x76\xaa\xde\x3c\x9a\xbc\x06\xcf\x61\x95\xa4\x92\xa7\x03\x3c\x39\x68\x90\xdc\x5e\xc8\xda\xc4\xed\x26\x6d\x5d\xed\x33\xde\x6a\xbe\xb0\x1b\x9a\x6d\xd4\x7c\x49\xd3\x95\x17\x6b\x3c\x51\xeb\x67\xdb\xc8\x96\xd5\xaf\xf2\x5a\x53\xe6\x1a\x6e\x76\x7d\x56\x83\xec\x5d\x2f\x5b\xd7\xbe\x3b\x63\xd5\x1b\x3f\x8c\xf4\x56\xb3\x6e\xf6\x3e\x27\x73\xbd\x22\x63\x7f\xa7\xf7\xc7\x8c\x0f\xde\x9c\xf1\xe1\xe5\x39\xf2\xd1\x77\x0b\x06\x0d\x19\x03\x6b\xd1\x90\xe7\x42\x0a\xeb\x0c\x75\xca\xbc\x3d\xe7\x1f\x85\x9f\x30\xe1\x31\x4f\x12\x8e\x79\xb7\x07\x38\x9a\xa6\x3d\x4c\x7b\xac\x8b\xe3\x38\xee\x32\x12\xd0\x98\xf0\xc1\x2b\xf8\xa9\x7d\x0d\xf8\xc8\x00\x9d\xc0\x1c\xf9\xe8\x8b\x92\xce\x88\xb4\xf8\xa7\x91\x49\x49\x34\x48\x12\xca\x70\x3f\x4c\x02\x1c\x11\x1a\x60\x9a\x26\x09\x26\xc1\x34\xee\x26\x84\x73\x12\xb1\x57\x20\x53\xfb\x92\xf6\xf1\x91\xf9\x3f\x73\xde\x6b\xe6\xfc\x9d\x00\x55\xbf\x26\x8d\x45\xe3\x77\x00\x00\x00\xff\xff\xa7\x46\xc2\xc5\xbc\x0d\x00\x00")
// rbacDevelopmentJsonBytes returns the decompressed contents of the embedded
// "rbac-development.json".
func rbacDevelopmentJsonBytes() ([]byte, error) {
	return bindataRead(
		_rbacDevelopmentJson,
		"rbac-development.json",
	)
}

// rbacDevelopmentJson wraps the file in an asset with zeroed metadata.
func rbacDevelopmentJson() (*asset, error) {
	bytes, err := rbacDevelopmentJsonBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "rbac-development.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _rpDevelopmentPredeployJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\xdb\x6f\xdb\xb6\x17\x7e\xf7\x5f\x21\xf0\xf7\x03\xec\x0c\xbe\xc8\x5e\x0a\xac\x7e\x2b\xd0\x61\x08\x76\x69\xd0\x14\x7d\x31\x82\x80\xa6\x8e\x1d\x36\x14\x49\x90\x94\x5b\x6f\xc8\xff\x3e\x50\xb2\x7c\x91\xa8\x5b\x2b\x6f\xa9\x27\xe5\x21\xb0\x78\x2e\xe4\xe1\x77\xbe\x8f\xe2\x5f\x3d\xcf\xf3\x3c\xf4\x7f\x4d\x1e\x21\xc4\x68\xee\xa1\x47\x63\xa4\x9e\x4f\x26\xc9\x9b\x71\x88\x39\x5e\x43\x08\xdc\x8c\xf1\x9f\x91\x82\x31\x11\xe1\x6e\x4c\x4f\x66\xfe\xf4\xd5\xc8\x9f\x8e\xfc\xe9\x24\x00\xc9\xc4\xd6\xda\x7d\x80\x50\x32\x6c\x60\xfc\x49\x0b\xfe\x3f\x34\x4c\x32\x10\xc1\x0d\x70\xf3\x11\x94\xa6\x82\xdb\x44\xd3\xb1\x6f\xff\x52\x03\x89\x15\x0e\xc1\x80\xd2\x68\xee\x25\xd3\x8a\xdf\xe3\x20\xa4\xfc\xdd\xf2\x13\x10\x73\x13\x9c\x0c\xc5\xc3\x66\x2b\xc1\x46\xd3\x46\x51\xbe\x46\xfb\xc1\xe7\xe1\x21\xc4\x4a\xde\x81\xda\x50\x02\xb7\x8a\x72\x42\x25\x66\x5f\x1b\xe9\x09\xb6\x1b\x1c\x31\x73\xab\x60\x45\xbf\x54\xc6\x18\x9e\x8e\x86\xf8\xcb\x6f\xc0\xd7\xe6\x11\xcd\xbd\x99\xef\x4c\xa0\xbe\x6d\xaa\xbd\xa3\x78\x48\x81\x16\x91\x22\x60\x0b\xba\xd8\xdb\x64\x42\x49\x25\x24\x28\x43\x41\xe7\xd2\xc4\xe3\x1a\x48\xa4\xa8\xd9\xbe\x8f\x58\x26\xd0\xf1\x93\x77\xac\x9b\x20\x6b\x6b\x04\x11\xcc\xae\xed\x03\x91\x99\xfa\xe5\xe7\x16\x2f\xef\x56\x28\xf3\x1e\xf3\x75\x5c\x91\x1f\xaa\x7c\x02\xd0\x86\x72\x6c\xa8\xe0\x27\x8e\xd7\xd7\x3f\xd6\x4b\xf7\x26\x08\x14\x68\xbd\x47\x40\xa3\x94\xcd\x9d\x31\x21\xa0\x6d\xe9\xd0\x1b\xc6\xc4\xe7\x2a\x73\xa9\xa8\xb0\xdb\x85\xe6\xde\x74\xe6\x57\x4d\x8c\x2a\x20\x66\xd7\x8e\x37\x7c\x29\x22\x1e\xa0\x42\x97\xe7\xe2\x68\x88\xe3\x30\xae\xa2\x92\x0f\x94\x3f\x60\x15\xba\xc3\x14\x84\xf8\xfe\xd1\x33\x9b\x5d\x1c\x78\x5e\xfd\xe3\xe0\xd1\xfa\xf1\x81\xf2\x02\xe4\xe4\xde\xde\xf7\x4a\xe2\x1f\x01\x72\xc4\x75\x8e\x88\x53\xfe\xfc\x9d\x12\x25\xb4\x58\x99\xf1\x1f\x60\x3e\x0b\xf5\x34\xe1\xc9\xff\xbb\x1d\xeb\xfd\xa2\x44\x24\x75\xd6\x9d\x09\x82\xd3\x95\x2f\x52\x96\x8d\x4d\x07\x57\xe3\x74\xf0\x3e\xeb\x85\x25\x3d\x52\xbf\x99\x3f\xf3\x47\xfe\x4f\x23\x7f\xea\x54\x9a\x52\x96\x2e\x59\xac\x84\x0b\x5c\x6f\x5e\x95\x70\xd2\x0a\x77\x12\x13\x28\x24\x86\xd4\x2a\x69\x98\x12\xf5\x8a\x8d\xa7\x7e\x72\x1e\x99\xcc\xae\xdd\x10\xbc\xcf\xbd\x75\x80\x1a\xe9\x68\xc9\xc1\x9c\x5d\x29\x71\x96\x0b\x8e\xa7\x5f\xd1\xb9\xae\x3d\xaf\xcc\x18\x7b\xd2\xe0\x04\x02\x37\xc1\xa0\x5f\x13\x52\xfd\xa1\xd7\x4f\x7a\xb1\x7f\x95\x45\x8a\x33\x95\xc1\x6b\x5b\x06\x1e\x31\x56\x6a\x9c\x27\x86\xfd\x48\x2d\xbd\x1a\x25\x1b\xd6\x3a\xeb\x6c\x6c\xd0\xda\x6d\xb8\xa1\xca\x44\x98\xed\x7e\x9e\xbb\x01\x33\x76\x01\x48\xe0\x81\x7e\xc7\x9d\xa0\x6d\x61\xbb\x7b\xee\x0a\xbe\xbc\xf6\xbf\xb6\xfd\x33\xfb\x6e\xdb\x7f\x37\xfd\x97\xdb\xfe\x89\x3a\xb5\xcd\x00\xd5\xa7\x9a\x0d\x36\xf0\x33\x0f\xa4\xa0\xdc\xec\xe6\x79\x2b\x18\x25\x49\xad\xd1\x5b\xaa\xf1\x92\xc1\x37\x9f\x7d\xed\xfa\xce\x44\x27\x12\x62\x46\x19\xf9\xf9\xf6\xfd\xcf\xb1\xca\x01\x45\x8d\x89\x25\xad\xe8\x82\x08\x4e\xb0\x19\x1c\x2e\x1d\x06\xfd\xd3\x6f\xfb\xfe\xd5\xd0\xeb\x8f\x08\xd3\x79\xb4\x3a\x2a\xfe\x2b\x6c\x3f\x5a\xdf\x49\x1c\xa1\xa5\x4a\x57\xb1\xa0\x01\x8e\x79\x72\x29\x82\x16\x3a\x5a\x6a\xa2\xa8\xb4\x91\x06\x57\xe3\x74\xcc\xd5\x69\x48\x3f\x45\xc5\xac\xb9\xc2\x21\x65\xdb\xf8\x9b\xa1\xa0\x4b\x0f\x07\x76\x83\x79\x80\x95\xa3\x6f\x5c\xd4\x98\x7c\x8c\x1c\xf5\x5d\x63\x86\xfc\xda\x05\xef\x03\x88\xc3\x35\x12\x5a\x1c\xef\xbd\xeb\x86\xa8\x9c\xa6\x90\x04\x15\x52\x6d\xfb\xa0\x06\x5f\x6b\x20\xaa\x4c\x16\x4e\x8c\xd7\x45\xfc\x91\x3e\xf7\x15\x8c\x47\x2c\x6a\x56\x94\x60\x53\xa1\x7a\x07\x0f\x05\xd8\x40\x1d\x56\x0e\x80\x41\x3d\xcb\x75\xee\x00\xe4\x34\x8b\x64\x60\x53\x97\x2f\xb8\x98\x98\x5b\xba\x67\x38\
x1f\xb4\x4e\xee\x2f\xdb\xc4\x54\xf3\x5d\xae\xb9\x23\x8c\xea\x2a\x00\x36\xdd\x8f\xbc\xf0\x39\x08\x02\xb8\x55\xe1\x3b\xb1\x32\x6f\x13\x90\xcd\x3d\xa3\x22\x28\x95\xc8\xac\x1a\x4d\x5f\x8f\xfc\xd7\x35\x3f\x32\x9b\x8a\x41\xb0\x34\x9d\x18\xe4\x4c\x2e\x4f\x0c\x5c\x77\xf0\x2f\x58\x0c\x3a\x6e\xcc\x1b\x37\x2a\xb1\x6e\x8d\x16\x5b\xd7\xe5\xd6\xd5\x96\x86\x52\xa8\x4e\x05\x8e\x7c\x9b\xaa\x80\x14\xaa\x53\x81\x9c\x49\xa7\x02\x9d\x0a\x74\x2a\x10\x3f\x9d\x0a\xec\x9e\x4b\x56\x01\xbd\x21\x9d\x0a\xe4\x4c\x3a\x15\x38\xbb\x0a\xfc\x3b\x3d\xdb\xa9\x45\xc6\xb8\xe6\x56\x74\xa2\xe2\xb2\xbc\x30\x51\xe9\x25\x33\x7e\xee\xfd\x1d\x00\x00\xff\xff\xe9\x07\x8c\x5b\x4d\x29\x00\x00")
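
// rpDevelopmentPredeployJsonBytes returns the gunzipped contents of the
// embedded rp-development-predeploy.json asset; rpDevelopmentPredeployJson
// wraps those bytes in an *asset with zeroed file metadata.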
func rpDevelopmentPredeployJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpDevelopmentPredeployJson,
		"rp-development-predeploy.json",
	)
}

func rpDevelopmentPredeployJson() (*asset, error) {
	bytes, err := rpDevelopmentPredeployJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-development-predeploy.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpDevelopmentJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x5b\x4f\xe3\x46\x14\x7e\xe7\x57\x58\x6e\x25\x13\x29\x93\xd8\x8e\x83\x1d\xde\xe8\xd2\xdd\x22\x2d\x6c\x1a\xd0\x3e\x14\xf1\x30\x9e\x39\x0e\x53\x9c\x99\xd1\xcc\x38\x34\x5b\xf1\xdf\x2b\x3b\x09\x89\x2f\x09\x10\x02\x12\x6d\xe1\x29\xe3\x73\x9b\xef\x7c\xe7\x62\xff\x7d\x60\x59\x96\x65\xff\xac\xc9\x2d\x4c\xb0\x7d\x6c\xd9\xb7\xc6\x48\x7d\xdc\xed\xce\x4f\x3a\x13\xcc\xf1\x18\x26\xc0\x4d\x07\xff\xc8\x14\x74\x88\x98\x2c\x9e\xe9\xae\xef\x7a\x7d\xe4\x7a\xc8\xf5\xba\x14\x64\x2a\x66\xb9\xdc\x15\x4c\x64\x8a\x0d\x74\xfe\xd4\x82\xff\x64\xb7\xe7\x1e\x88\xe0\x06\xb8\xf9\x0e\x4a\x33\xc1\x73\x47\x5e\xc7\xcd\xff\x97\x02\x12\x2b\x3c\x01\x03\x4a\xdb\xc7\xd6\x3c\xac\xb9\x62\x9a\x69\x03\x6a\x88\x15\x70\x73\x2a\x26\x98\xf1\x0b\x3c\x81\x92\x50\x21\x68\x66\x32\x3f\xb5\xb5\x51\x8c\x8f\xed\xc7\x87\x0f\xed\x95\x31\x8a\x0d\x8e\xb1\x86\x13\x42\x44\xc6\xcd\xee\x86\x12\x79\x09\x6a\xca\x08\x0c\x15\xe3\x84\x49\x9c\x9e\xd1\xdd\x2c\x31\x39\xca\x52\xd0\x9b\x95\xb1\x52\x78\xd6\xac\xab\x5e\x17\xc5\xc1\x9a\x3d\x5b\x81\x16\x99\x22\x45\x24\xd7\x8f\x32\x15\x53\x52\x09\x09\xca\xb0\x79\xbc\x6b\xa1\x14\x4f\xf9\x1c\x4e\xfb\x9a\x08\x4e\xb0\x39\x5c\x9a\xfc\xa2\x44\x26\x0f\x5b\x9d\x54\x10\x6c\x98\xe0\x6d\xcb\xe9\x38\x6d\x6b\x95\xf1\x43\x67\x43\x96\x9d\x56\xeb\xc6\x6e\x37\x5f\xe7\x9c\x11\x25\xb4\x48\x4c\xe7\x02\xcc\xbd\x50\x77\x5d\xca\xf5\x1f\x82\x83\xae\x6a\x2c\xfd\xe6\x5a\xe3\x54\xc4\x38\xad\x4a\x60\xc9\xd6\x98\xe9\xbb\x5e\x84\xdc\x9c\xd9\x8d\xb0\x6f\xc5\xa4\xf4\x6c\x6e\x3b\x4d\xc5\xfd\x77\xa6\x4c\x86\xd3\x45\xa4\x27\x84\x80\xce\xc5\x8d\xca\xa0\xbd\x41\xe5\xb3\x50\xf7\x58\x51\xa0\x57\x0a\x27\x09\x23\x4f\x88\x7f\xc1\x06\xee\xf1\xec\x4a\x61\xae\x99\xb1\x8f\xad\x04\xa7\xba\x49\x3a\xd3\x30\x82\x89\x30\xb0\xd0\xd0\x5b\x64\x55\x21\x58\x0e\xbe\xf1\x96\x85\x34\xa3\x45\xf6\x97\x69\x3f\xa3\x87\x4e\x3d\x49\xd3\x92\x31\xed\xb4\x2d\x47\x49\x24\x01\x4d\x39\x18\xe4\xba\x9e\xd3\xba\xb1\x6b\xf6\x1f\x4a\x27\x9b\x88\xa7\x64\x61\xa5\x2b\x01\x72\xaa\xa3\xb2\xe1\xe7\x13\xa9\x12\x63\xe5\xf7\x70\x6e\xbd\x46\xb3\x2a\x89\x7c\x17\xb9\x11\xaa\xfb\x5d\xa7\xe3\xf5\xa6\x1a\xb9\xf9\x9f\x7a\xef\x44\xbd\x9c\x1e\xaf\xe3\xdc\x1a\xc7\xd6\xa9\x97\x9f\xfd\xfb\x38\x77\xc7\x78\x81\xf5\x97\xa2\x91\x9e\x0a\x92\xe5\x43\xff\xf4\x97\xaa\xc7\xa7\xb8\x49\x04\xd7\x4c\x1b\xe0\x64\x36\x14\x29\x23\xb3\xcd\xc9\xa5\x90\xe0\x2c\x35\x9f\x56\x1a\x5f\x61\x0a\x69\x1e\xc5\xa5\x51\x62\x7d\xa6\x6d\xc8\x56\x09\x83\xf2\x84\x5b\xff\x6b\xf6\x5f\x52\xbe\x58\xce\xb8\x67\x80\x58\x0a\xa8\x76\x7a\xd3\x10\x62\x65\x3f\xf9\x96\x24\xa0\xae\x16\x94\xb9\x34\x98\x53\xac\xa8\xdd\xa0\x17\x63\x72\x97\xc9\xa7\x70\x94\xa0\x98\xa0\x8c\x9c\x0b\x0a\xc3\xed\xe9\xa9\x58\x3e\xe3\x06\xd4\x14\xa7\x67\xfc\x9c\xf1\xcc\x14\x4a\x7e\xe0\xd6\x23\xa9\xe8\x8d\x20\x5f\xfa\x98\xe0\x2b\x03\xbf\x89\xac\x58\xf1\x42\xdf\x6d\x06\xaa\xd9\xe8\x63\xe5\x0c\x17\x97\xd8\xb5\x5e\xaf\xd7\xd7\x8e\x86\x7d\xd0\x79\xce\xc6\xb1\xa2\x7d\xb7\x62\x62\xeb\xf2\xb1\x85\x34\x55\x97\x78\xbc\xa1\x6e\x16\xc5\xf0\xeb\x5f\x79\x36\x81\x93\x22\xb2\x4f\x42\x81\x75\x78\xf9\xfb\xd7\x96\xbd\x15\x84\x5a\xbb\xc8\x77\x77\xe4\xf5\x9f\x53\xf8\x8f\x00\x8e\x33\x46\x6b\xbb\x1d\xa3\xe5\x85\xae\x69\x35\x75\x5a\x6d\xcb\x19\x0d\xad\xae\x35\x02\x4c\x41\x3d\x0b\xea\x93\xcc\xdc\x0a\xc5\x7e\x14\x38\x75\x95\x48\xe1\x44\x6b\x36\xe6\x39\xfc\x35\xb0\x9f\x6a\x3a\x9a\x08\xd9\x5c\xbe\x8c\x56\x63\x29\x14\x72\x7f\xa7\x90\x30\xce\x0a\x16\xcf\xa7\x8c\xce\x62\x4d\x14\x93\xf9\xd1\xa8\x71\xe2\xd4\x83\x5e\x19\x29\xe6\x0e\x26\x94\x86\x3e\x0e\x51\xaf\x17\xf5\x51\x10\x41\x82\x62\x1a\xf8\x28\x39\x72\x8f\x92\x18\x47\x1e\x86\xb0\x0e\xcf\x
e2\x8e\xeb\xbb\x7e\x99\xcf\xcd\xa8\x6f\x37\xf3\xd8\x5f\x2a\x9a\x2f\x63\x52\xbe\x31\x0f\x72\x32\x49\x05\x53\x06\xf7\xfb\x61\x94\xf3\x39\x67\xcb\x62\x06\x5a\x9f\x04\x37\x8a\xc5\x99\x11\xff\x65\xea\x04\x74\x10\xc6\x83\x28\x46\x1e\x0d\x12\x14\x84\x51\x88\xb0\x3f\xf0\x10\x39\x0a\xa3\x5e\x40\x7d\xcf\xdf\x89\x3a\x4d\x6f\xb4\x1f\x80\x3a\x8b\x57\xcd\x27\x9b\x7a\xdb\x72\xba\x9b\x40\x76\xda\x56\x89\x81\xe5\x84\x6c\xe9\xf5\x95\x97\xd8\x46\xc7\xad\x97\xf4\xc5\x95\x2f\x6b\x61\xa4\x4c\xfa\x57\xce\xa6\xae\x54\x62\xca\x28\x28\xbd\xff\x6a\xd8\x2f\x68\xef\x5a\x51\xfd\x98\x0e\x08\x8d\x22\x94\x40\xd0\x47\x81\xef\x1d\xa1\x41\x2f\x8a\x51\x32\x08\x83\x5e\x08\x5e\x3f\xe8\xbb\x1f\xbb\x19\x57\xe4\x29\x48\xe0\x54\x7f\xe3\x8d\x9b\xf0\xdb\xe5\xb4\xe4\xea\x66\x87\x32\xdf\xc7\x17\xa5\xdd\x3b\x41\xf5\x3b\x93\xd3\xb6\xf6\xf9\xa5\xab\xb5\x9c\x77\xa7\x17\x97\x56\xee\xe0\xe5\xb5\x5f\x8d\xf0\xfd\x0a\xfe\xad\xb1\x79\xd7\x86\x10\x43\x02\x09\x76\x3d\xe4\x63\x7f\x80\x02\x6f\x10\xa2\xa8\x87\x23\xe4\x87\x7e\x92\xf4\x7a\x04\x7a\x5e\xf0\xb1\x47\xec\x5e\x1a\xc2\xdb\xe7\x7c\x53\xc3\x38\x98\xff\x7e\x38\xf8\x27\x00\x00\xff\xff\x96\x67\xd1\xd1\xc0\x18\x00\x00")
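
// rpDevelopmentJsonBytes returns the gunzipped contents of the embedded
// rp-development.json asset.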
func rpDevelopmentJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpDevelopmentJson,
		"rp-development.json",
	)
}

func rpDevelopmentJson() (*asset, error) {
	bytes, err := rpDevelopmentJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-development.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionGlobalAcrReplicationJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x92\x4f\x6b\xe3\x30\x10\xc5\xef\xfe\x14\x42\xbb\x60\x1b\xfc\x37\xb0\x97\x5c\xf7\x94\xc3\xb2\x10\x4a\x2f\x21\x87\xa9\x3c\x49\x54\x6c\x49\x68\x26\x6d\xd3\x92\xef\x5e\x14\xe5\x8f\x93\x86\x52\x0c\x46\xe8\x3d\xff\x9e\xde\x58\x1f\x89\x10\x42\xc8\xdf\xa4\x36\x38\x80\x9c\x0a\xb9\x61\x76\x34\xad\xeb\xb8\x53\x0d\x60\x60\x8d\x03\x1a\xae\xe0\x7d\xeb\xb1\x52\x76\x38\x6a\x54\x4f\x9a\xf6\x4f\xd9\xb4\x65\xd3\xd6\x1d\xba\xde\xee\x82\xef\x01\x07\xd7\x03\x63\xf5\x4c\xd6\xfc\x92\x45\x4c\x50\xd6\x30\x1a\x7e\x44\x4f\xda\x9a\x10\xd4\x56\x4d\x78\x4e\x06\x07\x1e\x06\x64\xf4\x24\xa7\x22\x1e\xeb\xb0\x0f\xca\xcf\x91\xec\xd6\x2b\x9c\x75\x57\xd2\x41\xe6\x9d\xc3\x40\x23\xf6\xda\xac\xe5\x59\xdc\x17\x17\x44\x6f\x15\x70\x4c\xfd\xe9\xd7\xc9\x88\x21\xfd\x31\x3f\x9c\x6c\x71\xf6\xdc\xa0\x0c\x0c\x07\xd4\x42\x59\xa3\x80\x33\xda\x3e\x45\x6a\x76\x69\x96\xa5\x57\x6d\xd2\xbc\x10\xd0\x75\x59\x0f\xc4\x33\xd3\xe1\xdb\xff\xd5\xf7\xe6\xb4\x0e\xef\x36\x8f\xcb\x42\x8c\xcd\xa7\x92\x69\x9e\x2f\x65\x71\xbf\xe6\x3f\xad\xbc\x25\xbb\xe2\xea\xaf\x35\x0c\xda\xa0\x9f\xe3\x5a\x13\xfb\x5d\xed\xe3\x42\x23\xd5\x1e\x5d\xaf\x23\x8d\x6e\x51\xa3\x59\xca\xc5\xfd\xfc\x2f\xf1\xe0\xf4\xe8\xbf\x4f\x9a\x49\x53\xb6\xe1\xd2\x94\xce\xe3\x8b\xc6\xd7\xdb\xb9\x2f\x93\x7d\xf2\x19\x00\x00\xff\xff\xa4\x6a\xad\x47\x99\x02\x00\x00")
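
// rpProductionGlobalAcrReplicationJsonBytes returns the gunzipped contents of
// the embedded rp-production-global-acr-replication.json asset.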
func rpProductionGlobalAcrReplicationJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionGlobalAcrReplicationJson,
		"rp-production-global-acr-replication.json",
	)
}

func rpProductionGlobalAcrReplicationJson() (*asset, error) {
	bytes, err := rpProductionGlobalAcrReplicationJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-global-acr-replication.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionGlobalSubscriptionJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x94\xcf\x6e\xdb\x30\x0c\xc6\xef\x79\x0a\xc3\xdb\x61\x03\x66\xcb\xce\xbf\x25\xb9\x15\xdd\xb5\x1b\xd0\x16\xbb\x14\x39\x30\x36\x9b\x70\xb3\x25\x81\xa4\x5b\xa4\x45\xde\x7d\x90\x9d\xa5\x69\x92\xee\x30\xa4\xf2\x45\xa6\xc8\xef\xfb\x81\xa6\xf5\xdc\x8b\xa2\x28\x8a\x3f\x4a\xb1\xc2\x1a\xe2\x59\x14\xaf\x54\xbd\xcc\x8c\xe9\x22\x69\x0d\x16\x96\x58\xa3\xd5\x14\x9e\x1a\xc6\xb4\x70\xf5\xf6\x4c\x4c\x3f\xcb\x47\x49\x96\x27\x59\x6e\x4a\xf4\x95\x5b\x87\xbc\x5b\xac\x7d\x05\x8a\xe9\x2f\x71\xf6\x43\xfc\xa5\x73\x28\x9c\x55\xb4\xfa\x13\x59\xc8\xd9\x60\x94\xa7\x59\x78\xfe\x26\x30\x8a\x6b\xb8\x40\x89\x67\xd1\x5d\x1b\x0a\xeb\x79\xb7\x6b\x93\x2c\xd4\x18\x6a\x87\x93\xe9\x64\x30\x1a\x0c\x93\x41\x99\x8d\x93\x61\x59\x2c\x12\x18\x8d\xc7\x49\x36\x81\xf1\x74\x88\x8b\xbc\xff\x75\xba\x15\xde\xd5\xea\xda\xb7\xb5\x57\x54\xb0\x13\x77\xaf\xe9\x45\xa3\x2b\xc7\xf4\x04\x4a\xce\x1a\x76\x15\x7e\xc3\x7b\xb2\x14\x5e\xe5\xb0\xdc\xb3\xf3\xc8\x4a\x2d\xe0\x6b\xac\x8e\xdf\x55\xf8\x7d\x8b\x77\x71\xfd\x23\x7a\x18\x46\x97\xce\x2a\x90\x45\xbe\xc6\x25\x89\xf2\x3a\xba\x75\xbf\xd1\xb6\x71\xa6\x45\xa3\x8e\x0f\x5c\x3a\x27\xe4\x9a\x44\x5a\x88\xfd\x5e\xec\xaf\x63\x80\x5d\x39\x14\xfa\xcf\xd2\x5d\xe2\x4b\x23\x8e\x40\x0d\x77\x1b\x42\x31\x4b\xb4\xc8\xa0\x78\xc9\x58\xa2\x55\x82\x4a\x4c\xe7\x71\x02\xfe\x7f\x0c\xa4\x70\x1e\xaf\xc0\x8b\x61\x84\xf2\x4c\xa2\x1a\x3a\x2d\xa6\xc4\x0a\x15\xcf\xab\x19\xe6\xa0\x1d\x99\x1b\x05\x6d\x04\xdf\x83\xfb\xfc\x8a\x8f\x4c\x8a\xf1\x9b\x8a\xf3\x93\x27\x9b\xa3\xe8\xfc\xc4\xc0\x82\x08\x2d\x2d\x2c\x2a\xbc\x09\x9f\xf2\xed\xd1\x8b\xef\xa4\x59\x48\xc1\xe4\x43\xfb\x3e\x7d\x4e\xa9\x9c\x1f\x23\xbd\x46\xd9\x1c\xfc\x87\xe0\x69\xef\x12\xe9\x67\xf9\xa4\xbb\x81\x12\xcf\xf8\x40\xf8\xf8\xa2\xd7\xc1\xcf\x7b\x9b\xde\x9f\x00\x00\x00\xff\xff\x30\xe9\xb0\x3d\xe6\x04\x00\x00")
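
// rpProductionGlobalSubscriptionJsonBytes returns the gunzipped contents of
// the embedded rp-production-global-subscription.json asset.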
func rpProductionGlobalSubscriptionJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionGlobalSubscriptionJson,
		"rp-production-global-subscription.json",
	)
}

func rpProductionGlobalSubscriptionJson() (*asset, error) {
	bytes, err := rpProductionGlobalSubscriptionJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-global-subscription.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionGlobalJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x5b\x6f\xdb\x46\x13\x7d\xf7\xaf\x20\xf8\x7d\x00\x25\x40\x14\x49\xdd\x2c\xf9\x4d\x6d\xd2\x22\x40\x1a\x0b\xb2\x91\x87\x1a\x41\xb1\xdc\x1d\xc9\x5b\x2f\x77\xd9\xd9\xa5\x5c\x27\xf0\x7f\x2f\x78\x91\x22\x8a\x14\xa5\xfa\x52\x24\x88\x60\x80\x90\x77\x87\xb3\x73\x66\xce\x1c\x92\xf3\xe5\xcc\xb2\x2c\xcb\xfe\xbf\xa6\xb7\x10\x11\xfb\xc2\xb2\x6f\x8d\x89\xf5\x85\xe7\xe5\x2b\xdd\x88\x48\xb2\x84\x08\xa4\xe9\x92\xcf\x09\x42\x97\xaa\xa8\xd8\xd3\x5e\xcf\x0f\x86\xae\x1f\xb8\x7e\xe0\x31\x88\x85\x7a\x48\xed\xae\x21\x8a\x05\x31\xd0\xfd\x53\x2b\xf9\x3f\xbb\x93\x9f\x40\x95\x34\x20\xcd\x47\x40\xcd\x95\x4c\x0f\x0a\xba\x7e\xfa\xb7\x36\x88\x09\x92\x08\x0c\xa0\xb6\x2f\xac\x3c\xac\x6c\x9d\x50\x7c\xaf\x28\x31\x5c\xc9\xcb\x15\x20\x72\x06\x25\x83\xcc\xc8\x3c\xc4\xe9\xaa\xad\x0d\x72\xb9\x2c\x5c\x6e\x76\x19\x2c\x48\x22\xcc\x47\x22\x92\xcc\xca\xde\x6c\x3f\x76\x4a\x07\xcd\x41\xab\x04\x29\xbc\x63\x07\x8f\xa8\x75\x41\x45\xa2\x0d\xe0\x8c\x20\x48\xf3\x46\x45\x84\xcb\x0f\x24\x3a\x1c\x6f\xad\xb3\x45\x7c\x05\xb8\xe2\x14\x66\xc8\x25\xe5\x31\x11\x4f\x0d\x6b\x49\x0c\xdc\x93\x87\x97\x72\x87\xf1\xcb\x00\xc4\x17\x03\x88\x71\xc1\xab\x2b\xa3\x90\x2c\x61\x4a\xa9\x4a\xa4\xf9\x97\xa1\x9d\x6d\xb9\xb5\xb1\xa0\x42\xca\xc6\x9b\x8d\xcd\x8e\x2b\x7d\x97\x54\xfc\x67\x1b\x32\x3f\xd9\x9e\x21\x44\x3c\x89\xec\x92\xc5\xe3\x0e\x3d\x63\x54\x31\xa0\xe1\xa0\xeb\x9d\x31\x62\xc8\x5b\xc9\x62\xc5\xa5\x79\x2b\x49\x28\x20\x4d\x93\xc1\x04\x1a\xdd\xae\x63\xb8\xd1\x49\x98\x63\x6d\x7d\xed\xb1\x96\x53\xa2\xbb\xd3\xee\x58\x84\xb1\x96\x20\xda\xbc\x93\x0c\xfe\xbe\x5c\x34\x1b\x3b\x5e\x7a\x0d\xda\xed\x4f\xbb\xcd\xb6\x4e\xef\x6f\x9c\xa2\xd2\x6a\x61\xba\x3f\x2b\x69\x08\x97\x80\x73\x58\x72\x6d\xf0\xc1\xc3\xfc\x47\x8a\x78\xe7\x6e\x51\xb4\x7a\x16\x38\x5f\xb4\xe0\xaf\x84\x08\xbd\x1b\xcb\xae\x20\x64\x11\xa5\x97\x75\xd5\x7e\x45\x95\xc4\xad\x76\x77\xed\xae\x63\x1d\xf4\x50\x45\x42\x62\xbe\x25\x57\x3d\xbf\xe7\xbb\x41\xaa\x75\x6e\x8c\xb0\xe2\x70\x5f\xcb\xc5\x2f\x4d\xb5\xdd\x5b\xa2\xed\xe8\xaa\xed\xe5\x1c\x93\xe5\x0f\x60\xee\x15\xde\x79\x4c\xea\xdf\x95\x6c\xce\xec\x52\xa8\x90\x88\x43\x78\x83\xb1\xeb\xa7\xf2\xfe\x4a\x38\xf7\x88\xe5\x77\x00\x76\x03\x87\x2a\x49\x89\x69\xbd\x6a\x83\x65\x3f\xd3\xcb\x57\xf4\xd3\xc4\xdc\x2a\xe4\x9f\x33\x88\xe9\xe6\x32\xe1\xac\x55\x04\xd3\xe8\xb5\xcc\xb3\xaa\xfc\x66\x27\xcf\x67\x96\x67\x4d\x29\xce\x12\x21\x9c\xf6\xb3\x5b\xdc\x8b\x51\xad\x38\x03\xd4\x1e\x2a\x01\x53\xad\xf9\x52\xa6\x2f\x0a\x95\xaa\x1d\x12\x42\x4d\x55\x7e\xf0\x0d\x6e\x50\xb5\x9c\xe3\xa2\x70\x3a\xd6\xab\x96\xa9\x92\xa5\x2c\xe2\x14\xf0\x1b\x58\x70\xc9\xd3\x52\x65\x8f\xb7\x5c\x8f\x29\xf2\x38\x5d\x9a\xd7\x02\x29\x17\xb8\xec\x24\x45\xe2\x9c\x2f\x26\xc3\x80\x31\xe2\x0e\x80\xf5\xdd\xc1\x68\xec\xbb\xe4\x9c\x12\x77\xd0\x5f\x40\x70\xde\x63\xc3\xfe\x98\x55\x1b\xa9\x48\xf2\xf6\xc3\x76\x57\x7a\xea\x28\xd1\xec\xe6\xba\x20\xc3\xee\x9d\xcd\xcf\xbc\xba\xf6\x9b\x6c\x6b\x6b\xe5\x15\x2e\x06\xc9\xf4\xa5\x2c\x3d\x8e\x37\xdb\xdf\x26\x21\x4a\x71\x7e\xfa\x61\x84\x65\xef\xeb\xe6\x49\x5d\x7e\x68\x75\x69\xe0\xc5\x49\x62\x4e\x12\x73\x48\x62\x9c\x5f\x32\xe5\x98\x5f\x5a\xab\x81\x55\x49\xbb\x75\xad\xee\x40\x66\xeb\xc8\xc3\xc4\x28\x3c\x89\xcb\x37\x29\x2e\x83\xf1\x64\xdc\x1f\xf6\x07\x6e\x9f\xf9\x23\x77\xc0\x68\xe8\x92\xe1\x68\xe4\xfa\x63\x32\x9a\x0c\x20\x0c\x7a\xe7\x93\x27\x88\x4b\xdd\xb4\xe4\xa4\x2b\xff\x9d\xae\x1c\x1c\x85\x5c\x19\x22\x19\x41\xf6\xc7\xfb\xf9\xd5\xf3\xe6\x21\x44\x08\x75\xff\x93\x50\xe1\x2c\x09\x05\xa7\x53\x4a\x41\xeb\x63\x26\x22\xa5\xe1\xc2\xbe\x51\x41\x45\x32\xf6\x7c\xa4\xef\x9d\x38\x1d\xf5\x01\x5b\xdc\xe6\xe9\xd2\xed\x15\x59\xa9\x70\
x6d\xe2\xfa\x83\x27\x7d\x92\xd7\xb4\x41\x39\x7b\xf6\x86\x71\x75\x3d\x13\x81\x21\x8c\x18\x62\x5f\x58\x32\x11\xe2\xb8\xc1\x53\x8d\xaa\x37\xe6\x2d\xa5\x60\x31\xab\xf5\x30\x5e\xe5\x86\xcf\x49\xa7\x17\x0a\x15\x16\xdd\xad\x3d\xba\x06\x78\x7c\x9a\x5f\xa4\x95\xf7\x04\xe7\xec\x7e\x8e\x37\x64\xe6\x29\x1d\xf9\x7d\xd7\x5f\xd1\x35\x01\xf4\x89\x01\xcd\x0c\x38\xcb\xff\x7f\x3c\xfb\x27\x00\x00\xff\xff\x07\x7a\xd1\xad\xcd\x19\x00\x00")
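
// rpProductionGlobalJsonBytes returns the gunzipped contents of the embedded
// rp-production-global.json asset.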
func rpProductionGlobalJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionGlobalJson,
		"rp-production-global.json",
	)
}

func rpProductionGlobalJson() (*asset, error) {
	bytes, err := rpProductionGlobalJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-global.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

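// rpProductionManagedIdentityJsonBytes returns the gunzipped contents of the
// embedded rp-production-managed-identity.json asset.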
func rpProductionManagedIdentityJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionManagedIdentityJson,
		"rp-production-managed-identity.json",
	)
}

func rpProductionManagedIdentityJson() (*asset, error) {
	bytes, err := rpProductionManagedIdentityJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-managed-identity.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionParametersJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x96\x51\x6f\x22\x37\x10\xc7\xdf\xf3\x29\x10\xed\x63\x8e\x84\x9c\xfa\x92\x37\x6e\x21\x55\x54\xf5\x84\xb2\x6d\x1e\x5a\x55\xa7\x59\x7b\x76\x71\xb1\xc7\xd6\x8c\x0d\x85\xea\xbe\x7b\xb5\x2c\xb9\x14\x75\x41\xc5\x20\x24\x1e\x3c\xf3\xff\x79\x66\x67\xec\xf1\xdf\x37\x83\xc1\x60\x30\xfc\x5e\xd4\x02\x1d\x0c\x1f\x07\xc3\x45\x8c\x41\x1e\xef\xee\xba\x95\x91\x03\x82\x06\x1d\x52\x1c\xc1\x36\x31\x8e\x94\x77\x7b\x9b\xdc\x3d\xdc\x8f\x7f\xf8\x70\x3f\xfe\x70\x3f\xbe\xd3\x18\xac\xdf\xb4\x7e\x73\x60\x70\x18\x91\x65\xf4\xa7\x78\xfa\x6e\x78\xdb\xed\xa1\x3c\x45\xa4\xf8\x8a\x2c\xc6\x53\xbb\xd5\x78\x74\xdf\xfe\xde\x1c\xc2\x37\xe1\xf0\x71\xd0\x05\xb6\x5b\x07\xc5\x2f\x28\x3e\xb1\xc2\x67\x7d\x60\xda\x99\x57\x60\x13\xb6\xb8\xe1\xb7\xf5\xaf\xb7\xff\x52\x6b\x67\x68\x12\x4c\x01\x9f\x12\x69\x8b\xf9\x00\x6b\x90\x62\x81\x1c\x0b\xef\x9c\xa7\xcf\xe0\x32\x60\xec\x2e\x8a\xa5\x93\x5f\x29\x92\x8e\x93\xf3\x4d\xdb\x4e\x28\xac\x4f\x3a\x6f\xeb\x56\x5e\xa2\x9a\x83\x5a\xbe\x96\x93\x5f\x90\x20\x2b\x8c\xca\x58\x6b\xa8\x99\x3d\xcc\xca\xe8\x19\x1a\x9c\x28\xe5\xd3\x25\xa8\x12\x79\x65\x14\xce\xd9\x90\x32\x01\x6c\x06\x49\xd9\x24\x11\xf9\x67\xed\xf6\xd1\xe4\x13\x44\x5f\x01\x51\x78\xaa\x4d\xf3\x7e\xea\xb2\x41\x6d\xa9\x25\x80\x3a\xbf\xde\x7b\xc8\x1c\x18\x29\x4e\xbd\x03\x93\xd7\xb2\x1a\x22\x54\x20\x6f\x65\xce\x63\x54\xd1\x2f\x91\xb2\x5b\x5f\x1b\x81\xca\x62\xe1\xc5\x79\x99\x7e\x7a\x32\x8c\x6b\xb0\xf6\x38\xa7\x06\x2b\xd8\x8b\xaa\x6d\x42\x8a\x95\x89\xcf\x0e\x9a\xf3\x33\xa9\x43\x76\x12\x75\xb8\x42\xa3\x37\x10\x71\x0d\x9b\xae\x9e\x92\x2b\x7f\xbb\xd9\x7f\x64\x9f\x42\x56\x45\xf7\xa0\x2b\x64\x64\xc2\x4b\xb2\x78\x7e\x2a\x4b\xdc\xac\x20\xd9\x38\xfd\x5c\x96\xa9\xae\xcd\x5f\xd9\x84\x39\x63\x8e\xdc\x69\xf7\xc4\xbb\xf1\xaa\x7f\xe5\x13\xbd\x78\x54\x2e\x7a\x46\x2b\xc3\x9e\xda\x01\x7e\xb6\x9e\x3c\xfd\xe6\x09\xec\x0b\x36\xc6\x9f\x6a\x85\xdf\x0f\x96\x77\x26\x04\x89\x20\x06\x86\xb7\xff\xb5\x29\xa4\xc8\x60\x0d\xe9\x7e\xfb\xd2\x33\xc2\xde\xa9\xcf\x2e\x3e\xc5\xc5\xde\x9e\xa4\x77\x07\x20\xd0\xa7\x10\x0d\xb2\x03\xda\xac\x51\xe2\x09\x2f\xf2\xbc\x86\x4d\x9b\x4a\x6f\x18\x6b\x13\xb7\xc8\x16\x48\x93\xe7\xb8\xe8\xf3\xa9\x18\xb6\xc6\xee\x02\x3e\x9a\x09\xd4\x6c\x14\x1c\x45\xec\x0c\xef\xd9\x1e\x38\xfc\xd1\x5b\xb7\xe0\x39\x82\x9d\x28\x85\x22\xbb\xf3\xf7\xac\xcf\x6f\xfe\x0e\x92\x7d\x15\x75\xf2\x99\xc5\x15\x44\xd4\xd9\x51\x70\x78\x42\x88\x89\x33\x4e\x2f\x87\xbc\xfb\x97\xc3\x05\x33\xbe\x15\xe7\x8f\xf7\x4e\x7d\xd9\x64\xef\x18\xf9\x43\x9d\xc3\xc5\xf3\x9c\xaf\x31\x86\x38\xbc\x3a\x91\x02\x02\x28\x13\x37\xc7\xe5\x1f\x7b\xd5\x22\x8b\x79\xaa\xac\x51\x3f\xe1\x09\xed\x91\xad\xe5\xe0\xd5\xd9\x7d\x88\xf3\x21\xa9\x12\xc5\x26\x44\xe3\xe9\xf2\x61\xb8\x72\xa5\xd9\x9e\x92\x95\x11\x48\x03\xeb\x2f\xd3\x07\xf9\xb2\xfa\x78\x8c\x22\x52\x58\x04\x4a\x61\x46\xed\x73\xe7\x44\x59\x22\xa7\xfe\xe7\x4d\x0b\xf9\xff\x39\xdc\x74\xff\x5f\x6f\xfe\x09\x00\x00\xff\xff\x50\x76\x5b\x0d\x92\x0e\x00\x00")
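
// rpProductionParametersJsonBytes returns the gunzipped contents of the
// embedded rp-production-parameters.json asset.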
func rpProductionParametersJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionParametersJson,
		"rp-production-parameters.json",
	)
}

func rpProductionParametersJson() (*asset, error) {
	bytes, err := rpProductionParametersJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-parameters.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionPredeployParametersJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x90\x4f\x4b\xc3\x40\x10\xc5\xef\xfd\x14\xcb\xea\xb1\xcd\x1f\xc1\x4b\x6e\x55\xa1\x88\x50\x82\x15\x2f\xe2\x61\xd8\x4c\xdb\xb5\x9b\xdd\x65\x66\xb7\xb6\x4a\xbf\xbb\x24\xa9\x15\xc1\x0a\x2d\x21\x90\xc3\x7b\xf3\x7e\x33\xfb\x3e\x07\x42\x08\x21\x2f\x59\x2d\xb1\x06\x59\x08\xb9\x0c\xc1\x73\x91\xa6\x9d\x92\xd4\x60\x61\x81\x35\xda\x90\xc0\x47\x24\x4c\x94\xab\xf7\x1e\xa7\x57\x59\x7e\x3d\xca\xf2\x51\x96\xa7\x15\x7a\xe3\xb6\xcd\x5c\x09\x04\x35\x06\x24\x4e\xde\xd8\xd9\x0b\x39\xec\x76\x28\x67\x03\xda\xf0\x8c\xc4\xda\xd9\x66\x55\x9e\x64\xcd\xf7\x3d\xe0\x0f\x41\x59\x88\xee\xb0\x56\xef\xd0\xd3\xd9\xe4\xb7\xde\x7a\x6b\x30\x11\x65\x21\xe6\x60\x18\x0f\xd6\x6e\xf8\x93\xc6\x4d\x20\xb8\x35\x91\x03\xd2\x03\x6e\xd7\x10\x4d\x18\x2b\x85\xcc\xa5\x33\x5a\x69\xfc\x87\xfa\xf2\x7a\x1c\x79\x77\xf3\xe4\x56\x68\xfb\x44\x96\x8e\x02\x98\x3e\x89\x33\xa4\xb5\x56\xd8\x13\x72\xee\xf7\xbc\x92\xb4\x55\xda\x83\xb9\xaf\x8e\x43\xa4\xfc\x13\xb2\x80\x80\xef\xb0\x7d\x44\x76\x91\x14\x4e\xc8\x45\x3f\x85\x1a\x4f\x06\xad\xf6\x8f\x2a\x09\xe7\x7a\x73\x72\x9c\xfc\x94\x17\xb3\xf6\x86\x71\x55\x51\xd3\x4b\x0b\x3a\xa3\x17\x3a\xbb\x97\x41\xf7\xdf\x0d\xbe\x02\x00\x00\xff\xff\x10\xdc\x9d\x08\x88\x03\x00\x00")
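
// rpProductionPredeployParametersJsonBytes returns the gunzipped contents of
// the embedded rp-production-predeploy-parameters.json asset.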
func rpProductionPredeployParametersJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionPredeployParametersJson,
		"rp-production-predeploy-parameters.json",
	)
}

func rpProductionPredeployParametersJson() (*asset, error) {
	bytes, err := rpProductionPredeployParametersJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-predeploy-parameters.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionPredeployJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x5f\x4f\x1b\x39\x10\x7f\xe7\x53\xac\xf6\x4e\x4a\xa8\x9a\x64\x93\x52\x89\xe6\xad\x2d\xa7\x0a\xf5\xca\xa1\xa6\xe2\x05\x21\xe4\x78\x27\xc1\xc5\x6b\x5b\xb6\x37\x90\xab\xf8\xee\x27\xef\x6e\xc2\xfe\xf1\x7a\x03\x24\x5c\x28\x59\x1e\x48\xb2\xe3\x19\xcf\xcf\x3f\xcf\x78\x6c\xff\xda\xf3\x3c\xcf\xf3\xff\x54\xf8\x0a\x22\xe4\x0f\x3d\xff\x4a\x6b\xa1\x86\xbd\x5e\xfa\x4b\x37\x42\x0c\x4d\x21\x02\xa6\xbb\xe8\xdf\x58\x42\x17\xf3\x28\x7b\xa7\x7a\x83\xa0\xff\xbe\x13\xf4\x3b\x41\xbf\x17\x82\xa0\x7c\x6e\xe4\x7e\x40\x24\x28\xd2\xd0\xfd\xa9\x38\xfb\xc3\x7f\x9b\x5a\xc0\x9c\x69\x60\xfa\x0c\xa4\x22\x9c\x19\x43\xfd\x6e\x60\xfe\x16\x02\x33\x24\x09\x1a\x53\x50\xfe\xd0\x4b\x7b\x95\xb6\xa3\xb1\xd2\x20\xbf\xc2\x7c\x86\x62\xaa\x3f\x62\x0c\x4a\x9d\x72\x4a\x30\x49\x44\xcf\x97\xa2\xe6\xf9\x55\xf8\x96\x28\xd0\xc0\x10\xd3\xc7\xa1\x31\x79\xae\xe2\xb1\xc2\x92\x08\x4d\x38\x6b\xef\x77\x17\xef\x2e\xb2\x4e\x14\x1a\xf2\xf1\x4f\xc0\x8b\x86\x02\x49\x14\x81\x06\xa9\xda\xad\x89\x18\x81\x9c\x11\x0c\xa7\x92\x30\x4c\x04\xa2\xc7\x61\x6b\xdf\xaa\x43\x80\x8c\x88\x32\x1e\x17\xdd\x2a\x08\x29\xc0\x12\x74\xd5\x99\x82\xd0\x14\xb4\x6f\x7d\x7b\x51\x35\x9b\x02\x07\x52\x93\x09\xc1\x48\x5b\x70\x2a\x4a\x4a\x40\x1a\x2c\xdd\x5f\x4a\x84\x40\xc1\x2d\x61\xba\xe7\x78\x1d\x8b\xd0\x98\xb0\x3b\x50\xf9\xf5\x6e\xcf\xfe\x2d\xe7\xaa\x1f\x8e\x7f\xf0\x6b\x60\xdb\xc4\x0b\xb9\x5d\xbc\x78\x14\xac\x82\x4b\x8d\xe8\x0e\xd5\xf5\xa2\xaa\x52\x0f\x76\xb0\xba\xa3\x04\x25\xea\xe9\xb0\x27\x9f\xee\xb2\xa4\x72\xef\x71\x31\xab\xa4\xd9\xea\x64\xf4\xa5\xea\x91\xaf\xe7\x02\x0c\x5c\x63\xce\x69\xa9\xb7\x7e\x08\x13\x33\x80\x67\x88\xc6\x46\x66\x82\xa8\x82\xa5\xc4\x5d\x6e\xc4\xe1\x56\x4b\xf4\xb9\x21\x77\xd5\x18\x46\x52\xa2\x79\x83\xe5\xf3\x8b\x7a\xb3\x47\x9f\xdc\xa1\x71\x43\x66\x4f\xdd\xa1\x63\x43\x56\x47\x0d\x53\x6b\xfd\x66\x6d\xe9\xbf\xde\x90\xd2\x92\xb0\xa9\x6f\xd5\x34\x45\x1a\x6e\xd0\xfc\x3b\x28\x1e\x4b\x0c\x5f\x24\x8f\xc5\x09\x8a\xe0\x71\xda\xae\x33\x08\x4e\x25\x4c\xc8\x6d\xa3\x8e\x92\xef\x11\xba\xfd\x1b\xd8\x54\x5f\xf9\x43\x6f\x10\x58\x0d\x48\x71\xa2\xa6\xa3\xa4\xab\x1f\xc3\x50\x1a\x9c\x13\x53\x1b\xc2\xd9\x16\xa1\x1e\x80\x4c\x21\x0e\xc8\x0c\xe2\x62\x7c\x2a\xa9\x12\x92\x0b\xb3\x68\xb2\xf8\xe3\x65\x21\x2e\x96\x44\xcf\xbf\xc7\xd4\xb1\xa4\xb2\xc7\xc7\x55\x0c\x94\x65\x35\xc7\x9c\x1a\xdf\x7e\x60\xe1\x08\x9b\x69\xdf\x12\xf7\xcc\x14\xfc\x8e\xd8\x34\x41\xe4\x4d\x53\x9b\x10\x94\x26\x0c\x99\x34\x52\x68\x78\x70\xf0\x6e\x35\x73\x05\x16\x98\x96\x1f\x4d\x85\xb0\xa0\xf3\xb7\xa4\x72\x90\x0f\xe8\x45\x45\x5f\xa3\x0b\x28\x99\xee\x89\x69\x4a\xf9\x4d\x93\xb8\x90\x84\x9b\x11\xf4\x87\x5e\x7f\x10\x34\x75\x8c\x48\xc0\x3a\x2b\x56\x8e\xd9\x98\xc7\x2c\xb4\x67\x27\xaf\xc8\xdc\x8a\x26\x96\xce\x69\x5f\x8a\x4b\xc2\x2e\x91\x8c\xec\x6a\x6a\x54\xfc\x0e\x84\x7a\xff\x48\x42\xf5\x4d\x89\x78\xd8\x0d\x7a\x83\x83\x6d\xe5\xd1\xc1\xf3\xf3\x28\x1c\x6b\x93\xe0\x0d\x99\xb2\x14\x72\x69\x20\x7e\x5d\xa4\x3a\xfc\xbd\x59\xd5\xdf\x06\x56\x1d\xbe\x3e\x5a\x35\x36\xdc\x1a\x4e\x1d\x01\x2b\xaf\xac\x2a\xd2\x05\x4a\xbd\x7f\x7e\x4a\x01\x9b\xe7\xf8\xf4\xca\xa8\xf4\xb8\x75\x54\xe2\x54\xb9\x56\xaf\x5b\x78\xdb\x2b\xf6\xba\xde\x3d\x6b\x04\x7b\xf7\x7f\xad\xaf\xa6\xc0\x60\x86\x6a\xb8\x56\xf9\xb5\xb8\xb3\x50\xb2\x92\xd3\xdc\x61\xaa\x52\x33\x2d\x6a\x8f\x6f\x04\x4b\xae\xf8\x44\x77\x4f\x40\xdf\x70\x79\xdd\x63\xe9\xff\x51\x56\x31\x24\x95\x9d\x2a\x37\xa7\x1c\xa3\x85\xff\xe7\x32\x5f\x04\xb6\xf7\xbb\x8b\x97\xe5\xf1\xf5\x31\x67\x21\x59\x36\xcb\xd3\xe4\x7e\x53\xa3\x4a\x0b\x1f\x09\x92\xdb\x7b\x1f\x04\x83\xa0\x13\x1c\x76\x82\xbe\xb5\x96\x74\x16\x46\x0e\x8c\x04\xec\x60\xaa\xaf\x1f\x
51\x3a\xfd\x46\x02\xe1\x6a\x89\x5f\x96\xca\x05\x03\xc7\x86\x5a\x92\x75\x82\x34\xeb\x58\xa5\x2c\x1b\x67\x96\x1d\x3e\x15\x8f\x99\x6b\xf3\x6e\x4d\xb1\x18\x59\x93\x66\xb0\x52\xd2\xb4\x51\xa5\xd1\x62\xd2\x92\x84\x05\xe6\x1c\x87\xed\xd6\x8a\x4c\x6c\xbd\xf5\x5a\xe9\xcc\x6f\x8e\xb3\x89\x29\x8d\xa6\x06\x06\x16\x53\xea\x14\x76\x04\x33\x2f\xb7\x5f\xfc\x17\x0b\x05\x27\xac\x61\x57\x75\xf1\x34\x43\x91\xd7\x5d\x9c\x90\x5f\x61\x7e\x86\x62\xea\xda\x9d\x2d\x28\x59\x4c\xbb\xd5\x7a\xb6\x6c\xf6\xa6\x3e\xbe\xe7\x9f\x2a\x69\xcb\x4f\x03\x80\xde\x13\xe1\x48\xf6\x31\x3e\x73\x15\x71\x75\xf4\xe9\xc5\x60\xe2\x94\xa8\x6f\xbf\x5a\x66\xed\xa4\x31\x62\xed\x69\x75\xc6\x2a\x67\x02\x8e\x84\x31\x23\x52\xc7\x88\x66\x5f\xd7\x94\x2a\x6a\x63\x7e\x65\xdb\x52\x00\x0b\xd5\x3f\xcc\x3a\xc0\x6b\x88\x30\x7b\x76\x04\xb7\x2f\xe3\x1c\x98\x90\x3d\x78\xb1\x19\x27\xeb\xfe\xf6\x66\x9c\x74\x1d\xf5\xcc\x49\x47\x48\x32\x43\x7a\x99\x74\xb2\x7e\xe6\x8e\x52\xfc\x23\xa2\xd0\x98\xc2\x93\x57\xe9\xc6\xbf\x0d\x85\x13\x01\x49\x44\xe9\x04\xd5\xe9\xfb\xea\xa2\xca\x3d\x8b\x1e\x1c\x58\x16\x88\x9e\x63\xce\x30\xd2\xed\xfc\xfa\xb9\x78\xce\xd4\xda\x7f\xeb\xb5\x3a\x98\xda\x16\xd3\x55\xc4\x17\xeb\x8c\x5e\xa2\x61\x4d\x48\x37\x45\xc1\x47\x1f\x9e\xab\xeb\xb8\x3e\x6a\x4e\x50\x44\xe8\x3c\x29\x8d\x6b\x66\xe9\x12\x45\xa5\x11\x0b\x91\xb4\xcc\x1b\x5b\x68\x44\xe5\x23\xcc\xe5\x28\x2c\xaf\x48\xb5\x5b\xce\x6b\x51\x66\x4c\xf2\x23\xd6\x7c\x18\xdd\xda\xb7\x1f\xfd\x03\x33\xf6\x46\x7c\xa2\x8f\xd2\x2b\x40\x43\x4f\xcb\x18\x9c\xb3\xb1\x4c\xfc\xfe\x87\x4e\xf0\x61\xc5\x12\xea\xa1\xbc\x0b\xc7\x7a\xc7\xbb\x8a\xc8\x06\x79\xe7\xbc\x76\x65\xe5\x9d\xf3\x36\xc2\x4b\xe5\x9d\xe0\x72\xc7\xbb\x8a\xc8\x06\x79\xe7\xba\x97\x66\xa5\x9d\xeb\x36\xca\x4b\x65\x9d\x9a\xe1\x1d\xeb\x2a\x22\x1b\x64\x9d\xf3\xde\x9e\x95\x76\xce\xeb\x48\xcf\xcc\xbb\xe4\xd3\xc5\xde\xdd\xde\x7f\x01\x00\x00\xff\xff\xa8\x8f\xbc\x95\xe5\x2d\x00\x00")
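
// rpProductionPredeployJsonBytes returns the gunzipped contents of the
// embedded rp-production-predeploy.json asset.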
func rpProductionPredeployJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionPredeployJson,
		"rp-production-predeploy.json",
	)
}

func rpProductionPredeployJson() (*asset, error) {
	bytes, err := rpProductionPredeployJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-predeploy.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionSubscriptionJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x90\x41\x4f\x03\x21\x10\x85\xef\xfd\x15\x04\x3d\xba\xbb\x50\x63\x62\xf6\x0f\x34\x1e\xf4\xa2\xf1\x62\x3c\x4c\xe9\xb8\x60\x80\x21\x30\x7b\xa8\x4d\xff\xbb\x81\xd6\xc6\x2d\x5c\x26\xf3\x3e\xde\x23\xef\xb0\x12\x42\x08\x79\x5b\x8c\xc5\x00\x72\x14\xd2\x32\xa7\x32\x0e\xc3\x69\xd3\x07\x88\x30\x61\xc0\xc8\x3d\xfc\xcc\x19\x7b\x43\xe1\xac\x95\x61\xad\xf4\x43\xa7\x74\xa7\xf4\xb0\xc3\xe4\x69\x5f\xb9\x37\x0c\xc9\x03\x63\xff\x5d\x28\xde\xc8\xbb\x53\x82\xa1\xc8\x18\xf9\x1d\x73\x71\x14\x6b\x90\xee\x55\xbd\x7f\x40\xc6\x42\x73\x36\x58\xe4\x28\x3e\xda\xaa\x9e\xc3\x65\x6a\x50\xca\x94\x30\xb3\x6b\xd4\x52\x6b\xfa\x94\x69\x4e\xaf\x96\x32\xbf\x40\xc0\x9a\x92\x93\x45\xf0\x6c\xcf\x31\x0b\x1a\x23\x6c\x3d\xee\xe4\x28\x38\xcf\xb8\xd0\x8f\x4b\x5c\xc6\x8b\x5d\x77\xf2\xeb\x60\xba\xb2\x94\xbc\x4f\x8d\x79\x76\x26\x53\xa1\x2f\xee\x9f\x62\x71\x93\xe5\x32\x80\x61\x47\x71\x53\x7f\x57\xae\x9f\x79\x32\xc0\xe7\x4e\x36\x9e\xb6\xe0\xaf\x09\x48\xee\x5f\x6f\x6b\xa5\x1f\x3b\x75\xdf\x29\x2d\x2f\xd8\xb1\x4d\x9f\xab\xe3\xea\x37\x00\x00\xff\xff\xd4\xfe\xf8\x48\xd1\x01\x00\x00")
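
// rpProductionSubscriptionJsonBytes returns the gunzipped contents of the
// embedded rp-production-subscription.json asset.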
func rpProductionSubscriptionJsonBytes() ([]byte, error) {
	return bindataRead(
		_rpProductionSubscriptionJson,
		"rp-production-subscription.json",
	)
}

func rpProductionSubscriptionJson() (*asset, error) {
	bytes, err := rpProductionSubscriptionJsonBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "rp-production-subscription.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

var _rpProductionJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\xe9\x72\xa3\x48\xb6\xf0\xff\x79\x0a\x47\x7d\x37\xc2\x5d\x31\xe5\x2a\x40\x56\x95\x99\x88\xf9\x21\x10\x20\x90\x84\xc5\x96\x20\xe6\x4e\x4c\xb0\x09\x61\x92\xa5\x01\x49\x96\x6e\xcc\xbb\x7f\x91\x2c\x12\xda\x6c\x79\xa9\xee\xea\xee\x52\x47\x54\xcb\xca\xcc\x73\x72\x39\x7b\x9e\xcc\xfc\xbf\xbf\x5d\x5d\x5d\x5d\x7d\xf8\x9f\xdc\x99\x7b\x91\xf5\xe1\x1f\x57\x1f\xe6\x45\x91\xe6\xff\xf8\xf2\xa5\xfa\xe5\x73\x64\xc5\x96\xef\x45\x5e\x5c\x7c\xb6\x36\x8b\xcc\xfb\xec\x24\x51\x5d\x96\x7f\x21\x30\xbc\x7b\x83\xe1\x37\x18\xfe\xc5\xf5\x52\x98\xac\x51\x3d\xd5\x8b\x52\x68\x15\xde\xe7\x87\x3c\x89\xff\xdf\x87\x4f\x15\x06\x27\x89\x0b\x2f\x2e\x80\x97\xe5\x41\x12\x23\x44\xf8\x67\x0c\xfd\xd7\x54\x48\xad\xcc\x8a\xbc\xc2\xcb\xf2\x0f\xff\xb8\xaa\xba\x55\xfe\x6e\x39\x99\xec\xe5\xc9\x22\x73\x3c\xde\xdd\x2b\x2a\x8b\x8b\x75\xea\x21\x68\x79\x91\x05\xb1\xff\x61\x5b\xf8\xdf\x4f\x2d\x10\x6e\x14\xc4\xbd\x34\xa0\x2d\x6a\x11\xbb\xd0\x7b\x23\x14\x18\x78\x71\x41\x7b\x59\x41\x27\x51\x94\xc4\xa2\x15\xbd\x16\x62\x16\xbd\xa4\x57\x9f\xf6\x4b\x5d\x6f\x66\x2d\x60\x01\x2c\xb8\x28\x6b\x3d\x89\xe3\x35\x7d\x7e\x2d\xbe\x0a\xd9\x05\x8b\xf5\x3a\x04\x88\x0a\x69\x98\x2c\xdc\x37\x4c\x3c\x82\xa1\x78\xce\xc4\x72\x42\xa0\xf4\x54\x2f\xb6\x2e\xea\xf0\x49\x60\x76\x00\x61\x10\xfb\x0c\xc1\x28\x45\x92\x59\xbe\xd7\x73\x9c\x64\xf1\xfd\x26\xa0\xc6\xa7\x78\xd9\x32\x70\xbc\x49\x16\xc4\x4e\x90\x5a\xf0\x7b\xa1\x73\xe0\x22\x2f\xbc\x6c\xec\x46\xf5\xb8\x5e\x37\x4b\x5b\x30\xb9\xfb\x5e\x70\xe8\x24\x9e\x05\xfe\x4e\xa2\xbc\x0d\x1a\xa2\xa6\x3c\xb5\x9c\x57\x92\x54\x0d\x69\x62\x65\x5e\x5c\xf4\x93\xc8\x0a\xde\x20\x18\x5c\xab\xb0\x6c\x2b\x6f\x28\xe9\x0d\x80\xec\x22\x09\xbd\xf8\x62\x8e\x3c\x0d\x24\xc8\x2d\x1b\x7a\x74\x92\x47\x49\xde\xa7\xd8\x20\xf3\x56\x16\x84\xe7\x81\xd9\x49\x02\x9f\x21\xb6\x99\x05\x73\xef\x24\xb6\x19\x5c\x78\x71\x61\x07\x05\x1f\x59\xfe\x2b\x87\x3d\x4b\xdf\x36\xe2\x59\xfa\x0a\xee\x3a\x09\xc9\xb7\x0a\x6f\x65\xad\x2b\x82\xc8\xbf\x0f\x87\xd6\x38\x1a\x2d\xc9\x65\xc9\x22\x7d\x3d\xc9\xd4\xd0\xde\x6b\x02\x82\x54\x5e\x40\xef\x89\x91\x5b\x59\x66\xad\x4f\xb7\x0d\xbd\xf5\x12\x8d\xbf\x2f\x2a\xca\x62\x36\x0b\x1e\x5f\xd7\x85\x06\xcc\x24\xf3\x5e\x0d\x23\x72\x23\x36\x2b\x4d\x18\x57\xcb\x9e\x20\xfd\xa7\x61\xe4\x2e\x13\x2f\x83\x2c\x89\x91\x9d\xf4\x3a\x20\x71\x12\x9b\x49\x6c\x41\xd9\xf3\x83\xe4\x29\x8a\xaa\xe6\xf5\x69\x82\xfa\xd7\x5e\x69\x59\xc3\xb3\xf2\xc2\xca\x03\xeb\xa0\x65\x59\xe6\x78\x71\x91\x59\x30\x88\xdd\xd3\xe5\x61\x92\x79\x56\x5d\xe9\x54\x79\x9e\x2c\x8a\x79\x5d\xbe\xc8\x4f\x62\xb0\x62\xcb\x7d\x0a\x84\xef\x65\x91\x15\xaf\x57\x5e\x5e\x3c\x51\x2b\x4e\xb2\x95\xb5\x46\x43\x39\xd9\x8d\x55\x50\x6c\xbc\x0c\x5a\xb1\x1b\x27\x59\x31\x3f\x55\xc7\xce\xac\x4d\x00\xcb\x0e\x9f\x1d\x89\x35\xcb\x02\xc7\x3a\x0b\xa2\x2c\xd8\x8d\x76\xaf\xc2\xbf\x4f\xae\x6d\x9a\x64\x85\x05\x7b\x8e\xe3\xe5\x79\xc9\xc5\xbc\xfb\xbc\xc8\x78\x02\xd2\xdb\x84\x60\x05\x83\x81\xde\xd2\x2a\x3c\xf7\x6d\xfd\xc9\x52\xd6\xb3\x8a\x45\xf6\x94\x20\x78\x8b\x08\xcc\xd2\x37\x68\x8b\x2c\x7d\xab\x71\x83\x20\xbc\xd1\xae\xa9\x40\xbc\x83\x49\x53\x01\x7a\xa3\x35\x93\xa5\xef\x63\xc8\x64\xef\xa6\x48\xb3\x14\x44\x79\x4e\x5b\xa9\xe5\x04\xc5\xfa\x3c\x8c\x20\x3e\xe4\xf9\x43\x12\xea\x9c\x84\x9f\xe7\xf3\xc9\xc2\x86\x81\x33\xf4\x9e\x80\xfe\x54\x0f\xf3\x3d\xeb\xbf\x9a\xb9\x57\x42\x5a\xd8\xb9\x93\x05\x69\x11\x24\xf1\x3b\x69\xf5\x65\xa4\x04\x9b\x37\xbb\x7c\x4a\x61\xc5\xae\x95\xb9\xff\xe9\x13\xf9\x7f\x96\x9d\x73\xa8\xf2\x9c\x86\x9e\x15\x2f\x52\x26\x46\x86\xe3\x13\x2b\x7e\x81\xa5\x58\x64\x8b\xd3\x86\x22\xc2\xf3\xc2\x09\xf9\x5b\x0b\xc6\x87\xac\x9e\xd9\x7c\x4f\x11\x1e\x80\xca\xc
3\xc5\x11\xfc\xb2\x20\xae\x30\x6f\xa7\x64\x5f\xbe\xff\xf7\x60\x4c\x69\x96\xa4\x5e\x56\x04\x27\xe4\x5f\x55\x5e\x92\x1e\x3f\xe9\x41\x98\x38\x16\x5a\xf7\xb1\x57\xcc\x13\xb7\xc6\x50\x04\xce\xd3\xf0\x37\x49\x5c\x82\xfe\xf0\xaf\x60\xf6\x8b\x93\xc4\x05\xb2\x36\x7f\xd9\x85\x54\x7e\xb9\x3e\xb0\x1b\xae\x3f\x7e\x2a\x92\x51\xb2\xf2\xb2\x5f\x32\x2f\x85\x96\xe3\xfd\x92\xb5\x29\xed\x97\x8f\x9f\x9b\x9e\x7c\xba\xba\xbe\xba\xfe\x74\x75\x7d\xfd\xf1\xe3\xc7\x4f\xd7\xd7\x9f\xd2\xc0\x09\x4d\x84\xef\x97\xeb\x71\xe0\x64\x49\x9e\xcc\x8a\xcf\xa2\x57\xac\x92\x2c\x44\xf5\xb6\x63\x71\xdd\xcc\xcb\x73\x2f\xbf\xfe\x74\x75\x1e\x76\xe7\xe3\xc7\x7f\x1f\x52\x40\x33\xb7\x59\x7a\x93\x06\xe9\x61\x69\xb3\xba\x47\xc8\xbf\x1c\x61\x3e\x6c\xda\xa0\x2d\x67\xea\x5c\x9f\x8e\xba\x63\xa5\x41\x2b\x6c\x45\x60\x04\x76\x83\xdd\xdd\x60\xf8\x49\xf2\xff\x49\x3f\x3f\x0e\xfd\x54\x16\xcc\x4f\x1a\xba\x8c\x86\x66\xb5\x7f\xc3\x4f\x2a\x2b\x64\x91\x95\x43\xc9\x4f\xfa\x09\xc7\xdd\x7c\x09\xae\xfd\xba\xfb\x33\xfe\x6c\x83\xb2\x51\xe0\xee\xcd\x3f\xef\x9e\x20\xa6\xe3\xb5\x44\xe4\x55\x09\x95\xeb\x23\xaa\x39\x89\xa6\xb0\x7c\xd4\xa1\x78\x01\xe1\x93\x95\xff\x7b\xb6\xf4\xbf\xe7\xb1\xb4\xe5\x5c\x33\xf9\x1f\x4e\xd6\x3e\x03\xe4\x8f\xb9\x00\x3b\xae\xfc\xa1\x16\xa1\xee\xd6\x33\x0b\x71\xf4\xeb\xbf\x4f\x39\x8f\x96\x13\x7a\xb1\x5b\x8f\x7a\x92\x24\xf0\x55\x4c\xd4\x22\x8f\x1a\xe2\x5b\x3a\x05\x13\xcb\xa5\x2c\x68\xc5\x4e\x10\xfb\x4d\x4c\xe6\xbb\x32\xf6\x19\x81\xf2\x8e\xf4\xb5\x1b\x93\x97\xe5\x5f\xce\x09\xb0\x9a\xeb\xa1\x5d\x7f\x69\xea\x21\xf2\x7b\x9a\xa0\x9e\x26\xce\x13\xeb\xfc\xdd\xc6\x76\x82\xa4\x0e\x87\x55\x57\x79\xf3\xa8\xd2\x2c\xb1\x8f\x8d\xea\xf7\x1a\x48\x09\xfd\xa8\xef\xe5\xaf\xef\xd1\xf3\x22\x71\xca\x55\xf8\xa0\x3a\x87\x4a\xff\xa8\x3a\xea\x58\x3f\x40\x4e\x82\xbd\x68\x34\x7a\xbf\x72\x3e\x9e\x6b\xda\x90\xd0\x24\xc9\x90\xbf\x7f\x7b\xdb\xb9\x8c\x54\x76\xf5\xdf\xaa\x30\xa0\x9d\x2d\xa0\xf7\x7b\xa8\x8b\x1f\x92\xad\x0f\xa4\xf7\x4f\xd6\x3e\xd9\xbb\xdf\x9c\xb5\x1b\x5d\x8f\x4a\x6e\xca\xcc\x82\xbf\x12\x93\xdf\xbe\x87\x41\xf2\x93\xd1\x7f\x32\xfa\x8b\x47\xf5\xfb\x32\x7a\x9e\xcf\xff\xb8\x6c\x4e\x10\x2f\xe2\x72\x82\x20\x88\xf7\x63\x73\x34\x75\x6f\x31\xf2\xab\x95\xf9\xee\x1e\x7b\x6b\x6d\x06\x48\xa8\x3f\x37\xc5\xe9\xa5\x12\x34\x5e\x44\xb6\x97\xdd\xcf\x26\xcd\x38\x9e\x5b\x8c\xcc\xfb\x75\xe1\xe5\xc5\xc4\x2a\xe6\xa8\x37\x5f\xe6\x9e\x05\x8b\xf9\xe6\x4b\xe6\x59\xee\xfa\x3c\x05\x5e\x68\x64\x55\x6c\xf4\x7b\xb8\xe4\xaf\x9e\xe1\xdb\x3f\xd0\x0c\x1f\x5b\x07\xbf\xf7\x5c\x5f\x20\x69\xd2\x1d\xdf\xbf\x78\xaa\xdf\x6f\xb6\x5e\x22\x27\x9e\x8c\x0a\xee\xb9\x14\x97\x87\x49\xf7\x14\xc2\x77\x0e\x91\x1e\xed\x0f\xa5\x5e\xec\xe6\xf7\xf1\xe9\x04\x86\xef\x11\x92\x7a\x63\xa0\xf1\x6f\xa7\x97\xe3\x2f\x1b\xf1\xcd\x17\x76\xec\x1d\xef\x93\x9f\xac\x7b\x91\x79\xb2\x0c\xb2\x62\x61\xc1\xfa\xcf\xfc\x4b\x85\xa0\x59\x85\x65\xec\x15\xf5\xd7\xaa\xe0\x79\xe3\xe4\x4d\x8c\x5a\x27\xdd\xed\x42\x89\x4f\xb4\xf9\x83\x6f\xbc\xfc\x0c\x92\x9e\xea\xd3\x1f\xda\xc9\xba\x09\xe2\xc2\xcb\x62\x0b\xa2\x5f\x0e\x49\xf9\xcf\xe2\x6e\xed\x0d\xf2\x8f\xea\x77\x9d\x5c\xa9\x3f\x78\x24\xf5\xee\xf6\xb6\xfb\xc2\x28\x4b\xf7\x5d\x84\xf5\x93\x61\x96\x3f\x85\xef\xf5\xdc\xbc\xfe\x48\x9e\xc1\x1e\x35\x7f\x07\x43\x77\xcb\x3a\x3f\xa6\xc5\xfb\x1e\x26\xe2\xbf\xda\x46\x44\x95\xfe\x75\xc6\xb8\x2d\x02\x2f\xdb\x33\x2a\x4f\x26\xe6\x6e\x13\xef\xf6\x21\xef\xe7\xe5\x1d\x09\x9e\x97\x9a\xa4\x8b\xd4\xcf\x2c\xd7\x9b\x24\x30\x70\x8e\xf3\xf0\xb6\xd5\xa2\xc4\xad\x96\xc9\x8a\x17\x16\x3c\x26\x90\x13\x34\xf6\xa1\x36\x12\xc7\x96\x33\x0f\x62\x6f\x92\x25\xb3\xe0\xc4\xa1\xab\x6d\xf5\x24\x7f\xae\x4a\x35\x31\x49\x94\x2e\x0a\x2f\x13\xad\xc8\xdb\x26\x98\x7f\xf8\x97\x93\xc4\x8e\x55\xa0\xe9\xb9\x
b9\xfe\x74\xb5\xbf\x14\x55\xda\xda\xf5\xc7\x4f\x57\xd7\x37\x4f\x6f\x81\x57\x67\xce\xb4\x1c\x91\x6a\xb5\xaa\x0e\x4c\x16\xee\xcd\x22\xf7\xb2\xa7\x9a\xc1\x20\x5e\x3c\xbe\xcc\x9e\x68\x4e\x79\x4c\xac\x3c\x5f\x25\x99\xdb\x5b\x14\x73\x2f\x2e\x82\x2d\x5d\x17\xd9\xc2\x7b\x46\x20\x20\xa7\xf4\x22\xdd\x97\x36\x99\x96\xe7\xc5\xe5\xe1\xe7\x79\xa8\x3b\xe8\x5b\x81\x94\x44\xde\x97\xdd\x8c\x7d\xf9\x9c\xe7\xf3\x2f\xd6\xa2\x98\x27\x59\xb0\xf1\xdc\xff\x84\xa8\x03\xcf\x27\x20\x6c\x01\x87\xde\xba\x6f\x15\xd6\x11\x0f\xb4\x73\x47\x9f\x55\xbd\xcd\xe7\xbc\x8f\xd1\x7c\xfe\xfd\x5a\x1f\xe5\x25\x41\x94\x26\x6d\xf5\x22\x62\x0f\x22\xcb\xf7\x64\x6f\xe6\x65\x5e\x7c\x22\xb5\xf8\xa8\x7e\xb9\xce\xf9\xbc\x12\x2f\xb2\xe7\x0e\xac\x67\xed\x81\x64\x36\xab\xab\x0f\x98\xd1\x73\x95\x2b\x21\xf8\xe1\xdb\xcd\x08\x8c\x9f\xab\xbb\xdc\x49\x5a\x68\x15\x5e\x5e\xbc\x4e\x43\x25\x79\x3f\xc8\xc3\xe7\x87\xee\x64\x9e\x55\x78\xf7\x69\xa3\x15\xd8\x2c\x89\xaa\x1c\xf5\x67\xfa\x59\x1d\xb8\x75\x2f\xc2\x72\x75\x75\x94\x77\xac\xd6\x0a\x6c\x92\x79\x51\xb0\x88\xfe\x33\x92\x95\x57\xfb\xba\x2f\xa2\xa3\xb8\xd2\x93\x17\xd1\x51\x65\x29\x4c\x2e\x32\x96\x7f\xcb\x24\x83\x27\x4d\x93\x0a\x13\x8f\x0c\x87\x99\xe5\x78\x17\x86\x4c\x9a\xcf\x05\x0b\xd9\xb2\x50\x90\x92\xb8\x89\x03\xe7\x92\xf4\xa8\x17\x58\x8f\xad\x36\x41\x64\x65\xeb\x8b\xc4\xfa\xb6\x51\x90\xbe\x70\xcc\xed\xcf\xe5\xf2\xfb\xea\xd4\x5c\x04\xa9\x53\xe2\x7e\x81\xb8\xbe\x7a\xe5\xe4\xec\xb5\x7f\x41\xa8\xea\x64\xfb\xdf\x25\x7c\x75\xea\xf3\x8c\xc7\x77\xb2\xf3\xaf\xa1\x92\x7d\x00\xfb\x11\xa5\x97\x47\x38\x4e\x42\x3d\xa4\x8d\x34\x48\x7f\x9b\xf9\x68\x0b\x19\xea\x05\xe1\xab\xe7\x3e\xaf\x9b\x87\xab\xdf\x7f\x1b\xfa\xdc\xe7\x15\x93\x7b\xf5\x43\xcd\xc3\x6b\xe3\x43\xe7\x3e\xcf\x1b\x7b\x87\x9f\xa7\x8d\xbf\xd7\xc3\xbf\xac\xe6\xf3\xd8\x9f\x86\x73\xbe\xf4\x34\xe4\x73\x46\x85\x1b\x58\x7e\x9c\xe4\x45\xe0\x5c\xe6\x8d\xd9\x49\x52\xf4\x77\x6d\x9e\x37\x2e\xbc\xed\x09\xa5\x4b\xfc\x9b\xca\xd2\xd2\xb2\x60\xcf\xc7\x6b\x6e\x4c\x39\x70\xf4\x4e\x9d\x07\x2b\x9d\xbe\x2f\x4f\xdb\x20\x2f\x9a\x20\xef\xb1\xf0\x62\x64\xd7\x5e\x34\x3d\xdb\xda\xdf\xc5\x5e\x71\xf2\xe7\x8c\xdb\xab\x57\xdb\x2b\x6d\x47\x62\xc7\xd8\xbd\xf2\x46\x1a\x66\x37\xaa\x0b\x2d\x99\x26\xca\x43\x2f\xf2\x22\x89\x94\xf2\xac\xdd\x4b\xda\x0e\xac\xd8\x85\x5e\xd6\x0e\xde\x6c\x2f\xb2\x79\x16\x80\xb5\x28\x12\xad\x0a\x73\x8c\x83\x38\x69\x41\x79\x81\x2d\x96\x7b\x45\x11\xc4\x65\x5e\xfc\xff\x5d\x28\x6d\xcb\xd0\xa1\xe7\x14\x9e\xab\xb4\x1a\x5f\xee\xfd\x56\x47\x12\x4b\xca\xb7\xad\xdc\xfb\x7a\xfb\x4b\xcd\x00\xd5\x5f\x6a\xa2\x94\xc7\xed\x7e\xb9\x76\x08\x80\xf1\x34\x0e\xbd\x5e\x32\xbc\xfe\xf8\xe9\xba\x47\xcb\x32\xa3\xdc\x6b\x32\xcd\xf0\xfd\x7f\xfe\x4f\x5d\xff\xea\xc6\xbd\xfa\xdf\x05\x86\x75\x9c\xf6\xbf\xd7\xd7\xd7\x9f\x6a\xe8\x6d\x5e\xda\xbb\x04\xe8\xba\xdc\xce\xba\xfe\xf8\xbf\xf1\xf5\xa7\xeb\x5e\x7f\xcc\x8b\xbd\x09\x4f\x8f\x78\x46\x54\x69\x46\x56\xe9\xfb\xf1\xf8\x5e\x14\x7b\x63\xe6\x95\xb8\x9e\xb8\xe7\x67\x1f\xb5\x3c\x7e\x5f\xc4\x67\xaf\xea\x39\x44\x5b\xe1\x7c\xf5\x64\xee\xae\xe8\xd9\x07\x6c\x6a\x32\x43\x8f\xee\xb5\xfe\x1b\xc6\xb0\x77\x3b\xcf\x31\x78\x85\xa1\x27\x3d\x7a\x08\x94\x9e\xca\x88\xbd\x37\x8c\xe1\xf4\x0d\x3e\x7b\x08\x29\x7e\x34\xe2\x45\x8e\x21\x18\x45\xbd\x97\x7b\x1c\xd3\xa3\xe9\x7b\xed\xf5\x38\xcf\x5f\xf4\xb3\x87\x96\x1e\x69\x8a\xca\xc8\xe3\xfe\xb8\xc6\xf7\x3a\x6c\x47\xf7\xee\x9c\x46\xa2\xf4\xdf\x07\xcb\xf6\xf8\xfa\x39\x34\xf4\xbd\xc8\xf2\x1c\x60\x64\x85\xbf\x17\xdf\x8c\x6c\xef\xa0\xfb\x39\x94\x88\x0c\x95\x49\x8f\x7e\x25\x2d\x9e\xba\xdc\xe7\x14\xaa\x49\x4f\x66\x44\xb5\x7f\x3f\xee\xf1\x6f\xe0\xde\x33\x17\x00\xed\x21\xec\xf7\xd4\x1e\xd5\x53\x1a\x42\x7c\x3d\xb2\x13\x17\x04\xed\x23\xa2\xd4\xfb\x21\
x23\xbe\x4d\x4e\x1c\x5c\x1e\xb4\x87\x80\x1d\x69\x8c\xa8\x52\xbc\xca\x8f\x7b\xdc\x2b\x07\xb1\x7f\xd3\xcf\x3e\xf8\xc9\xdb\xba\xbe\xbb\x00\xe8\x00\xac\xc2\xc8\x80\xa7\x99\x89\xcc\x8b\x34\x3f\xe9\x8d\x5e\x8f\xe0\xf8\x62\x83\x3d\x54\x5c\x4f\x65\xf4\xde\xb4\xa2\x2a\xe5\x75\x48\xf6\x2f\x0f\x3a\x05\xbe\xd1\xab\x9c\x7c\xaf\x4d\x5e\x4f\x4e\xe7\x6e\x10\x3a\x85\xf2\xbd\x66\xf0\xec\x3d\x43\x7b\x48\x87\xcc\x14\xf4\xb4\x91\xda\x17\x15\x45\x63\x59\xde\x78\x1d\xb2\xa3\x9b\x84\x4e\x22\x99\xc8\xcc\x9b\x31\x54\x7b\x40\x7b\xe0\xc7\xfd\x31\x2b\xdf\x8b\x2a\x23\xf6\x35\x79\xf4\x3a\xf0\xfb\xf7\x0f\x1d\x80\x57\xfa\x8c\x08\x78\xf9\x5e\x1c\x33\xaf\xd5\x03\x07\x77\x13\xed\x21\x98\xdc\xcb\x6a\x6f\xd4\xa3\x69\x46\x51\x4a\x42\xe3\xfb\xaf\x24\xe8\x53\x17\xdc\x9c\x40\xf5\x36\xde\xdf\xbf\xfb\xe6\x04\x78\x66\xc4\x80\x9e\xca\xf4\xdf\x63\x2c\x87\xd7\xe3\xec\xa1\x93\x27\x2c\xd3\x53\x91\xd5\xf3\x3a\x14\xbb\x5b\x73\x0e\xc0\xbe\x41\xe8\xd6\x37\xe5\x1c\x00\x7c\xab\xb1\xd2\xbe\x42\xe7\x08\xf4\x1b\x4d\x94\xbd\xcb\x75\x4e\x00\x7f\x07\xc3\xe4\xc4\xe5\x3b\x27\x10\xbd\xd1\x1c\x39\xb8\x98\xe7\x00\xc1\xfb\x18\x21\xc7\xf7\xf6\x9c\x76\x96\x7a\x94\x26\xf6\x47\xcc\x3f\x11\x98\x93\xbe\x4f\x7d\x27\xe9\x75\xd9\xb8\xed\xed\x9c\x6d\xb9\x77\x97\x69\xab\xdd\xb8\x3f\xae\xc8\xf5\xfa\xfa\x8b\xef\xc5\xde\xd2\x8a\xdc\xe8\x1f\x91\x85\x8c\xa5\xff\x10\x18\x81\x63\x24\xd6\xfd\x8c\xd7\xb5\x47\xf7\x74\x4f\x7d\xd1\x3a\x9e\xcb\xb6\x68\x0d\x5b\xd1\x28\x85\x96\xf9\x09\x02\xfc\x12\xa9\xd2\xbe\x87\xe7\x97\x8f\x9f\xdb\x7f\xf2\x6e\x7b\xf5\x5e\xaf\x85\x0f\x7b\x1f\x5b\x91\xd7\x82\x8c\xfe\x39\x74\xaf\xe9\x18\xe2\xb6\xd2\x2b\x3c\x85\xc2\x1d\x4e\x9e\xbb\x9c\xe6\x8f\x0c\xdf\x07\x18\x3b\xb6\xf4\x2e\xee\x31\x6c\x6c\xea\x5d\x8c\xf6\xd3\xdc\x8d\xc0\xad\xcb\x81\x85\x49\xf7\x0a\x9b\xee\x65\xa2\xda\x83\x32\x14\x58\x59\xe9\x2d\x4d\x0e\x10\xa3\x8e\xb0\xb4\x3b\x32\x61\xae\xc9\xb5\x4d\x90\x98\x3d\x98\x0e\x3d\xce\xdc\x18\x84\xbb\xb6\x3b\x6e\xe4\xac\x7b\xcb\x53\x70\xc6\x6a\x6f\x25\x68\xa6\x22\x6b\x9a\x3f\x22\x64\xe8\x06\x55\x7b\x37\x72\x96\x6e\xc4\xae\x4f\xc1\x41\xbf\xd3\x7e\xf2\xc0\x73\x2c\x61\x13\x30\xe4\x69\x01\x3a\xb1\xb0\x74\x1e\x12\xdf\xe4\x78\x9c\xe7\xc0\xda\x89\xc8\xf5\x90\xc6\x36\xe3\x7e\x48\xdc\x2b\xa1\x6f\xc6\xc2\xd2\x56\xa8\x70\x1a\x81\x85\x1b\x60\x7f\xb7\x3b\x14\xb4\x1f\x12\x5f\x0a\x65\x7a\xdc\xef\x75\xc7\x0a\xc5\x48\x90\xd4\x65\x20\xa8\x8a\x46\xde\x1b\x18\x2e\x68\x18\x4e\x01\x46\xe4\xef\x03\x8a\x99\x1a\xf2\x7c\x1a\xb1\x1b\x53\xa1\xa0\x1d\x9b\xa9\x13\x91\x0b\x5b\x07\x0b\x97\xa6\x08\xd3\x10\x36\x96\x4e\x2e\x78\x0e\x4f\x1d\x02\x9f\xbb\x9c\x98\xf0\x7e\xba\x46\x73\x6b\x06\x55\x7f\x47\xc4\x63\x3a\x0d\xc8\xb5\xc3\x61\x4b\x03\x27\xc3\x69\x90\x0c\xe9\x58\x58\xa1\x3a\x23\x1d\x16\x0e\x47\xae\x5d\x9a\x4a\xdc\x81\xbc\x72\x36\xc9\x72\x44\xc8\xf9\x28\x32\xa1\xc9\x91\xeb\xa9\x41\xad\x6d\x22\x85\xd3\x8e\xb4\xb0\x3b\x42\x3c\xea\x50\xf8\x34\x20\xa1\xc3\x81\x7c\x84\x0b\x92\xaa\xe0\x03\x8d\x71\x0a\x05\x03\xe6\x48\x03\x92\xac\xad\x0a\x71\x95\x22\x5c\xfe\x48\xc1\x53\xdb\xa0\x96\x4e\x2c\xf9\xd6\x40\xc6\x9c\xc1\xf8\xeb\x68\x4d\xae\xa6\xba\x98\x4d\x75\x17\x3a\xeb\x6e\x61\xe9\xe2\xda\xee\x88\x4b\x33\x96\x16\x53\x82\x2c\x46\x44\x01\x3d\x63\xbc\xb4\x75\xf8\xe0\x44\xe4\xc6\x26\x4c\x6c\x14\xb1\x9b\xe9\x2a\x19\x9a\x11\xb9\xe6\x39\x16\x73\x39\x50\x38\x03\xc9\xb7\xf4\x5b\xdf\xdb\x30\x8b\xd1\x03\x20\xef\xd7\x54\x68\xaf\x12\x9f\x1f\x6c\xe9\x2a\xb5\x63\x11\x9b\xea\x8f\x39\xcf\xcd\x31\x77\x40\x6d\xee\x83\xbb\xa5\xc9\xad\x16\x66\x04\x42\xbb\x23\xcc\x9d\x81\xb0\xb4\x22\xf0\xe0\xd2\xdd\xa5\x13\x39\x4b\x67\x00\x82\x11\x01\x56\xa6\xbe\x5a\x9a\x06\x05\x6d\x1a\x5f\x9b\xfa\x23\x9c\x1a\x22\x1c\xe9\x8f\x73\x97\x03\x1b\x97
\xc6\x3a\xa3\xa8\xbb\x9c\x1a\xc2\x83\x45\x77\xcb\x71\x0a\xc1\xd4\x9f\xc6\x02\x9c\xea\xf9\x90\xa7\xa9\xd4\x0c\x28\x5b\x5f\xf7\x42\x8f\x68\xfa\x2a\x93\x3c\x8d\xe7\x2e\xdd\xc3\x79\x16\x77\xef\xd7\x14\x66\x71\x60\xc1\x0f\xc4\xdc\xd4\xc1\x8a\xef\x33\xab\xfb\x35\x05\xed\x81\x08\x79\x0e\xdc\x5a\x86\xe4\x8f\xd5\xdc\x37\xa3\x70\x68\x72\xe4\xc2\x94\x92\xe1\x94\x60\x31\xbe\x7f\xbb\x34\x0d\xf9\x61\xd4\x41\x63\xec\xae\x4d\x34\xb7\xeb\x6e\x38\x22\xd8\xaf\xae\x21\xc0\x51\x2c\x40\x87\xbb\xf3\x27\xfd\x55\x2c\x6b\x24\x27\xac\x52\x7b\x6a\xa4\xb8\x13\x69\xc5\x94\x78\x4c\x0d\x29\x5d\x4c\x75\x1c\x4e\xf4\xba\xbe\x2e\xe6\x96\x94\x06\x68\x7c\xae\x21\xe4\x13\x7d\x37\x4f\x0e\xc7\x3e\x58\x04\x1b\x9b\xc6\x78\xb1\xbf\x16\xe2\xd2\x56\xc8\xae\xab\xe3\x35\x7e\x72\xee\xc5\x60\x6d\x2a\xf8\x83\xcd\x85\x43\x53\xef\xce\xa7\xd1\x23\x34\xfb\x78\xd7\x34\xc6\x43\xb3\x43\xc5\x53\x62\x0e\xa7\x44\x4e\x7a\x3a\xd8\xd0\x7e\xd3\x27\xf0\x60\x77\x04\x78\xd8\xa7\x29\x41\xae\xcd\xf7\xea\x93\x2e\x2e\x9d\x48\x7b\xb2\x4f\x76\x74\x37\x44\x73\x45\xfb\xe9\xc6\xd4\xf1\xb9\x1d\xb1\xb1\xa9\x50\xd1\x94\x20\x17\x2e\x07\x6e\x5d\xba\x57\x4c\x95\x5e\x81\x78\x6d\x6a\x08\x33\x9b\x23\x63\xa3\x23\xf9\x7c\x40\x12\x53\x83\x5f\xa2\xbf\x47\x44\xba\x74\x0d\x61\x31\xd5\x57\xc9\x68\x7d\xfb\xeb\x50\xbd\x0b\xe8\x08\xcf\x4c\x0e\xae\x79\x1a\x5f\xf1\xf4\xc9\xba\x43\x3a\x32\x97\x4e\x40\xcd\xdd\x81\x0c\x6d\x83\xc2\x78\x0e\x2e\xf8\x41\xfe\x38\x0a\x6e\xf1\x19\x5a\x7f\xee\x6e\xe8\x19\xa0\xe0\x69\xac\x68\xc6\xe0\x44\x60\x65\x6f\xf0\xb5\x35\x00\xe9\xc8\x10\x12\x53\x5f\x15\xe2\x1a\xdf\x98\x86\x40\x98\x06\x5f\x38\xd1\x1c\xb7\x14\x7c\xe9\x0c\xe4\xd4\x26\xba\x73\x44\xc3\x0e\x87\x6f\x78\x1a\xef\x22\xf8\x4e\x47\x9e\xdb\xdc\xca\x9f\x12\x8f\x73\x5b\x67\x09\x9e\x63\xbf\x3a\x04\x78\x40\xb4\x30\xd5\xf1\xb9\x1b\x50\x73\x2f\x16\xe1\x74\x8d\x17\x36\xd1\x4d\x5d\xae\xe4\xb9\x1d\xbd\x28\x54\xb3\xde\x85\x39\x10\xc3\x6d\x99\x21\xc2\x69\x07\xac\x2d\x43\xee\xf2\x9c\xbc\x9c\x12\x05\x74\x02\x6a\xe9\x70\x60\xe1\x74\xc4\x7c\x64\x50\xd0\x89\x56\xbf\x09\xaf\x38\x31\x55\xf0\x34\x0e\x79\x5a\x4a\xea\xef\x8f\x53\x85\xba\xe3\x39\x77\x6d\x1a\x3d\xdf\x88\xd8\xc0\x89\xa5\x62\x28\xa5\x0f\x53\x43\xf2\x27\x01\x09\x5d\x6e\xbc\x74\x3a\x70\x33\xed\x20\xd9\x27\x2d\xc7\x7d\xa6\x30\x39\xb8\x99\xea\x42\x5e\xd2\x76\x47\x80\x25\x9d\x45\xd3\x36\x7f\x65\xa6\x21\x2c\x4c\x7d\xb5\x28\xe9\x16\x90\xab\xa9\x21\x63\xe8\x37\xbe\x8f\xf9\x33\x9a\x0c\x2c\xfd\x76\xe9\x0e\x04\xdc\x94\x52\x56\xc5\xa6\xc3\x06\x07\x5a\x53\xf4\x1d\x8d\x99\xf6\xd3\xc8\x32\x04\xe8\x12\x6c\x6e\xd3\xf8\x83\xad\x4b\x48\x76\xce\x4d\x4e\xaa\x64\x73\x1f\xc3\xc4\xfe\x78\xe9\x72\xe2\xaa\x6c\xc7\x81\xb5\xad\xb3\x8b\x52\x3f\x46\x48\x1f\x80\x0e\x92\x73\x23\x5d\x2c\x90\x6e\x1b\xe9\x6c\x68\xd2\xf8\xca\xee\x08\xd8\x44\x95\x30\x91\x26\xb1\x69\xa7\x87\x60\xae\x4c\x43\x28\xa6\x7a\x17\xda\xb1\x34\x34\x23\xb8\x36\x0d\x17\xd1\x43\x31\x25\xf0\x10\xc1\x9e\xea\x72\x38\xaa\x64\x36\x29\xf6\x25\x7c\xd4\x91\x1f\x1c\x04\xb3\x5c\x3f\xc4\x1f\x60\xe1\xf6\x9e\xed\xef\x7a\xfc\xc0\xaf\x4f\xb7\x4d\x86\xa6\x31\x2f\xfb\xc6\x33\xac\x05\x80\xc0\x1a\x98\x38\x56\x71\xc0\x18\x58\x97\x52\x35\x8d\x14\xea\xdf\x25\xec\x71\x02\x34\xf9\x5e\xd2\x70\x96\xf6\xd3\xb9\x17\x50\xb9\x4d\xb8\xa9\x1d\xf4\x0a\xab\xd4\x5d\x6c\x6e\x73\x64\x67\xa4\x77\x97\x23\x43\xc4\xa7\xb1\xf8\xe0\x44\x70\xe5\x72\x70\x69\xc7\xe3\x21\x1d\x8b\x5d\xa7\x23\x43\x5b\x17\x31\x9b\xa6\x36\x2e\xc7\x22\x3d\x17\xda\x84\x98\x99\x06\xbf\x70\x08\xb0\x76\x23\xf8\x60\x4a\x25\xec\xf9\xb4\xc3\xfb\x88\x47\x91\x5e\x19\x29\x78\x29\x97\x78\x9a\x0f\x87\x03\x11\xa2\x79\x35\x95\x5e\xec\x74\x56\x8b\x61\x40\xde\xcd\x68\x07\xd1\xc1\x1d\x1f\xc8\x94\x84\x0b\x8a\x0c\xc
4\x09\x00\x42\x5f\xd6\x20\xc3\x07\x61\x40\xfb\xa9\x28\x33\xb8\xa0\x6a\xec\x40\x56\xb1\x40\x18\x14\x8a\xc6\x40\x51\xd2\x5c\x56\x50\xb4\xe5\x30\xc6\x96\xc2\xa0\xd8\xd5\x51\xc6\x0f\xc3\x80\x24\x79\x3f\x6d\xfa\xe7\x3b\x03\x90\xdb\x74\x2f\x10\x18\x9c\x51\xb5\xaa\xed\xc9\x72\x56\x90\x14\x0d\xa7\x64\x4c\x0b\xe8\x68\xcb\x7b\x2b\x17\xe9\x3e\x9a\x0f\xe5\xf0\x11\xc8\x5a\x57\x93\x42\xa8\x6d\xeb\xed\xe6\x73\xe9\x1a\xd2\xf0\x40\x5e\x95\xfc\x60\x46\x8f\x38\xa2\xb1\x69\x04\xb1\xd1\x2a\x2d\x2c\x42\x4e\x9d\xa0\x57\x38\xf4\x9e\x5d\x11\xd9\x03\x50\xd2\x13\x1d\x89\x73\x97\xee\xfd\x7d\x44\x00\x6c\xba\x6e\x7e\x97\x03\xcb\x90\xf6\x61\x1d\xf3\x91\xad\x68\x5d\x09\x00\xd9\xa5\x7d\x78\x5f\xce\xfb\xa0\x59\x3b\x69\x48\x03\x79\x6e\xae\xa9\x5f\xed\x0e\x58\xdb\x11\x9b\x9b\xbd\x44\xd0\x6a\xde\x31\x59\x92\xb3\xf4\x47\x0c\xcd\x87\x81\x89\x13\x55\xc3\xc8\xa9\x21\x20\x7b\xcf\x96\x43\x38\x06\x0c\x50\x0c\x29\x11\xd4\x88\x2d\x4c\x85\x2a\x6c\x42\x4e\xcd\x38\x1c\xd2\x5a\x6d\x2b\x71\x8d\x3c\x7e\x0c\x69\x1f\x2a\xa6\x8e\x2f\xdd\x08\xcc\x5c\x02\xe6\x26\x27\xce\x9d\x48\xf2\x8d\x55\x22\x68\x11\x28\xec\x8e\x09\x79\x56\x16\x54\x0d\xa8\x80\x61\x45\xad\x97\x0c\x75\xcc\x14\x54\x56\x66\x35\x88\x0d\x69\xad\x3b\xb7\x75\xcd\x47\xfc\xe8\x44\x10\x33\x01\x89\x4d\x75\xe7\x2c\x2e\x57\x7f\x84\x3c\x2d\x8f\x55\xcc\x1d\x29\x5a\x97\xe1\x39\x18\x79\x1c\x8b\x9b\x1c\xc4\xf6\xbf\x9b\x73\x24\xef\xd0\x98\x54\x1c\x68\x1a\x0b\xb4\xd6\x98\x22\xbb\x23\x74\xa6\x86\x80\x60\x8a\x53\x43\x7e\xb0\xe8\xde\xaf\xb4\x0f\xa5\x92\xc7\xfa\x7c\x77\xfc\x30\x5e\xd1\x21\x98\xc8\x7e\x32\x34\x75\x31\xb1\xd7\xbd\xe0\x04\x4d\x90\x02\x63\x8e\x81\x06\xee\x01\x23\x08\xa0\xa6\x55\x3e\xd8\xae\xe7\xc6\x33\xc4\x07\x9b\xe8\x46\x96\xee\xec\xad\x27\xed\x1f\xcb\xd1\x72\x6d\xe8\xb2\x0d\x66\xea\xd8\xfe\xfa\xc7\x22\x74\x62\x33\x9d\x12\xda\x1e\x0d\x00\xbd\x9b\xba\x2c\x36\x94\x74\x13\xad\x27\x69\x72\xe4\x83\x45\x80\x75\xab\xfe\x50\x8b\xc0\xa3\xab\x23\xb9\x35\x3e\x5d\xde\x91\xe7\x4e\x2c\x8f\x2d\x1d\x4f\x5d\x06\x22\x1d\xbe\x76\x23\x36\xd7\x08\xf0\x30\x51\x7b\x43\x1a\x16\x2a\xd2\x95\x96\x2e\x22\xdb\x43\x31\x0d\x11\x9b\x1a\x02\x56\x95\xb3\x1b\x3a\x04\x0b\x37\x82\x6b\x9b\xe8\x22\x9d\x8b\xc9\x11\xcc\x4d\x15\xab\xed\x2e\xb1\xeb\x10\x88\x76\xcd\xd4\xdc\xa7\xef\xa1\x6c\xcc\xe1\x14\xaf\x61\x0d\x04\x38\x51\xb0\xa5\x6b\x88\xeb\x11\x21\xa4\x76\x40\xee\x78\x36\xc2\xfc\x91\x3e\xf5\x05\xed\x76\xaf\xcd\x44\x21\x71\xa7\xc3\x2f\xa7\x11\x5c\x8c\x88\x2d\x0f\xaf\x5d\xfd\xd6\x37\x7a\x89\x5f\xca\x7d\x02\x3c\xb8\x86\x90\xba\x83\xb0\xb0\x3b\xc8\x5e\x78\x9c\x4f\x23\x90\x4f\x74\x39\x75\x08\x36\xb0\x39\x6d\x5b\x17\xf5\xdd\x89\xe1\xca\x26\xe0\xc2\xa5\x7b\x4b\x54\x7f\xd4\x91\xc3\x91\x5e\xf9\x55\x23\x5d\x48\xdd\x46\x3f\x71\x52\x81\xec\x71\x3b\x96\x0a\xb4\x3e\x3c\xbb\x1a\xf2\xa5\x1e\xe9\x42\xb7\x8f\x27\x76\x47\x6c\xfd\x36\x5f\x3a\x1d\xb9\x96\x8d\x80\x6f\xd5\x2d\x69\x5f\xd0\x5a\xfd\x8d\xb0\xfa\x3b\x4e\x9c\x94\x29\x84\x99\xbb\xc8\x8f\xe1\x50\x5f\xba\x25\x6d\xdd\x07\x97\xd5\x6b\xf0\xba\xc1\x49\x59\xf4\x75\xd4\x31\xe7\x4e\x40\xe6\x96\xce\x6f\x61\xdd\xc7\x49\xbb\x3f\xa7\x6c\xb2\x6d\x3b\x9b\x70\x96\x56\x44\xe2\x4e\xd4\x9d\xdb\xfd\x74\x6d\xaf\x29\x87\x0e\x7a\xfe\xc8\x98\xfa\xa3\x8e\x80\xdb\x47\x75\x92\xa5\x13\x83\xc5\x88\x78\x5c\x9a\x6b\x72\x2b\xab\xee\x63\x61\xd9\xee\x6b\x49\x47\x04\x3e\x9f\x12\xf3\xd4\x8e\xb4\xc2\xd2\xa5\xaf\x35\x7f\x15\x53\x5d\x4c\x2c\xbd\x0b\x47\x3a\x0c\x5b\xed\x4e\xc9\xef\x66\x1c\x0f\x97\xce\x2b\xed\xa7\xac\xc7\x81\x07\xad\x23\x2f\x9d\xfe\x19\xda\x44\x65\x74\x0f\xaa\xfe\x8e\x2f\x26\x3a\x9b\xbb\x04\xdb\x75\x56\x87\xbc\x72\x9e\xd7\x6a\x3e\x13\x1a\xff\xcb\x90\x52\x63\xaa\x77\x31\x53\x97\x69\x4f\xc5\x0b\x57\x7f\xc4\x2c\x05\x
c7\x9d\x92\x77\xe5\xb9\x13\xb9\xd0\xed\x55\xf6\xd1\x4e\x0f\x55\xe3\x42\xb2\x7c\x64\x08\x2b\x3a\x12\xe6\x0e\xa1\x11\x22\xd2\xc3\x74\xef\x6e\xd2\x5f\x05\x02\xc3\x22\xbd\x78\x2f\x01\x4a\x90\x30\x96\x06\x5a\x97\x51\x19\x2d\xd8\xfa\x49\x04\xbb\xb6\xd7\xf8\xda\xa1\xc9\xb9\xc9\xe1\xa9\x1d\xe0\x0f\x53\x05\x0f\x5c\xbd\x1b\xda\x9c\xb6\x70\x90\xed\x15\xc1\x88\x67\x0b\x9b\xa7\xf1\x05\xd2\x95\x12\x10\xc4\x23\x78\x2d\x3b\x14\xd1\x40\xe9\x8b\xab\x53\x8c\xa7\xf1\x90\xef\xaf\xee\x26\xe7\xda\x9d\xe8\x87\x13\x61\xc5\x94\x60\x8a\x69\x0c\x16\x26\xf7\x08\x47\x31\x05\x6d\x29\x8d\x2c\x29\x7d\xb0\x38\xb2\x83\x6c\x1b\x2d\xe8\x3d\x8e\xfb\xbd\xd5\xfd\x03\xb3\x1a\xf7\x7b\x7e\x4d\x1f\x73\x27\xba\x2b\x9c\xb8\x77\xa8\x67\xf7\xe4\xb2\xad\xcb\x05\xea\x53\xe9\xd3\x84\xc8\x6e\x30\x15\x15\xeb\x6a\xb2\xd6\x65\x00\x10\xc6\x13\xc5\x09\x55\x4d\x16\x65\x28\x4c\x54\x28\xb3\x6a\x28\x03\x2d\x5c\xc5\x55\xdd\x4a\xe6\x37\x75\x6a\x5a\xab\xca\xb6\xf6\x0d\xb8\x07\x21\x44\x30\x45\x44\x93\x65\x5d\x86\xec\x4b\x40\x16\x54\xec\x76\xbf\x2e\x10\x26\x2a\xa3\x91\xa8\xcf\x08\x9e\x86\x91\x40\x0b\x45\x56\x0b\xc9\xb1\xac\xc1\x7b\x0d\x97\x29\x35\x14\x4b\x7c\x43\x6e\x27\x57\x86\x8a\xd3\xf8\x64\x8d\xcd\x51\xf3\x38\xc0\xdc\xd5\xf3\xba\x06\xcd\xc1\xf7\xd4\x31\x07\x3a\x84\xdd\x8b\xc9\x30\x66\x6a\x73\x1a\x79\x6e\x6d\xe8\x10\xdc\x9a\xba\xa8\x96\xb6\x28\x4b\xad\x4d\x15\x2b\x46\x1d\xb0\x71\x82\x4a\x16\x37\xf8\xf8\x81\x80\xfc\x96\x88\xa7\xc1\xfd\x5e\x9b\x73\xbc\x1b\x83\xc5\x4e\x16\x83\x85\x3b\x10\xba\x0e\x47\xa6\x76\x2c\xf9\x35\xfc\x4d\xa9\x5b\x30\x1c\xba\x03\x21\x9d\x76\x44\xd6\x43\x7e\x67\x2c\xa6\x36\xb1\x93\xd9\x16\x47\x6e\x5c\xae\x91\xe5\xfe\xf6\x77\xbb\xb4\x35\x7a\x50\x0d\x6a\x39\xa8\xe0\x6b\x5b\xa9\xbf\xeb\x98\x3f\x8e\x9c\x23\x59\x5f\xad\x03\x05\x6d\x35\x69\xe4\x5e\x68\x2b\xdd\x95\xa9\x63\x47\x72\xd8\x34\xe4\xce\x7d\xd0\xfa\xbe\x95\xd5\x72\xcb\x46\xae\xf1\x69\x68\x2d\x64\x64\xf7\xc1\x23\x5c\xdb\x3a\x26\xd2\xe1\x98\xac\x77\x43\x60\x08\x79\x0d\x87\xd3\x42\xf2\x1e\x30\xe0\x5e\x66\x81\xa2\xd2\x4d\xdd\xc7\xa5\x49\xb8\x48\xdf\xf6\x6d\xa2\xbb\xb1\x89\x47\xd8\xcc\xa5\xca\x91\xb1\xca\x01\xc2\xd4\x57\x3e\x40\x7c\x1c\xc1\x85\xd9\xe8\x02\x40\xad\x2d\xc3\x9c\xbb\x1c\x18\x99\x06\x7c\xba\x3f\x40\x44\x3a\xe6\xc1\xd4\xf6\xed\x8b\xba\x5f\xaa\x8a\x03\x45\xc2\x00\xab\x42\x53\x40\x7d\x54\x11\x9f\xd1\x07\x6d\x81\xb0\x44\x3a\xbe\xf4\x07\xf6\x79\xac\x99\x4f\x15\xe9\x9d\x29\x01\x14\x34\x06\x45\xef\x22\x9f\x67\x81\x78\xe0\xb8\x0d\x10\x54\x28\x6a\x92\xd6\xed\xcb\xd2\x7b\xeb\x06\xe6\x3b\xe9\x86\xf3\x72\xaf\xd1\x13\x3b\xd9\xc7\xf6\x35\x48\xee\xfc\x32\x40\x0a\x72\x1f\x8b\x05\x06\xfd\x2e\xb0\x8d\x2c\x52\x34\x29\xa6\xc3\x4a\x8f\x18\x18\x2b\x29\x80\xec\xab\x0c\x64\x55\x28\xcf\x24\x0c\x28\x80\x25\xfb\x2a\x86\x8b\x2a\xd6\x9d\xa9\x21\x2b\xca\x6a\x09\xa3\xa5\x77\x1e\x85\x52\x37\xa3\xba\x0c\xf2\x3f\xf0\x89\x1a\x96\x7e\x2c\x82\xab\xa8\x80\xa4\x34\x06\xce\x9a\x7a\x06\x26\xb2\x5a\x09\x9b\x14\xd5\x32\xd6\x5b\xfb\xbc\x6b\x99\xd2\x42\x1c\xd5\x6d\xf0\xa3\x75\xd2\x9a\x7a\x25\x6e\xc5\x19\x4a\x20\x05\x5a\x08\x66\x48\xdf\x20\xbf\x59\x41\x74\xc2\x6e\xc7\xa6\xa8\x5a\xfd\x1b\x03\x19\x61\x95\x52\x3a\x04\x8a\x0c\x48\x4e\x63\x77\xe3\x52\x34\x09\xe1\xe3\x34\xe6\x6c\x5d\x55\x06\x82\xae\x68\x22\x6b\xe0\x94\xa2\x68\xdd\xbe\x02\x28\x4a\xdd\xe2\x31\x25\x0d\x8d\x37\x84\x7d\x19\xb4\xca\x2b\x38\xb4\xa2\x3d\x8e\x15\xad\x3b\x30\x30\x6d\x2d\x03\x52\x05\x0c\xa9\x20\xbd\x62\x60\x6c\x5f\xc2\x48\xd0\xea\xc3\xb6\xae\xac\x0a\xac\x86\xcb\x13\x2d\x64\x07\xb2\xb6\xad\x27\xc8\xb4\x33\x94\xb0\x47\xa0\xe1\xc8\xc7\x22\x45\x99\x15\x99\x1d\x9c\x12\x46\x5f\x65\x91\x3f\x06\x14\x55\x93\x55\x99\xd9\x96\xc5\x74\x28\x8e\x01\x10\x35\x19\x08\xb3\
xb2\xac\x5c\xcb\x2e\xa7\x68\xee\x0c\x84\x40\xd1\x30\x38\x51\x1f\xd0\x78\xb6\xf5\x4a\xf8\x12\x46\xde\xcb\x21\x1c\x6c\xeb\x04\xa7\xfa\x80\xd6\x0d\xa8\x1a\xc3\xf6\x2b\x9a\xd8\x87\x51\xae\x17\x10\x25\x49\x13\x59\x61\x95\x32\x12\x90\x29\x29\x64\x55\x19\x90\x94\x84\x89\x13\x50\xd2\xc2\x76\xed\x77\xe5\xbb\xb1\x97\x71\x0f\xd4\x56\xc5\x70\x4a\xd1\xda\xf4\xf7\x38\x91\x30\x56\x53\x34\xf2\x7e\x14\xec\xc6\xaf\x31\xac\x82\x68\x4c\x66\x48\x51\xd2\xe0\x96\x66\x64\x8c\xd5\x64\xe0\x52\x3a\x20\x1b\x58\x2a\xd2\xb9\xdb\xdf\x35\x79\xa2\x6a\xac\xa0\xc2\x71\x4c\x87\x2e\x05\x18\x60\x48\x00\xce\xb4\x10\x34\x72\x69\xa0\x85\x24\xd0\xca\xb5\xdf\x96\xb7\x62\x1e\xae\xa2\xe2\x40\x6a\xf0\x29\x18\x30\x41\xc8\x02\x95\x95\x67\x1a\x2b\xb0\x72\x08\xa7\x08\x5f\xeb\x77\x49\x0b\x01\xa7\x00\xbf\xb6\x17\x76\x73\x82\xea\x69\x90\x42\x32\x71\xfb\x9b\xb0\x2a\x63\x29\xb3\xf6\x9c\x4e\x80\x20\xa1\xb6\x1a\x26\xcf\x64\xad\xab\x2b\x48\xa6\x85\x38\x5b\xd3\x44\xb9\x06\x07\xbf\xc7\x34\x14\x24\x03\x33\x59\x09\x20\x9b\x07\xa8\x35\x2e\x4e\xd6\x58\x0d\x00\x81\xd5\xd6\xce\x50\x83\x54\x13\xb3\x89\x5b\x71\x95\xb8\xf1\x9f\xf7\xe3\xf0\xdb\x18\xc5\xd6\x0e\xd9\xd9\x7a\xdd\x4d\x63\x23\x20\x99\x24\x60\x60\x22\x07\xce\x50\xc7\xc1\xc2\x32\x64\x97\x0e\xd9\x08\xc9\xc4\x89\xde\xf8\x7a\xbb\xfa\x34\x14\xa0\x63\x80\xd4\x89\xc0\xe6\x64\xb9\x9f\xda\x5a\x13\xbb\x02\xd8\x50\xd6\xbb\x84\x65\x08\x4b\x3b\xc2\x91\x3f\x5a\xea\xa1\x89\xd2\x8a\x69\x12\xe4\xc2\x8c\x60\xbc\xed\x5b\x6f\x2b\xef\x91\x8c\x96\x9c\x48\x23\x47\x67\xfc\xcf\x7a\x1f\x09\xc9\xfd\x56\x1b\x89\x3c\x63\xb3\xe0\xf6\xce\x3e\x28\xfd\xc6\xc6\x6e\x50\xe8\xed\xef\x8d\xef\x78\xbf\xb5\x57\x0c\xa1\x68\xbe\x9b\x0a\x45\x49\xb8\xd0\xa6\x3d\x24\x2b\x1a\xfd\x06\x79\xe6\x05\xb2\x7a\x6b\x9f\x68\x7e\x25\x2f\x5f\xda\xe6\x8c\xac\xa5\x8f\xeb\xc8\x90\x6a\xc9\x78\xc8\xb4\xc6\x73\xa9\x3c\x6c\x8d\xf1\x48\x6e\x6d\x79\xa1\x05\x77\xcb\xf7\x06\x56\xf1\x41\x23\xb7\x0c\xdc\x64\x35\x28\x22\x7b\xfc\x29\x98\xa5\x7c\xd1\x70\x8a\x92\x76\xfe\x24\xe4\x19\x99\x02\x0c\x4b\x4b\x40\xdc\xeb\xdf\xe1\xdc\x34\x32\xa6\x96\x61\xad\xf6\x3b\xf9\xd1\xd4\xd1\xd6\xad\x76\x3b\x59\x74\x24\x3f\x5a\x63\x1b\xc9\x00\xea\x12\x00\x63\xc0\x92\x8d\xac\x68\xe1\xd8\x97\x19\xad\x76\x47\x72\x62\xaf\xac\x94\xdb\xfb\xfe\x4b\xab\x5c\xd1\x58\xb2\x25\x0b\x4e\xda\xb6\x2b\xbe\x2f\x61\xe3\x4d\x72\x2b\xf6\xa5\xcd\xb1\x5f\x5f\xf3\x57\x3f\x39\xf0\xfb\x8e\x62\x07\xb5\xaf\x82\x87\xfb\xb1\x87\xb3\x65\x2f\xb1\x97\xb7\x71\xe6\xaa\x7f\x54\x9b\xdf\x9f\xb3\xef\x0a\x97\xee\x6d\xc4\x87\xde\xaa\xf4\x3b\xa0\x9c\xda\x3a\x58\xba\x86\xac\xba\x1c\xb9\xaa\x6c\xbb\x31\x31\xee\xf7\x90\x8f\x54\xc5\xd3\xfb\xf8\xdc\x1e\xb8\x73\xcf\x18\xef\x7e\x63\x45\x38\xdd\x60\x8f\x34\xac\x6c\x42\x95\x83\x85\x65\xc8\x82\x1d\xcb\xc8\x87\x9a\xdb\x7d\x6c\x85\x64\x58\x6d\x9f\xe6\x36\x8b\x0d\x01\xc1\x22\x1b\x31\x94\xe2\x90\xb4\x0d\x90\xbb\x5c\x58\xb8\x86\x88\xe4\x1e\x82\x11\x9b\x86\xd4\xf8\x84\xc7\x7b\x36\xfb\xf2\x2d\x9c\xc6\xf2\xd2\x22\xc0\x62\x67\x0b\x3e\x41\xd3\xa5\x3e\xdd\x95\x4b\x9a\xd8\x57\x71\x64\x13\x6d\x6d\xb8\x7a\x8f\x42\xa6\x01\x43\x8e\x64\xad\xbb\xc7\xe7\xa5\x1e\x65\x04\x4d\xc5\x0a\x56\x0d\x4f\xda\x53\x03\x09\xc8\x2c\xc0\x58\xd3\xc0\x91\xed\x67\x0a\x48\xa6\x69\xac\x20\xa8\xa1\x28\x68\x0c\x3b\xde\xc2\xd9\xf2\xc6\xb6\x9e\xa4\x85\xf0\x5e\xc2\xa0\x24\x69\x8f\xa5\x2d\xf4\x9b\xe8\x56\x4d\xa6\x01\x47\x66\xa6\x7e\xfb\x5e\xfa\x70\xbb\x26\x3f\xa2\x5f\x5e\xfa\x0e\xba\x1c\xb8\xf5\x98\x7f\x8b\x58\xee\xeb\xfc\xec\x96\xbe\xd9\xc9\xd1\xf3\xb6\x3a\xdd\x96\xd7\xac\x26\x69\x02\xa5\x61\x60\xb6\xa5\xf1\x5a\x07\xb4\x75\x6f\x4d\xb3\x8c\x04\xe5\x89\x82\x81\xfb\x73\x7a\xef\x14\x1d\x1e\xca\xe1\x2d\x9e\x56\x3b\x55\x93\xc5\x3d\xdb\xf9\x74\x0c
\xa1\x94\xb3\xa2\x5a\xca\x59\xbc\x2d\x67\xcb\x18\x6b\x8b\xce\x8e\xe2\xaf\xa7\xca\xf6\xe4\x6f\x19\xdf\x85\xee\xc0\xf9\xda\xfe\xee\xd5\xf3\xdd\xa2\xf1\xaa\x2f\xdc\x31\x5d\xd8\x9d\xde\x19\xfb\xa7\x2c\xf3\x47\x86\xe4\x8f\x37\xd3\xd5\xb8\xf2\x97\x35\x4b\xc7\xa1\xdd\x01\x58\x29\x7f\x2b\xd9\x58\xca\xd8\xd2\xce\xab\xed\xa9\xa9\xfe\xd8\x99\x1a\x70\xb3\xfb\x4d\x56\x4d\x7d\x4c\x8e\xa5\xb4\x8a\xfb\x30\x8f\xa9\xad\x43\x4c\x41\xbe\xb2\x21\x10\x53\x7d\x45\x8e\xcb\xfd\xa7\x66\x4f\xff\xd1\xa5\xa1\x3b\x47\x32\xd6\x64\x84\xee\x44\xc7\x71\x7b\x20\xa7\x23\x03\x6c\x4c\x83\x5f\x20\x18\x26\x01\xb0\xb3\x7c\xbb\xdb\xd7\x28\xe3\x8c\xb6\x4e\x22\x3b\x75\xe9\x04\xbd\xbb\x09\x83\xda\xa4\x7b\xf6\xc6\xa1\x5c\xd9\xfa\x47\x65\x1c\xf1\xac\x0f\xb6\xbf\xf6\x2a\x4e\x4b\x21\x36\x7c\x8a\x36\x2b\xf9\xba\x2d\xdf\xe2\xfc\x4d\x7d\x0d\x4d\xa0\x55\x29\xdd\xea\xd5\xba\x5d\x1d\x93\x72\xce\xea\xa5\xc3\xb8\x64\x25\x67\xf0\xa5\x1d\x41\xcc\xee\xb4\xf6\x9f\x95\xde\xdd\x84\x76\x59\x15\x9b\xc6\x34\x2c\x00\x2a\x37\xa4\x94\x32\x91\xbe\x7c\xc0\x4f\xed\x57\x2b\xa6\xc1\xe2\x96\x21\x40\x67\x73\xaa\x3c\x19\xea\x78\x23\x17\x81\xfb\x82\x3d\xad\xfd\xb5\xf7\x7f\x68\x3f\xe1\x2c\x3d\x3e\x51\xe7\x4d\x36\xef\x77\x96\x79\xc8\xb6\x43\x3a\xa2\x4d\x3b\xbb\x7d\xa4\xe0\x7c\xd9\xf1\x5e\x18\xc0\xdc\x4d\xd2\xfa\x9e\x7e\x6d\xf6\x92\x76\x74\x5b\xf5\xa1\xb5\xd6\x3f\x70\x5c\x8f\x5a\x3a\xb1\x3c\xb7\xe9\xad\x2c\x6a\xf4\x94\xa4\xe2\x82\x26\x69\x8f\xe5\xbc\xcb\x40\x54\x0d\xac\xf2\x25\x90\x4d\xa5\x6d\x90\xcd\x42\x4d\x34\x28\x53\x6a\x19\x17\x02\xaa\x56\x97\x2b\x9a\xac\xb6\x6c\xb4\x1d\x9c\x03\xdb\x4e\x63\x48\x05\x30\xec\xb8\xf9\xbd\x8e\x17\x56\x36\x21\x5e\xc3\x66\x49\x56\x65\x80\x8e\xec\x3c\x99\x25\xab\x58\x09\xd2\x97\xec\x98\x14\xd6\x72\x03\x7b\x57\x87\xd9\xeb\xc3\x5b\x62\x43\xa7\xfc\x24\xd4\xa6\xf5\x3b\xa5\xc8\x9a\x29\xe8\xb4\x33\xac\xe8\x70\x2f\x76\xa6\x68\xcc\xa1\xbc\x3e\xe2\x13\x52\xe3\xc8\x35\xb2\xd3\x69\xb8\x1d\x2f\xaf\xe2\xa2\x76\x32\x1e\x15\xb1\xa1\xad\xc3\xc5\x28\x90\x15\x8d\xa5\x28\x2d\x44\xf6\x70\x1d\x5f\x6a\xe2\xa2\x70\xab\x63\xeb\x3e\xd4\x79\x31\xab\xb3\xf4\x50\xf9\x43\x8d\x6e\x57\xea\xbd\xb0\xba\x5f\xbf\x47\x3e\xc1\x89\x5c\x82\x97\xd9\x9b\x0d\x4d\xff\xe0\xb1\x98\x8b\xf8\x6c\x7d\x6c\x9f\x36\x7c\xf3\x5c\xdc\x64\x57\x0f\x8c\x65\x60\x52\x80\x01\xcc\x39\xd8\x17\xda\xb0\xdf\x23\x76\xb0\x1d\xbf\xc2\x90\x6a\xed\x1b\x36\x30\x0b\xbe\x2f\xc4\x4d\x5d\x87\xee\x61\x62\x5f\xfa\x7a\x8f\xec\xd7\xad\xdd\xd9\xf3\xc7\x0f\xfc\x7a\xfc\x90\x94\x79\x71\xbf\xab\x3d\x3b\xd8\xd2\xdd\x45\xf2\xfe\x0d\x32\x3d\x41\x7e\x39\x4f\xe3\xca\x36\x97\x0b\x90\xf8\x7d\x44\x06\x65\xce\x35\x4b\xae\xef\x63\x73\xee\x40\x32\xb7\x09\x77\xe6\xf6\xd3\xcd\x78\x9b\x57\x46\xae\x5c\x9a\x6c\xe7\x86\x2f\x6d\x0e\x2e\x5c\x63\x5e\xd8\x44\x77\x6e\x12\x55\x1e\xa1\xad\x17\xa1\x65\xf0\xe5\xfc\xb6\xf3\x3d\x5c\x82\xad\xea\xd0\xa4\xd8\xce\x2f\x97\xaa\x5c\xe8\x85\x42\x80\x2e\x88\x58\xdc\x1e\x48\x8b\xd2\x26\x8f\xb4\x26\x17\x97\x98\x1a\x7c\x99\x57\xce\x47\x78\xe8\x10\x52\xc0\xd3\x42\x61\x72\x58\x50\xe5\x97\x5f\x90\xe7\xc4\x91\x1d\x3b\x7a\x5c\x4e\x75\xa9\x10\x06\xa8\x4f\xf8\x83\x13\x81\x10\xd9\x5d\x96\xce\xe6\xce\x7a\x3f\x2e\x5c\xda\x8e\x3b\x9b\x8f\x31\x8d\x56\x0e\xe3\x03\x2e\x99\x86\x90\xda\x84\x9c\x4e\xd7\xbd\x10\xcd\x0d\xcf\x89\x6b\x53\x97\xa1\x1d\xcb\xe9\x54\x7f\xdc\xf0\x03\x01\x9a\xb1\x00\x1d\xc2\x3f\x94\x47\x9a\x67\x50\x70\xa2\x93\x0b\xd3\x10\x13\xbb\x23\x9d\xf4\x51\x6d\x8e\x7c\x98\xea\xab\xad\xac\x71\x89\x6e\x6e\x13\x6c\x38\xd2\xf7\xf1\x8c\x62\x31\xe1\x69\x19\xcd\xcf\xc5\xf6\xee\x05\x73\x81\x21\xdf\xa8\xe5\x63\x6c\x65\xb6\xcc\x81\xcd\xb4\x23\xa4\x55\x1e\xf3\x2d\xa9\x71\x60\x6d\xe9\x64\x68\xe9\x63\xbf\x82\x45\x1d\xc0\xa2\xd6\xa
6\x6e\xae\xd1\x58\x69\x04\x87\x83\x85\x69\x08\x2e\x1d\x92\x0b\x29\x22\x97\x75\x4c\x6a\x85\x74\x53\xf9\x1b\xc1\xe6\xa6\xde\x0d\xa7\x06\x4f\x8e\xe9\xbb\xc7\xf1\x43\xb2\x1a\xf7\x93\xd5\xb8\x97\x52\x53\x42\xc4\x9d\x88\x7d\xf0\x40\xd9\x06\x77\x56\x4f\xf8\x59\x46\x19\x23\x5b\xa3\xb1\xec\xc5\xa9\x22\x79\x69\x97\x74\x55\xeb\xb2\x4a\x46\xe7\x36\x21\xce\x6d\x7a\x2b\xab\xb7\xf3\x33\xed\x08\xd0\xe4\xc0\xc2\xe5\xe0\xdc\x1e\x8c\x17\x0e\xe1\xfb\x93\xfe\x63\xc9\x4b\xfc\x9a\xa9\xd7\x47\x98\xa3\x75\x6e\xae\xa6\x70\xa5\x64\x28\x61\xa4\xa8\x31\xe4\x7d\x95\x8b\x21\x38\x42\x9f\x09\xe8\x08\x3c\x58\xdc\x9d\xcf\x87\xbb\xf5\xe4\xd9\x55\xd8\xae\x7b\x44\x47\x7e\x32\x04\x0c\x10\x35\x96\x64\x14\xc0\x93\x06\x2d\x25\xb6\x5e\x20\xbd\xbb\xe2\x69\x3c\x1c\x4a\x29\xf4\x06\xd5\x59\x8e\x6d\x5c\xbc\x89\x37\x63\xb2\xa0\x3d\xe0\x8e\x40\xcf\x0b\x0b\xf1\x39\xe2\x47\x5d\x4a\xe9\x88\xfd\xca\x73\x8f\x4b\x93\x80\x48\x0e\xa4\x55\x7e\xf1\x63\x6e\x77\x9c\xc2\x46\xfa\xbb\x03\x02\x87\x10\xd7\x96\x41\x61\x96\x4e\x2e\xd0\x3c\xbb\x03\x61\xee\xd0\xbd\x60\x4a\x3c\xc2\xa9\xde\xc5\x1d\x9a\xf7\x65\x30\x17\x40\x0f\xf9\xac\xcd\x6f\x7e\xca\x0f\xca\xfc\xf1\xbd\x1c\x56\x9e\x2e\x73\xc9\x03\x83\x96\x35\x59\xc3\xa5\xb2\x5f\x01\xef\x4f\x8c\x5b\x7f\xd4\x91\x0b\x87\xbe\x5b\x0c\xd7\x94\x6b\x28\xbd\x48\xa8\xf5\xa9\x13\x4d\xfd\xbd\xfa\x7e\xe2\x57\xf9\x40\x82\x23\xec\xf2\x93\x27\x6a\x68\x0a\x32\x5e\xce\x4d\xc0\xf7\xf1\xbf\xf3\x34\x89\xd9\x46\x6f\x39\x0a\x72\xdf\x00\xd5\x19\x90\x32\x1e\x64\x08\x51\x39\xd7\xa7\xe6\xc8\x4f\x49\xda\x4f\xcb\x9c\xf7\x0a\xbe\x38\x51\x01\x35\x51\x91\x8d\x44\xf3\xfe\x44\xe9\x05\x65\xfe\x4e\x40\xed\xe5\x1b\x49\x38\x50\x4a\x3b\x8a\x25\xcb\x3d\x61\x03\x33\x05\x95\xd1\x48\x3e\x38\xcc\x35\xe0\x87\xa6\xfe\xf8\x1c\xfc\x8d\x49\xf3\xbe\xa1\xe6\xbe\xcb\xcd\xa1\x8d\xc6\xcb\x88\x40\x83\x42\x7b\x4f\x84\x53\xb4\x47\x76\xa2\xf0\xed\xdc\xba\xce\xb4\xc9\x1d\xc4\xf0\x74\xda\x11\x96\x0e\x41\x46\x2e\xdd\xa5\xaa\xf3\x00\xdd\x91\x69\x40\x7d\x6a\x80\xdc\xa5\xbb\xaa\xcb\x91\x6b\x53\x21\xcb\x33\x02\xad\xbe\x6d\x4c\x29\xf1\x79\xae\xa2\x4f\x45\xef\x12\x53\xd4\x5f\x9a\xc2\x1d\x82\x8d\xcd\x6a\x5d\xa0\xc7\x41\x8c\xef\x33\x43\x33\x0a\x87\x34\x14\x59\x09\x17\xd8\x5d\x3c\x40\x58\x3b\x34\xee\x08\x83\xa2\xaf\x62\xb8\xa4\x62\x5d\xd4\x6f\x92\xf7\xd3\x7b\x19\xb8\x07\xfd\x17\x1c\x81\x95\x59\x15\x50\x33\x99\x81\xca\x08\xdf\xe7\x81\x6d\xbf\x2e\x39\x57\xc4\x21\x5a\x2e\xa0\x67\x98\x73\xa4\xf7\xf8\x81\x08\x11\xcf\x96\xf9\xe4\x1d\x77\x61\x73\xe4\xbc\xca\xbd\x2f\xed\x3e\x44\x53\x47\xfd\x29\xe3\x7e\xba\xe4\xf3\xd1\xee\x4c\x8b\xc0\x14\xac\x8e\xec\x1e\xed\x51\x6b\x7c\xca\x91\x21\x12\xd3\xf5\x6d\x3b\x86\xc1\xa8\x50\x54\x81\x66\x22\x5b\x66\xe9\x10\x00\xc9\x3e\xcc\x59\x93\x8e\xc0\xee\xcf\x0f\x1f\x94\x73\x18\x38\x11\x98\x5b\x65\xbf\xeb\xfc\x36\x76\x15\x96\x67\x13\x74\x7c\xe5\xd2\xbd\xc2\x1e\x48\xbe\xa8\x50\xee\x96\x0e\x02\x6a\x63\x73\x00\x3a\x74\xef\x71\xdc\xcf\xfd\x72\xad\x94\xed\x5a\x7c\xe3\x39\x33\xdd\xc9\x34\x04\x33\x2f\x63\x9f\x27\xc7\xc9\x62\xdf\xf8\x81\x9c\x98\xfa\xed\xf6\x8c\xc7\x8b\xe8\x91\xee\xf9\x53\x62\xbe\x74\x89\x5b\x1f\xe9\x15\x9b\x23\xe3\xfb\x58\xec\x3a\x65\x6e\x25\xe5\x08\x4c\x97\x05\xf8\x1e\x2f\x20\x3c\xe5\x99\x10\xc4\x2f\x3c\x4d\x6d\x4c\x5d\xf2\x47\x7a\xe8\x8f\xf4\x2e\xe4\x69\xe7\x71\x44\x93\xac\x1a\x4a\x7e\x95\xa7\x01\x39\x45\x13\x29\xc0\x68\x4b\x7e\xd0\x43\xb6\x5b\xa8\x86\xc0\x68\x72\x33\xe4\x10\x8e\xe5\x92\x4e\xcd\xb4\xcc\xf7\x23\x90\xdd\x21\xf9\xe3\xfe\x74\x35\xa6\xcf\xe2\x2f\xdc\xe0\x64\x19\x9a\xa3\x3e\x00\x82\xd2\xce\xff\xa8\x70\xa4\xdb\x3e\x57\x79\xd9\x14\x37\xd5\x61\x8e\xfa\xee\x72\x77\xbe\x13\x81\xc8\x89\xc0\xc6\xa2\xa9\x07\xd3\x10\x30\x4b\x47\x3e\x09\x8b\x55\x39\x
df\x7c\x09\x77\xc7\x03\x52\x29\x83\x4c\x63\x9e\xba\x74\xef\x91\x8e\xcc\x74\x17\xeb\xab\xfa\xef\x2a\xc5\x2d\x4f\xbf\x5c\xef\xd3\x7e\xda\xf8\x60\x0f\x2e\xb7\xf2\x9b\xb3\x48\x3c\xd7\x6e\x57\xda\x4b\x47\x7a\xcc\x2d\xf5\x30\x3f\x6c\xec\x83\xf2\xec\x0b\x07\x16\xd5\xb9\x9a\x36\xef\xe0\xc8\xce\x3a\xdb\x9e\x0e\x5e\x61\xaf\x70\x65\x9f\x86\xaf\xd1\xbd\x55\xae\x5e\xb9\xaf\x5a\xae\xa5\xa2\x99\x82\x54\xc6\xdc\x45\x4a\x7d\xc0\xc2\x21\x47\xae\x4c\xbd\xbb\x71\x88\x95\xef\xf5\xb5\xd5\xbd\xd2\x2b\xca\x73\x23\x6f\xb3\x43\x97\x15\x3f\x54\x39\x72\x23\xbd\xbb\xb4\x3b\xa5\xae\xdf\xb8\xba\xf0\xab\xa9\x8b\x18\x3f\x58\xf9\x0e\x01\xc2\xea\x7c\x93\xbb\x19\xad\x6f\x7f\x95\xb0\x5b\x72\xb4\xbe\x8b\x87\x52\xcb\x26\x3e\xef\x3b\xd7\x32\x79\xeb\xe3\x2e\xcc\xde\x45\x39\x95\xa8\x5f\x5b\xdb\x75\x14\x49\x4b\xbb\x63\x42\x27\x16\x52\x93\xd3\x16\xe5\x1e\x5c\xf0\x64\x1c\x73\x61\x1a\x72\xc7\xee\x08\xd9\x48\x27\x17\xc8\x9e\x37\x95\xa7\xf7\xf6\x4c\x0e\x44\x95\x3e\xa9\xfa\xdc\xce\x6b\xd5\x30\x79\x56\xe6\x91\x82\x5d\xdc\xa3\x9e\xfb\x32\x4e\x56\xf5\x97\x0c\x4d\xbd\x92\xd7\x4d\x1b\x15\xa7\xca\x38\x85\xb6\xc1\x82\x91\xc6\x20\x3b\xc5\x1f\x19\x25\x2f\x55\xfb\xc3\x78\x99\x1b\xb7\x8b\xef\xf9\xed\xf3\x4a\xf8\x44\x0d\xa1\xa6\xe2\x82\xa0\x86\xee\x4c\xc6\x90\x1f\x7c\x22\x1f\xb6\xcc\x43\xda\xff\x5d\x58\xed\xec\x27\x55\x23\xef\x15\x20\x4f\xb4\x10\xde\xcb\x38\x39\x90\x70\x71\xe7\xcf\xf6\xab\x7d\xb6\xa3\x5c\xa6\x08\xdc\xd6\xe7\x74\x45\x15\xeb\x0a\x80\x21\x95\x2a\xa7\xc0\xed\x6b\x38\xa9\xc8\x9a\x2b\xa8\xd8\x2d\x29\xac\xe5\xb1\x8a\x21\xd9\x56\xe5\x2d\x3d\xdd\x77\x16\x00\x66\x3e\x53\x34\x79\x06\x58\x28\xc9\x2a\x4e\xb9\x86\x9c\xb4\xe8\xf2\xc2\xf6\xbb\x9c\x9b\x7d\xf9\x0a\x54\x49\xbb\x7d\xae\xef\x7b\xb1\xa6\x26\x26\xa5\x61\x72\xfb\xf7\x27\x60\x6c\x6d\xb3\xed\xf9\xe9\x6d\x5c\xff\x4c\x2e\xd7\xb9\x31\x81\x32\xfe\xeb\xb2\x6a\x08\x74\x09\xec\x72\xc5\xb6\xb9\x6e\x2a\x8e\x39\x31\x80\xb4\x7f\x76\x2d\x35\x59\xeb\x52\x75\x6c\xed\xa2\x75\x28\x69\x58\x2d\x73\x72\xce\xc1\x54\x54\xec\x91\x35\xb0\x4b\xf2\xa5\x93\x07\x64\x97\xb8\xc8\x67\x8a\x9c\xd3\x74\x06\x80\xa6\x94\xb1\x49\x52\xd3\x01\xc5\x4e\x34\x16\x77\xb9\x79\xcb\x76\x43\xba\x13\x14\xce\x9a\xc2\xec\x35\x95\x4c\x0d\x13\xf2\x9c\x38\x77\x0d\x11\x9a\x34\x55\x9f\xf9\xec\x2e\xdd\x1e\xc2\x25\x2f\x79\x6e\xbe\xb4\x23\x12\x77\x02\x4a\xd5\xb0\xc7\x7d\x3b\x80\x63\x17\x9e\x42\x15\xe5\xd9\x49\x9a\x4a\x4c\x9d\x25\x4c\xfd\xd6\xb7\x89\x6e\xee\x29\x54\x66\x47\x64\xc7\x59\x53\x1d\x6b\x10\x2e\x0e\xce\x9f\xd5\xb2\x1e\x06\xa3\x8e\xb8\xb1\x69\xb2\xd4\x7f\xce\x2a\x7d\x70\x3a\x54\x6e\x19\x52\x69\x7b\x34\x72\x1d\xc9\xda\xf2\x8c\x29\x51\xe6\x05\x6f\xaa\xff\x4b\x05\x4f\xe3\x01\x4f\x6b\xab\x31\x21\x95\xb9\xfe\x4d\x6e\xae\xc3\x15\xe9\xa8\x23\xe7\xce\xba\x86\xbb\x26\xf7\xce\x05\x4c\xd1\x3a\xd1\xa4\x27\xd0\x77\x8f\xfc\x20\xff\x75\xa6\xf4\xfe\x5e\xdf\x0f\xb0\x70\xf5\xc7\x9c\x8e\xc4\x99\x13\x81\x64\x6a\x20\x9f\xf8\x5c\x5f\x93\x21\xbf\xa6\x3a\xa6\x42\xe5\xe5\xb8\x15\xea\xc1\xe6\x20\xd2\x4f\x82\x49\x53\x81\xcd\xb1\x0b\x6b\x4d\xcd\x9d\x35\xf2\x5f\xba\x31\xcf\xb1\x1b\x9e\x23\x17\xf6\x20\xf4\xc7\x0a\x55\x4c\xf5\x32\xce\x11\xf2\x1c\xac\x7c\xdc\xf2\x5c\x2d\xbb\x71\x08\x18\xdb\x11\x08\xcb\xb9\x1f\x98\x85\xd3\x19\x23\x3c\xc8\xc6\x42\xb8\xea\xf5\xc2\x91\x2e\x41\xf6\xd4\xdc\x0e\x7a\x8f\x23\xba\x2c\xeb\x58\xe5\x59\xf8\x2e\xdc\xda\x17\x1c\xb9\x72\xcb\x73\xed\x9a\xef\x12\x10\x43\x76\x46\xab\x8f\x78\xa9\x67\xb8\x32\x2e\xb2\x34\xa3\x69\x61\x47\x64\x68\x2a\xd4\x66\x4a\xb0\x8b\x32\x0f\x7a\x75\x56\x5e\x13\x0e\xc1\x54\xf5\x0d\xf1\x61\xaa\xdf\x36\xe7\x8b\x16\x4d\xae\xc6\x56\x96\xc7\xc8\x8f\x43\x36\x9b\x70\x5f\xc6\x44\x1e\x12\x7f\xac\x86\x79\x65\xc7\xf5\x82\xd2\x8f\xd7\x49\
xdc\xa5\xf9\xaf\xe5\xd9\xdf\x7e\x6f\x35\xea\x55\x36\x22\x1f\x8a\xb9\x55\xc6\x5a\x61\x88\xda\xf1\x01\xdf\x6a\x87\x6c\x8c\x2e\xa6\xe8\x52\x70\x1f\xf4\x82\xc6\x97\xd3\x30\xd0\xd7\x18\xb6\xaf\xe0\xa6\x2a\x01\x99\x55\x43\xb6\xcc\xd7\xe0\x83\x55\xd3\x0f\xc9\x89\xd0\x1a\x8a\x9b\x7d\xdc\xe3\xd5\x98\xde\xd6\xe9\xdb\x04\x8e\xd6\x28\x94\x39\x90\x4f\x8d\x10\xe1\x58\x21\xdc\x33\xa9\x89\x7f\x95\xf3\xb6\x8b\xf5\x70\x70\xc1\x73\x07\xf9\x29\xd5\xdf\xed\x73\xd6\xe8\xef\x52\x4e\x4c\xf5\x95\xbf\xcd\x69\xe6\x58\xdc\x26\xf0\x4d\x73\x56\xdb\xa4\xeb\x33\xda\x04\xbe\xb4\x23\xc9\xaf\x74\x1d\x85\xec\x27\x64\xab\xae\x6d\xa2\xdb\x35\x69\xaa\x7d\x3e\xed\x1b\xb2\xd5\x50\xff\x4e\xdb\x61\xbd\x70\xb7\x17\xd9\xca\x33\xdc\xda\xfb\xd5\x58\xd0\xba\x57\xe3\x10\xe6\x0e\x01\x72\x0b\xd9\xd6\xdc\xf6\xdc\x78\x79\x16\xdf\x25\xd8\xb5\x59\x9f\x5d\xe7\xe9\x5e\x9d\x37\xf1\xb8\x9c\x12\x6c\x5e\xe7\x4f\xd4\x7d\x97\xfc\x2d\x2d\xd0\xf8\x86\xa7\xe5\x0a\x3e\x8d\x87\x3c\xdb\x7b\x94\x7b\x69\x68\x13\x5d\x24\x6b\xd7\xa6\x21\x62\x48\x86\x34\xf1\x44\x79\x77\xe6\x6b\x69\xae\xef\x7e\xa5\x83\x79\xe3\xbb\x6c\x90\xef\xe2\x44\x20\xb0\x09\x12\x1b\x2a\xbd\x88\xf6\xff\xf9\xcf\xeb\x8f\x67\x5f\x79\x3b\xfc\x3c\x7f\xb1\xf0\x3b\x5f\x19\x7c\xd1\xa3\x2f\xc9\xd2\xcb\xd2\x2c\x59\x06\xf5\x15\xab\x33\x0b\xe6\xde\x93\x2f\xd4\x04\xae\x17\x17\xd5\x4b\x37\x27\xde\xa7\x69\x6e\x8e\xd5\x72\x2f\xeb\xe5\x79\xe0\xc7\xde\xc9\xf7\x72\x16\xad\x72\xbe\x82\xf8\xd4\x85\xb7\xe7\x6e\xae\x1e\x57\x8f\x41\xd4\x10\xd6\x5f\x4e\x83\xbd\xfe\x74\xd5\xdc\x45\x6c\x65\xc9\x4d\xf5\xe6\xcc\xd9\x7b\x71\x3e\xfe\xbb\xbc\x34\xf6\x78\xfa\x2e\x7a\x3e\xa9\xfd\xb2\x4d\x79\xeb\xef\xd9\xe7\x6d\x8e\x5e\xb6\x39\xf1\xb8\x12\x5d\xbd\x9e\xf3\x65\xff\x6d\x1e\xc5\xb1\xa0\xa7\x78\xc5\xf7\x7b\x66\x09\x27\xde\xed\x61\xd1\x5e\xfd\x9e\x4c\x89\xfb\x4b\x96\x40\xaf\x5a\xa0\xc8\x8b\xcb\x2b\xfc\xfd\x45\xe0\x1e\x5d\xf3\x13\xb8\xfb\xb3\x96\x9d\xbc\x4e\xf0\xd3\xd5\xb5\x3c\xb9\xfa\x72\x25\x7b\x96\xeb\x65\x27\x66\xf4\xa9\x7e\x9d\xbc\xf4\x7c\x77\xd1\xfb\x3b\xc2\xda\x5d\x96\xfe\x22\xa0\xf5\x25\xaa\x5f\xf6\xef\xc9\x46\x60\xf3\x85\x9d\x57\x97\x1d\x5d\x72\x9f\x36\xf6\xe9\x2a\x88\x5d\xef\xf1\x7e\x76\x51\xf5\xeb\xcf\x27\x84\xda\xbb\x3e\xca\x7a\xfc\xe6\xca\x21\x33\xbd\x8e\x8e\xb7\x2c\xf8\xfd\xe7\xe7\x02\xd6\x3d\xb3\x7c\xcf\x31\x1f\x4e\xde\x60\xb7\x17\xbe\x71\xf6\xdc\x63\x61\xb9\xb7\xf4\xb2\x4a\x54\x9f\x78\xab\xee\xd9\xbb\xdd\x3f\xe4\x4e\x92\x3e\xf1\x62\xdf\xeb\x39\xeb\x08\xdc\xa9\xf7\x02\xbd\xa5\x05\x17\xe5\x4a\xb3\xe5\x4b\x7a\x71\xf9\xe0\xd9\x87\x89\xda\x3d\xf5\x96\xd1\x87\x55\x10\xbb\xc9\x4a\x09\x36\xd5\xf3\x3e\x2a\x7e\xba\x5a\x61\x65\xbe\x57\x34\x37\x79\xaa\xaf\x7a\xce\xee\xaa\x7a\xc0\x28\x28\xbc\x2c\xb0\xce\xeb\x2c\x0b\xc2\xfb\xd9\x93\x17\xc9\x3f\x73\xf9\x3e\x5a\x5d\xab\x48\xca\x4b\xdd\x47\x5e\x9e\xab\x73\x2b\x7e\xee\x79\xa4\x62\x9e\x79\xf9\x3c\x81\x68\x59\x3b\xd8\x33\x95\x7b\xae\x1b\xa0\x09\xb6\xe0\xa4\x4d\x4a\xf1\x02\xc2\xe7\x1e\x43\xac\x79\x6d\xb0\x7b\xa9\x88\x9e\x7b\x4e\xf8\xec\xeb\x4d\x5e\x91\x05\x8e\x58\xb7\xee\x07\x69\x6f\x69\x05\xd0\xb2\x03\x88\xe8\xf4\xe2\xc6\xe5\x0d\x81\x08\x42\xb4\x5d\xb8\xf8\xc2\x85\xdb\x9f\xad\x20\xf2\x7a\xbe\x9f\x79\xfe\x56\xde\xf4\x96\x5e\x76\xc1\x33\x54\xf5\xf2\x27\x71\x43\x42\x4a\x61\x15\x81\xa3\x36\xd3\x4f\x37\xe5\x2f\x7d\xc9\xe0\x04\x2b\x5c\xbd\x6a\xad\x3e\x24\xae\x55\x58\x9f\x8f\x25\x53\xf5\x2a\xc0\x38\x89\x83\x22\xc9\x3e\x2b\x41\xec\x43\xaf\x61\x88\xf1\x02\x16\x41\x0a\xbd\x71\x39\xd5\x74\x43\xe3\x17\xd9\x92\xd6\xa2\x48\xc6\x41\x11\xf8\x56\xe1\x9d\x17\x2a\x96\xf3\xea\x37\xb4\xab\xa6\xf5\x55\xa2\x87\xef\x99\xec\xc9\xee\xd6\x4d\x84\x27\x2e\xed\xfd\x74\xd5\x92\x56\x7c\x9c\x07\xfe\xbc\xc8\xbf\xb4\xa0\x37
\xd2\xaa\x7a\x87\xeb\xc6\xf2\x9f\x79\x86\x70\xe5\xd9\x83\x24\x09\x8f\x16\xe6\x42\xc3\xfc\xb2\x87\x39\xdb\x96\xa5\xd5\xe2\x9a\x1b\x0b\x7a\x59\xf1\xa4\x49\x7b\x81\xba\xda\x4e\x43\xc5\x64\x3d\x04\xf3\x49\xfb\xd2\x87\x89\x7d\xfc\x3e\xe8\x91\x3a\xbb\xbb\xc1\x3a\xef\xfe\x48\xfd\x85\x9a\xe5\x12\xab\xe5\x05\x3a\xb4\xf3\x27\xd2\xa1\x67\xb4\xe3\xa1\x12\xfd\x3a\xf8\xab\xeb\xd0\xaf\xdf\x7e\xea\xd0\x9f\x3a\xf4\xa7\x0e\xfd\xf3\xe9\x50\xd7\x2b\xdf\x18\x72\x7f\xea\xcf\xdf\x48\x7f\xfe\xf5\x7c\xd0\x9f\x2e\x68\xe7\x84\xd1\xb4\x57\xf9\xf7\x56\x9f\xe0\xa7\xfa\x3c\xfa\xfc\x54\x9f\xe7\xe6\xeb\xa7\xfa\xdc\x6d\x6e\xc4\x5e\xf1\x53\x75\xbe\x45\x75\x1e\xce\xea\xb3\x0f\x58\x7d\xba\xba\xfe\x72\x6e\x63\xe5\x70\x27\x65\xbf\xf7\xfd\xc4\x59\x44\x5e\x5c\xf4\xa9\x2f\x07\x90\xf3\x83\xfd\xa9\x33\x2f\x67\x5d\x9d\x78\xe8\xed\xd4\x96\xcc\xd5\xa7\xab\x6b\xaa\x2a\xbe\xfa\x72\xb5\x43\x7b\x55\xc3\xbb\xa2\x93\xb8\xc8\x02\x7b\x51\x24\xa7\x36\x6c\x4e\x50\xc7\x13\x5d\xff\x52\xee\x5c\xba\x5e\x96\x1f\xee\x2b\x1d\x82\x7d\xd6\x4a\x41\x56\xc6\x13\x2f\xe4\xbe\x75\xfe\x4e\xee\xfb\xa0\x3e\xf7\xbd\x59\x10\x07\xd5\x8b\x19\xdb\xbd\x8b\x43\xb9\xf1\xdc\x86\xda\x0e\x48\x49\x91\x5d\xdb\x25\x1d\xf7\xee\xee\x66\xe6\xdd\x76\x6f\x6e\x09\xfc\xeb\x0d\xd9\xb9\xb3\x6f\x66\xe4\xb7\xdb\xce\x37\x0f\xef\xde\x76\xb1\x33\x5b\x51\xe9\x6e\x2d\xcb\xde\x5c\xb8\xea\x4f\xc3\xda\xaa\xa7\x83\x96\x4f\xef\x01\x39\x49\x5c\x69\x9b\xb2\x23\x7e\xe6\x59\x85\x97\xfd\x02\xbd\xd8\x2f\xe6\xa7\x9e\x1d\x3c\xfd\x50\xd6\x15\x76\x4c\x63\xa7\x04\x05\x79\x83\xe1\x37\x69\xe6\x2d\x03\x6f\xf5\x3e\x02\xe3\xed\x24\xf3\xee\xb6\xb8\x05\x61\xb2\x02\x7b\x8f\xa9\x57\x6f\x4d\x3d\xa1\x2a\x51\x13\x36\xc9\x56\x56\xe6\x7a\xae\x9a\x59\xb3\x59\xe0\x3c\x53\x9d\xab\x5e\x2e\x53\x33\x2b\xce\x83\xa2\x49\x2a\x38\x9d\x04\x20\x7b\x51\x52\x78\x75\x8b\xfc\x89\xba\x59\x59\x71\xbf\xf3\xe7\xed\xde\xe3\xe7\xae\x2f\x7b\xce\x6d\x4f\x2d\x9f\x79\x7e\x1e\xb1\x58\x0d\xa1\x7a\x7b\xfe\x94\x33\x71\x59\xb2\x40\xad\x46\xbf\xa4\x9e\x97\x05\xb1\x7f\xd3\x06\xfb\xbc\x64\x3c\xf7\x3c\xfe\xfe\xdf\x93\x0a\xf6\xb3\x1b\x8f\x04\x76\x83\xdd\x9d\x50\x97\x17\xee\xc5\xbe\x98\x3c\x9f\xb3\x31\xce\xa1\x2a\x77\x60\xf7\x19\xe8\xfc\xd3\x92\x17\xe8\x97\x66\x16\xdd\x38\x37\x93\xd8\x7b\x27\xc3\xa3\xfb\x4e\x5b\xb8\x7f\x65\x96\xbd\x8c\x15\xb3\xf4\x26\xf5\x2a\x6b\x14\xc3\xf0\x77\x64\xc6\x7d\xc0\x7f\x76\x76\xfc\x49\x7a\xaf\x22\xbd\xb7\x2b\x80\x16\x8d\xb5\x49\xef\xcf\xa9\x02\xc2\x20\x2e\xe7\x9a\x2b\x05\xe9\xce\x44\x7a\xa9\xbd\xee\x24\x71\x1e\xe4\x85\x17\x3b\xeb\x49\x02\x03\xe7\x74\x36\xe2\x55\x65\xbd\xcd\xac\x05\x2c\xe8\x5d\x8b\x91\xb7\xf4\x60\x15\x2f\xc9\x92\xd8\xbf\x2c\xb2\xd0\x0c\xf3\x55\x51\x83\xa6\x71\x13\x74\xba\x64\x12\xf7\x3a\x74\x51\xb0\xf2\xc0\x8e\xbc\x9f\xcd\xbc\xac\x15\x1a\x2a\x73\xaf\x4e\x19\xea\x41\x2a\x2f\x60\x39\xd3\x1f\xfe\x15\xec\x27\x41\xb9\x41\x6e\xd9\xd0\xa3\x93\x3c\x4a\xf2\x3e\xc5\x06\x99\xb7\xb2\x20\x44\x86\x92\x53\x5a\xe4\xbd\x2c\xb3\xd6\xbf\x7c\xdc\x26\x58\xb6\x1b\xd7\x70\xaf\x3f\x7e\x6a\xd7\xad\xbe\xdf\xdb\x0f\x9e\x53\xa0\x3a\x3d\xd7\xcd\xbc\x3c\xbf\xcf\x64\x2b\xf6\x3d\xc4\x56\x38\x76\xfb\xf9\x96\xf8\x8c\x93\xdd\xcf\x24\xb1\x6d\x7d\xbe\xc5\xf5\x2d\xf6\xf9\xdb\xd7\xcf\xdd\xdb\xcf\x78\x07\xbf\xa4\x7e\x97\xf8\x8c\x7f\xfb\xfa\xf9\xeb\xe7\x0e\x76\x69\xf5\xaf\xe4\xe7\x2e\xf6\xf9\xb6\x7b\x69\xfd\xbb\x6f\x9f\xf1\xbb\xdb\xcf\xc4\xd7\xeb\x8f\x1f\xcf\xb8\x7f\x41\xbe\x2f\x9f\xd8\x00\x16\x5e\xc6\x6c\x03\xe2\x1f\xfe\x15\x27\xc5\x65\xab\x71\x1a\xc1\xbe\x10\x78\x8f\x35\x3e\xbf\x8c\x6e\x2b\x06\x74\x91\xfc\xfc\x92\x2f\xec\xd8\x2b\xf6\xe4\x68\xf5\xb5\x2a\x28\x9d\xb7\xa7\x91\xbc\x9f\x49\xdf\xee\xcc\x9e\x69\xdf\xfa\x7b\xdb\xad\x33\xb3\x5d\x4f\xe3\xd0\x5b\x5
3\x56\xee\xb9\x63\xaf\xb0\x10\x43\xea\x59\x50\x78\xcf\xe9\x4d\xdb\x72\xc2\x45\xfa\x9c\x24\x4b\xbd\x2c\x48\xdc\xc0\x19\x27\xae\x37\x79\x5a\x40\x1e\x40\xe6\xe3\xc2\xcb\x96\x16\xe4\xe3\x71\x10\x2f\x8a\xb2\x11\x71\xfb\x44\x76\x58\xdd\x4e\xf6\x0a\x2f\x2e\x83\x13\x5b\x00\x83\x64\x91\xa1\xe6\xdf\x08\xec\xb4\xa8\x3a\x13\x09\x6e\x74\xd7\xa4\x1e\xc4\x6b\x35\xe6\xbf\x9e\xf5\x9c\xdf\x16\x53\x7a\x9f\x94\xea\xc2\xf2\xcf\x68\xae\x5a\x1d\x31\x8f\x68\x35\xbd\xb8\xda\x42\xa0\x93\xcc\xbb\xfa\x45\x91\x46\x1f\x9f\x8e\x89\x1c\x29\x6c\xfc\x06\xc3\x6f\xf0\xee\x7b\x98\x7b\xcd\xd8\x9e\x35\x93\xae\x7b\xf2\xfd\xf5\x29\x8b\xe7\x04\x61\x27\x69\xa3\x33\xcf\xc0\x2c\xe6\x59\xb2\xf0\xe7\xe9\x02\x59\x7e\x5d\xec\x98\xa6\x5e\x96\x75\x7f\x51\x08\x15\x71\x35\x1a\xc3\x5b\xa3\x8f\xf9\xaf\xb0\x5f\xff\xf6\xbd\x32\xf1\x9b\xf5\xfd\xb3\x46\xa4\x2e\x25\xba\x5e\xbe\x8e\x9d\xfb\x72\x2f\xb2\xa4\xa7\x33\x42\x26\xb5\xb2\xa2\x8c\x19\x0e\xbd\xf3\x72\xb4\xae\x59\xcc\xcf\x9b\x72\xdb\x6a\x5f\x02\xf7\xfc\xe6\xd9\x99\x6d\xb2\xab\xb6\xa1\x3b\xb0\xf2\xf9\x19\x9b\xee\xcc\x18\x6a\x01\xa1\x16\xc8\x44\xfd\x8a\xdd\xde\x9d\x62\x8a\xa7\x59\xed\x3b\xf2\x0c\xfa\x7e\xb0\x18\xef\xca\x47\x5f\x9c\x24\x2e\xac\x20\x3e\xde\x38\xfd\xa3\xb1\xd4\xde\xb0\x2e\xe1\xaf\x73\x52\xe9\x95\xf8\x7f\x74\x96\xae\x77\x89\xfe\x14\xac\xfc\x83\x31\x68\x3d\xb5\x3f\x19\xf3\x27\x63\xb6\xca\x2f\x65\xcc\x3a\xbc\xf6\xa7\x60\xcc\x4b\x74\xec\x0d\xfe\x1d\x4c\xd9\xdb\xdf\xd6\x94\x45\xdf\xeb\x75\xfb\xc9\xf5\x3f\xb9\xbe\x55\x7e\x29\xd7\xd7\xb9\x58\x7f\x1d\xd3\xfa\xe5\x6c\xff\x9d\xf9\xb7\x59\x81\x9f\x0c\xfc\x93\x81\x5b\xe5\x97\x32\xf0\x7d\xea\xc5\xca\x3c\x98\x15\x74\xb5\x1b\xfe\x1b\x72\xf2\x1e\xc4\xdf\x98\xa7\x17\x71\xf0\xeb\xc2\x1b\x7a\xcf\xed\x03\xed\x57\x7e\x7e\x54\x4f\xe7\x45\x5f\x5d\x3e\x3d\xdb\xea\x5f\xc2\xa7\x66\xa7\xf9\x9c\xbe\xcc\xa3\xf9\x9c\x99\x85\xe6\xf3\x1d\x3a\x5d\xe7\x56\xec\x05\xd6\x79\xf7\xc9\x85\x6e\x3e\x3f\xde\x50\x02\x2f\x2e\xde\xa7\xf3\xe7\xa9\xfc\x0f\xe1\x21\x1e\x09\x8b\x9f\x4a\xe7\xa7\xd2\x69\x95\x5f\xaa\x74\x26\x49\x56\x1c\x25\x44\xed\xb0\xfc\xb4\x19\x7f\x4c\x57\xb1\x5a\xb6\x9f\x3c\xff\x93\xe7\x5b\xe5\x97\xf2\xbc\xd2\xca\x50\xff\x73\xb8\x8b\x3f\x98\x72\xde\x9b\xe0\x9f\x4c\xfa\x97\x62\xd2\x2d\xf1\xbc\xe3\xb5\x68\x17\x10\xd0\x93\x97\xb4\xbd\xfd\x30\xcd\xae\xff\xbf\xe9\x41\x18\xcb\x71\xdd\x6f\x84\xf5\xed\xa6\xd3\xb9\xeb\xde\xdc\xde\x79\xb3\x1b\xdb\xbd\x25\x6e\x66\x5f\xb1\xaf\x33\xdb\xba\xc3\x2d\xef\xdb\x2b\x0e\xc2\x9c\x9e\xf5\xef\x71\x06\xe6\xd9\x93\x2a\xef\x42\x51\xd7\x2c\xa2\x96\x3a\x17\x6a\xff\x80\xd6\x5f\x96\x74\x6e\x5d\xf2\x9b\x4d\xde\xd9\x37\xb8\x7b\x3b\xbb\xb9\xfd\x76\xf7\xed\xc6\x22\x48\xfc\xc6\xf9\xfa\xed\xae\x73\xeb\x12\x38\xf1\x2a\xd2\x99\xfd\x31\x49\xe7\x8f\x76\x40\xf1\x69\xb9\xf8\xf3\x54\xe2\x9f\xe8\x54\xe2\x0f\x24\x8c\xff\xa8\x49\x5a\xdf\xe3\xe4\xd5\xeb\x25\xc1\xe1\x79\xac\xdd\xe5\xbd\xef\x72\x22\xec\x63\xa3\xef\xfa\xa2\x72\x85\x10\xbc\x9c\xf7\x0f\x7b\xf8\xdb\x31\xfc\xf7\x9e\x9b\xdf\x54\x20\xd8\xde\xcc\x9b\x59\x18\x7e\x43\x58\x04\x79\x73\x8b\x93\xdf\x6e\xee\x3a\xd6\xdd\x0d\xf1\x8d\x98\xcd\x3a\x1d\xc7\xeb\xe0\xb7\x7f\x6c\x15\xfb\x2e\x02\xe1\xfb\xaf\xf9\x39\x81\xf1\xb7\xea\xef\xff\xfe\xed\xff\x07\x00\x00\xff\xff\x61\xe1\x6c\x3c\x05\xf9\x00\x00")
func rpProductionJsonBytes() ([]byte, error) {
return bindataRead(
_rpProductionJson,
"rp-production.json",
)
}
func rpProductionJson() (*asset, error) {
bytes, err := rpProductionJsonBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "rp-production.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s could not be read: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
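// Usage sketch (illustrative only; "rp-production.json" is one of the embedded
// assets listed in _bindata below):
//
//	data, err := Asset("rp-production.json")
//	if err != nil {
//		// handle a missing or unreadable asset
//	}
//	_ = data
//
//	// MustAsset suits package-level variables, where a missing asset is a
//	// programming error rather than a runtime condition:
//	var rpTemplate = MustAsset("rp-production.json")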
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s could not be read: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
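// For example, enumerating every embedded template and its size (illustrative):
//
//	for _, name := range AssetNames() {
//		fmt.Println(name, len(MustAsset(name)))
//	}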
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"cluster-predeploy.json": clusterPredeployJson,
"databases-development.json": databasesDevelopmentJson,
"env-development.json": envDevelopmentJson,
"gateway-production-managed-identity.json": gatewayProductionManagedIdentityJson,
"gateway-production-parameters.json": gatewayProductionParametersJson,
"gateway-production-predeploy-parameters.json": gatewayProductionPredeployParametersJson,
"gateway-production-predeploy.json": gatewayProductionPredeployJson,
"gateway-production.json": gatewayProductionJson,
"rbac-development.json": rbacDevelopmentJson,
"rp-development-predeploy.json": rpDevelopmentPredeployJson,
"rp-development.json": rpDevelopmentJson,
"rp-production-global-acr-replication.json": rpProductionGlobalAcrReplicationJson,
"rp-production-global-subscription.json": rpProductionGlobalSubscriptionJson,
"rp-production-global.json": rpProductionGlobalJson,
"rp-production-managed-identity.json": rpProductionManagedIdentityJson,
"rp-production-parameters.json": rpProductionParametersJson,
"rp-production-predeploy-parameters.json": rpProductionPredeployParametersJson,
"rp-production-predeploy.json": rpProductionPredeployJson,
"rp-production-subscription.json": rpProductionSubscriptionJson,
"rp-production.json": rpProductionJson,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
		canonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
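// All assets in this file live at the top level, so AssetDir("") returns the
// same names as AssetNames() (sketch, not part of the generated API):
//
//	names, err := AssetDir("")
//	if err == nil {
//		// names contains "cluster-predeploy.json", "rp-production.json", ...
//	}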
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"cluster-predeploy.json": {clusterPredeployJson, map[string]*bintree{}},
"databases-development.json": {databasesDevelopmentJson, map[string]*bintree{}},
"env-development.json": {envDevelopmentJson, map[string]*bintree{}},
"gateway-production-managed-identity.json": {gatewayProductionManagedIdentityJson, map[string]*bintree{}},
"gateway-production-parameters.json": {gatewayProductionParametersJson, map[string]*bintree{}},
"gateway-production-predeploy-parameters.json": {gatewayProductionPredeployParametersJson, map[string]*bintree{}},
"gateway-production-predeploy.json": {gatewayProductionPredeployJson, map[string]*bintree{}},
"gateway-production.json": {gatewayProductionJson, map[string]*bintree{}},
"rbac-development.json": {rbacDevelopmentJson, map[string]*bintree{}},
"rp-development-predeploy.json": {rpDevelopmentPredeployJson, map[string]*bintree{}},
"rp-development.json": {rpDevelopmentJson, map[string]*bintree{}},
"rp-production-global-acr-replication.json": {rpProductionGlobalAcrReplicationJson, map[string]*bintree{}},
"rp-production-global-subscription.json": {rpProductionGlobalSubscriptionJson, map[string]*bintree{}},
"rp-production-global.json": {rpProductionGlobalJson, map[string]*bintree{}},
"rp-production-managed-identity.json": {rpProductionManagedIdentityJson, map[string]*bintree{}},
"rp-production-parameters.json": {rpProductionParametersJson, map[string]*bintree{}},
"rp-production-predeploy-parameters.json": {rpProductionPredeployParametersJson, map[string]*bintree{}},
"rp-production-predeploy.json": {rpProductionPredeployJson, map[string]*bintree{}},
"rp-production-subscription.json": {rpProductionSubscriptionJson, map[string]*bintree{}},
"rp-production.json": {rpProductionJson, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
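// Example (the target directory below is hypothetical): write one embedded
// template to disk, preserving its recorded mode and modification time:
//
//	if err := RestoreAsset("/tmp/templates", "rp-production.json"); err != nil {
//		panic(err)
//	}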
func _filePath(dir, name string) string {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
var _rpProductionManagedIdentityJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x90\x41\x4b\x03\x31\x10\x85\xef\xfd\x15\x21\x0a\x6d\xa1\xc9\x26\x15\x41\x7a\xf3\x24\x1e\x7a\x13\x2f\xa5\x87\x90\x8e\x6d\xa4\xc9\x84\xcc\xec\x61\x2d\xfd\xef\xb2\xe9\xee\x22\x8a\xc9\x65\x78\xf3\xde\x37\xf0\x2e\x33\x21\x84\x90\xf7\xe4\x4f\x10\x9d\xdc\x08\x79\x62\xce\xb4\x69\x9a\x9b\xa2\xa3\x4b\xee\x08\x11\x12\x6b\xf7\xd5\x16\xd0\x1e\xe3\xb0\xa3\x66\x6d\xec\xa3\x32\x56\x19\xdb\x1c\x20\x9f\xb1\xeb\x7d\x6f\x10\xf3\xd9\x31\xe8\x4f\xc2\x74\x27\x57\xb7\x0b\x1e\x13\x43\xe2\x77\x28\x14\x30\xf5\x87\xac\x36\xfd\x1f\x0d\x05\x08\xdb\xe2\x81\xe4\x46\xec\xaa\xd4\xbf\xcb\x34\x55\xd3\x19\xbd\xe3\x21\xbf\x1b\x13\x2f\x05\xdb\xbc\x58\xea\x71\xb9\x1f\x90\x53\x2a\xb9\x08\x35\xe1\x31\x79\xc7\x8b\xb9\x2b\xa8\x4a\x56\xf3\x95\xf8\x8f\xb1\xfc\x03\xe1\x2e\x57\xc8\x36\xf8\x82\x84\x1f\xac\xb7\xb5\x9a\xc3\xeb\x01\x12\x07\xee\x9a\x96\xa0\x3c\x13\x85\x63\x9a\xc4\x00\xf4\x9b\xe3\x72\xf8\x51\xc2\xda\xd8\x27\x65\xad\x7a\x30\x72\xb2\x5d\xeb\xb4\x9f\x5d\x67\xdf\x01\x00\x00\xff\xff\x2c\x44\xd2\x17\x9e\x01\x00\x00")
track_model.js
var mongoose = require('mongoose');
var Schema = mongoose.Schema;
var ObjectId = mongoose.Schema.Types.ObjectId;
var Tag = require("./tag_model");
var TrackSchema = new Schema({
name: String,
tag_id: [{ type: ObjectId, index: true, ref: "Tag" }],
tasks: [{
name: String,
category: { type: String, validate: /call|email|follow_up|meeting|milestone|site_visit/, index: true, default: "email" },
due_after_event: { type: String, validate: /track_start|last_task/, index: true, default: "last_task" },
due_after_days: Number,
}],
date_created: { type: Date, default: Date.now },
_owner_id: ObjectId
}, {
timestamps: true
});
TrackSchema.set("_perms", {
admin: "crud",
owner: "cr",
user: ""
});
module.exports = mongoose.model('Track', TrackSchema);
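// Minimal usage sketch (assumes an open mongoose connection; the track name,
// task, and owner id are illustrative only):
//
//   var Track = require("./track_model");
//   new Track({
//       name: "Onboarding",
//       tasks: [{ name: "Intro call", category: "call",
//                 due_after_event: "track_start", due_after_days: 2 }],
//       _owner_id: someUserId
//   }).save().then(function (track) { console.log("created track", track._id); });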
mainThreadLanguageFeatures.ts
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { IDisposable } from 'vs/base/common/lifecycle';
import { Emitter, Event } from 'vs/base/common/event';
import { ITextModel, ISingleEditOperation } from 'vs/editor/common/model';
import * as modes from 'vs/editor/common/modes';
import * as search from 'vs/workbench/contrib/search/common/search';
import { CancellationToken } from 'vs/base/common/cancellation';
import { Position as EditorPosition } from 'vs/editor/common/core/position';
import { Range as EditorRange, IRange } from 'vs/editor/common/core/range';
import { ExtHostContext, MainThreadLanguageFeaturesShape, ExtHostLanguageFeaturesShape, MainContext, IExtHostContext, ILanguageConfigurationDto, IRegExpDto, IIndentationRuleDto, IOnEnterRuleDto, ILocationDto, IWorkspaceSymbolDto, reviveWorkspaceEditDto, IDocumentFilterDto, IDefinitionLinkDto, ISignatureHelpProviderMetadataDto, ILinkDto, ICallHierarchyItemDto, ISuggestDataDto, ICodeActionDto, ISuggestDataDtoField, ISuggestResultDtoField, ICodeActionProviderMetadataDto, ILanguageWordDefinitionDto } from '../common/extHost.protocol';
import { LanguageConfigurationRegistry } from 'vs/editor/common/modes/languageConfigurationRegistry';
import { LanguageConfiguration, IndentationRule, OnEnterRule } from 'vs/editor/common/modes/languageConfiguration';
import { IModeService } from 'vs/editor/common/services/modeService';
import { extHostNamedCustomer } from 'vs/workbench/api/common/extHostCustomers';
import { URI } from 'vs/base/common/uri';
import { Selection } from 'vs/editor/common/core/selection';
import { ExtensionIdentifier } from 'vs/platform/extensions/common/extensions';
import * as callh from 'vs/workbench/contrib/callHierarchy/common/callHierarchy';
import { mixin } from 'vs/base/common/objects';
import { decodeSemanticTokensDto } from 'vs/workbench/api/common/shared/semanticTokensDto';
@extHostNamedCustomer(MainContext.MainThreadLanguageFeatures)
export class MainThreadLanguageFeatures implements MainThreadLanguageFeaturesShape {
private readonly _proxy: ExtHostLanguageFeaturesShape;
private readonly _modeService: IModeService;
private readonly _registrations = new Map<number, IDisposable>();
constructor(
extHostContext: IExtHostContext,
@IModeService modeService: IModeService,
) {
this._proxy = extHostContext.getProxy(ExtHostContext.ExtHostLanguageFeatures);
this._modeService = modeService;
if (this._modeService) {
const updateAllWordDefinitions = () => {
const langWordPairs = LanguageConfigurationRegistry.getWordDefinitions();
let wordDefinitionDtos: ILanguageWordDefinitionDto[] = [];
for (const [languageId, wordDefinition] of langWordPairs) {
const language = this._modeService.getLanguageIdentifier(languageId);
if (!language) {
continue;
}
wordDefinitionDtos.push({
languageId: language.language,
regexSource: wordDefinition.source,
regexFlags: wordDefinition.flags
});
}
this._proxy.$setWordDefinitions(wordDefinitionDtos);
};
LanguageConfigurationRegistry.onDidChange((e) => {
const wordDefinition = LanguageConfigurationRegistry.getWordDefinition(e.languageIdentifier.id);
this._proxy.$setWordDefinitions([{
languageId: e.languageIdentifier.language,
regexSource: wordDefinition.source,
regexFlags: wordDefinition.flags
}]);
});
updateAllWordDefinitions();
}
}
dispose(): void {
for (const registration of this._registrations.values()) {
registration.dispose();
}
this._registrations.clear();
}
$unregister(handle: number): void {
const registration = this._registrations.get(handle);
if (registration) {
registration.dispose();
this._registrations.delete(handle);
}
}
//#region --- revive functions
private static _reviveLocationDto(data?: ILocationDto): modes.Location;
private static _reviveLocationDto(data?: ILocationDto[]): modes.Location[];
private static _reviveLocationDto(data: ILocationDto | ILocationDto[] | undefined): modes.Location | modes.Location[] | undefined {
if (!data) {
return data;
} else if (Array.isArray(data)) {
data.forEach(l => MainThreadLanguageFeatures._reviveLocationDto(l));
return <modes.Location[]>data;
} else {
data.uri = URI.revive(data.uri);
return <modes.Location>data;
}
}
private static _reviveLocationLinkDto(data: IDefinitionLinkDto): modes.LocationLink;
private static _reviveLocationLinkDto(data: IDefinitionLinkDto[]): modes.LocationLink[];
private static _reviveLocationLinkDto(data: IDefinitionLinkDto | IDefinitionLinkDto[]): modes.LocationLink | modes.LocationLink[] {
if (!data) {
return <modes.LocationLink>data;
} else if (Array.isArray(data)) {
data.forEach(l => MainThreadLanguageFeatures._reviveLocationLinkDto(l));
return <modes.LocationLink[]>data;
} else {
data.uri = URI.revive(data.uri);
return <modes.LocationLink>data;
}
}
private static _reviveWorkspaceSymbolDto(data: IWorkspaceSymbolDto): search.IWorkspaceSymbol;
private static _reviveWorkspaceSymbolDto(data: IWorkspaceSymbolDto[]): search.IWorkspaceSymbol[];
private static _reviveWorkspaceSymbolDto(data: undefined): undefined;
private static _reviveWorkspaceSymbolDto(data: IWorkspaceSymbolDto | IWorkspaceSymbolDto[] | undefined): search.IWorkspaceSymbol | search.IWorkspaceSymbol[] | undefined {
if (!data) {
return <undefined>data;
} else if (Array.isArray(data)) {
data.forEach(MainThreadLanguageFeatures._reviveWorkspaceSymbolDto);
return <search.IWorkspaceSymbol[]>data;
} else {
data.location = MainThreadLanguageFeatures._reviveLocationDto(data.location);
return <search.IWorkspaceSymbol>data;
}
}
private static _reviveCodeActionDto(data: ReadonlyArray<ICodeActionDto>): modes.CodeAction[] {
if (data) {
data.forEach(code => reviveWorkspaceEditDto(code.edit));
}
return <modes.CodeAction[]>data;
}
private static _reviveLinkDTO(data: ILinkDto): modes.ILink {
if (data.url && typeof data.url !== 'string') {
data.url = URI.revive(data.url);
}
return <modes.ILink>data;
}
private static _reviveCallHierarchyItemDto(data: ICallHierarchyItemDto | undefined): callh.CallHierarchyItem {
if (data) {
data.uri = URI.revive(data.uri);
}
return data as callh.CallHierarchyItem;
}
//#endregion
// --- outline
$registerDocumentSymbolProvider(handle: number, selector: IDocumentFilterDto[], displayName: string): void {
this._registrations.set(handle, modes.DocumentSymbolProviderRegistry.register(selector, <modes.DocumentSymbolProvider>{
displayName,
provideDocumentSymbols: (model: ITextModel, token: CancellationToken): Promise<modes.DocumentSymbol[] | undefined> => {
return this._proxy.$provideDocumentSymbols(handle, model.uri, token);
}
}));
}
// --- code lens
$registerCodeLensSupport(handle: number, selector: IDocumentFilterDto[], eventHandle: number | undefined): void {
const provider = <modes.CodeLensProvider>{
provideCodeLenses: (model: ITextModel, token: CancellationToken): Promise<modes.CodeLensList | undefined> => {
return this._proxy.$provideCodeLenses(handle, model.uri, token).then(listDto => {
if (!listDto) {
return undefined;
}
return {
lenses: listDto.lenses,
dispose: () => listDto.cacheId && this._proxy.$releaseCodeLenses(handle, listDto.cacheId)
};
});
},
resolveCodeLens: (_model: ITextModel, codeLens: modes.CodeLens, token: CancellationToken): Promise<modes.CodeLens | undefined> => {
return this._proxy.$resolveCodeLens(handle, codeLens, token);
}
};
if (typeof eventHandle === 'number') {
const emitter = new Emitter<modes.CodeLensProvider>();
this._registrations.set(eventHandle, emitter);
provider.onDidChange = emitter.event;
}
this._registrations.set(handle, modes.CodeLensProviderRegistry.register(selector, provider));
}
$emitCodeLensEvent(eventHandle: number, event?: any): void {
const obj = this._registrations.get(eventHandle);
if (obj instanceof Emitter) {
obj.fire(event);
}
}
// --- declaration
$registerDefinitionSupport(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.DefinitionProviderRegistry.register(selector, <modes.DefinitionProvider>{
provideDefinition: (model, position, token): Promise<modes.LocationLink[]> => {
return this._proxy.$provideDefinition(handle, model.uri, position, token).then(MainThreadLanguageFeatures._reviveLocationLinkDto);
}
}));
}
$registerDeclarationSupport(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.DeclarationProviderRegistry.register(selector, <modes.DeclarationProvider>{
provideDeclaration: (model, position, token) => {
return this._proxy.$provideDeclaration(handle, model.uri, position, token).then(MainThreadLanguageFeatures._reviveLocationLinkDto);
}
}));
}
$registerImplementationSupport(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.ImplementationProviderRegistry.register(selector, <modes.ImplementationProvider>{
provideImplementation: (model, position, token): Promise<modes.LocationLink[]> => {
return this._proxy.$provideImplementation(handle, model.uri, position, token).then(MainThreadLanguageFeatures._reviveLocationLinkDto);
}
}));
}
$registerTypeDefinitionSupport(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.TypeDefinitionProviderRegistry.register(selector, <modes.TypeDefinitionProvider>{
provideTypeDefinition: (model, position, token): Promise<modes.LocationLink[]> => {
return this._proxy.$provideTypeDefinition(handle, model.uri, position, token).then(MainThreadLanguageFeatures._reviveLocationLinkDto);
}
}));
}
// --- extra info
$registerHoverProvider(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.HoverProviderRegistry.register(selector, <modes.HoverProvider>{
provideHover: (model: ITextModel, position: EditorPosition, token: CancellationToken): Promise<modes.Hover | undefined> => {
return this._proxy.$provideHover(handle, model.uri, position, token);
}
}));
}
// --- debug hover
$registerEvaluatableExpressionProvider(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.EvaluatableExpressionProviderRegistry.register(selector, <modes.EvaluatableExpressionProvider>{
provideEvaluatableExpression: (model: ITextModel, position: EditorPosition, token: CancellationToken): Promise<modes.EvaluatableExpression | undefined> => {
return this._proxy.$provideEvaluatableExpression(handle, model.uri, position, token);
}
}));
}
// --- occurrences
$registerDocumentHighlightProvider(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.DocumentHighlightProviderRegistry.register(selector, <modes.DocumentHighlightProvider>{
provideDocumentHighlights: (model: ITextModel, position: EditorPosition, token: CancellationToken): Promise<modes.DocumentHighlight[] | undefined> => {
return this._proxy.$provideDocumentHighlights(handle, model.uri, position, token);
}
}));
}
// --- on type rename
$registerOnTypeRenameProvider(handle: number, selector: IDocumentFilterDto[], stopPattern?: IRegExpDto): void {
const revivedStopPattern = stopPattern ? MainThreadLanguageFeatures._reviveRegExp(stopPattern) : undefined;
this._registrations.set(handle, modes.OnTypeRenameProviderRegistry.register(selector, <modes.OnTypeRenameProvider>{
stopPattern: revivedStopPattern,
provideOnTypeRenameRanges: (model: ITextModel, position: EditorPosition, token: CancellationToken): Promise<IRange[] | undefined> => {
return this._proxy.$provideOnTypeRenameRanges(handle, model.uri, position, token);
}
}));
}
// --- references
$registerReferenceSupport(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.ReferenceProviderRegistry.register(selector, <modes.ReferenceProvider>{
provideReferences: (model: ITextModel, position: EditorPosition, context: modes.ReferenceContext, token: CancellationToken): Promise<modes.Location[]> => {
return this._proxy.$provideReferences(handle, model.uri, position, context, token).then(MainThreadLanguageFeatures._reviveLocationDto);
}
}));
}
// --- quick fix
$registerQuickFixSupport(handle: number, selector: IDocumentFilterDto[], metadata: ICodeActionProviderMetadataDto, displayName: string): void {
this._registrations.set(handle, modes.CodeActionProviderRegistry.register(selector, <modes.CodeActionProvider>{
provideCodeActions: async (model: ITextModel, rangeOrSelection: EditorRange | Selection, context: modes.CodeActionContext, token: CancellationToken): Promise<modes.CodeActionList | undefined> => {
const listDto = await this._proxy.$provideCodeActions(handle, model.uri, rangeOrSelection, context, token);
if (!listDto) {
return undefined;
}
return <modes.CodeActionList>{
actions: MainThreadLanguageFeatures._reviveCodeActionDto(listDto.actions),
dispose: () => {
if (typeof listDto.cacheId === 'number') {
this._proxy.$releaseCodeActions(handle, listDto.cacheId);
}
}
};
},
providedCodeActionKinds: metadata.providedKinds,
documentation: metadata.documentation,
displayName
}));
}
// --- formatting
$registerDocumentFormattingSupport(handle: number, selector: IDocumentFilterDto[], extensionId: ExtensionIdentifier, displayName: string): void {
this._registrations.set(handle, modes.DocumentFormattingEditProviderRegistry.register(selector, <modes.DocumentFormattingEditProvider>{
extensionId,
displayName,
provideDocumentFormattingEdits: (model: ITextModel, options: modes.FormattingOptions, token: CancellationToken): Promise<ISingleEditOperation[] | undefined> => {
return this._proxy.$provideDocumentFormattingEdits(handle, model.uri, options, token);
}
}));
}
$registerRangeFormattingSupport(handle: number, selector: IDocumentFilterDto[], extensionId: ExtensionIdentifier, displayName: string): void {
this._registrations.set(handle, modes.DocumentRangeFormattingEditProviderRegistry.register(selector, <modes.DocumentRangeFormattingEditProvider>{
extensionId,
displayName,
provideDocumentRangeFormattingEdits: (model: ITextModel, range: EditorRange, options: modes.FormattingOptions, token: CancellationToken): Promise<ISingleEditOperation[] | undefined> => {
return this._proxy.$provideDocumentRangeFormattingEdits(handle, model.uri, range, options, token);
}
}));
}
$registerOnTypeFormattingSupport(handle: number, selector: IDocumentFilterDto[], autoFormatTriggerCharacters: string[], extensionId: ExtensionIdentifier): void {
this._registrations.set(handle, modes.OnTypeFormattingEditProviderRegistry.register(selector, <modes.OnTypeFormattingEditProvider>{
extensionId,
autoFormatTriggerCharacters,
provideOnTypeFormattingEdits: (model: ITextModel, position: EditorPosition, ch: string, options: modes.FormattingOptions, token: CancellationToken): Promise<ISingleEditOperation[] | undefined> => {
return this._proxy.$provideOnTypeFormattingEdits(handle, model.uri, position, ch, options, token);
}
}));
}
// --- navigate type
$registerNavigateTypeSupport(handle: number): void {
let lastResultId: number | undefined;
this._registrations.set(handle, search.WorkspaceSymbolProviderRegistry.register(<search.IWorkspaceSymbolProvider>{
provideWorkspaceSymbols: (search: string, token: CancellationToken): Promise<search.IWorkspaceSymbol[]> => {
return this._proxy.$provideWorkspaceSymbols(handle, search, token).then(result => {
if (lastResultId !== undefined) {
this._proxy.$releaseWorkspaceSymbols(handle, lastResultId);
}
lastResultId = result._id;
return MainThreadLanguageFeatures._reviveWorkspaceSymbolDto(result.symbols);
});
},
resolveWorkspaceSymbol: (item: search.IWorkspaceSymbol, token: CancellationToken): Promise<search.IWorkspaceSymbol | undefined> => {
return this._proxy.$resolveWorkspaceSymbol(handle, item, token).then(i => {
if (i) {
return MainThreadLanguageFeatures._reviveWorkspaceSymbolDto(i);
}
return undefined;
});
}
}));
}
// --- rename
$registerRenameSupport(handle: number, selector: IDocumentFilterDto[], supportResolveLocation: boolean): void {
this._registrations.set(handle, modes.RenameProviderRegistry.register(selector, <modes.RenameProvider>{
provideRenameEdits: (model: ITextModel, position: EditorPosition, newName: string, token: CancellationToken) => {
return this._proxy.$provideRenameEdits(handle, model.uri, position, newName, token).then(reviveWorkspaceEditDto);
},
resolveRenameLocation: supportResolveLocation
? (model: ITextModel, position: EditorPosition, token: CancellationToken): Promise<modes.RenameLocation | undefined> => this._proxy.$resolveRenameLocation(handle, model.uri, position, token)
: undefined
}));
}
// --- semantic tokens
$registerDocumentSemanticTokensProvider(handle: number, selector: IDocumentFilterDto[], legend: modes.SemanticTokensLegend, eventHandle: number | undefined): void {
let event: Event<void> | undefined = undefined;
if (typeof eventHandle === 'number') {
const emitter = new Emitter<void>();
this._registrations.set(eventHandle, emitter);
event = emitter.event;
}
this._registrations.set(handle, modes.DocumentSemanticTokensProviderRegistry.register(selector, new MainThreadDocumentSemanticTokensProvider(this._proxy, handle, legend, event)));
}
$emitDocumentSemanticTokensEvent(eventHandle: number): void {
const obj = this._registrations.get(eventHandle);
if (obj instanceof Emitter) {
obj.fire(undefined);
}
}
$registerDocumentRangeSemanticTokensProvider(handle: number, selector: IDocumentFilterDto[], legend: modes.SemanticTokensLegend): void {
this._registrations.set(handle, modes.DocumentRangeSemanticTokensProviderRegistry.register(selector, new MainThreadDocumentRangeSemanticTokensProvider(this._proxy, handle, legend)));
}
// --- suggest
private static _inflateSuggestDto(defaultRange: IRange | { insert: IRange, replace: IRange }, data: ISuggestDataDto): modes.CompletionItem {
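		// ISuggestDataDto stores fields under short keys to keep completion lists
		// small on the wire; rebuild a full CompletionItem here, falling back to
		// the label when no insert text was sent and to the provider-wide default
		// range when the item carries none of its own.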
return {
label: data[ISuggestDataDtoField.label2] ?? data[ISuggestDataDtoField.label],
kind: data[ISuggestDataDtoField.kind] ?? modes.CompletionItemKind.Property,
tags: data[ISuggestDataDtoField.kindModifier],
detail: data[ISuggestDataDtoField.detail],
documentation: data[ISuggestDataDtoField.documentation],
sortText: data[ISuggestDataDtoField.sortText],
filterText: data[ISuggestDataDtoField.filterText],
preselect: data[ISuggestDataDtoField.preselect],
insertText: typeof data.h === 'undefined' ? data[ISuggestDataDtoField.label] : data.h,
range: data[ISuggestDataDtoField.range] ?? defaultRange,
insertTextRules: data[ISuggestDataDtoField.insertTextRules],
commitCharacters: data[ISuggestDataDtoField.commitCharacters],
additionalTextEdits: data[ISuggestDataDtoField.additionalTextEdits],
command: data[ISuggestDataDtoField.command],
// not-standard
_id: data.x,
};
}
$registerSuggestSupport(handle: number, selector: IDocumentFilterDto[], triggerCharacters: string[], supportsResolveDetails: boolean, extensionId: ExtensionIdentifier): void {
const provider: modes.CompletionItemProvider = {
triggerCharacters,
_debugDisplayName: extensionId.value,
provideCompletionItems: (model: ITextModel, position: EditorPosition, context: modes.CompletionContext, token: CancellationToken): Promise<modes.CompletionList | undefined> => {
return this._proxy.$provideCompletionItems(handle, model.uri, position, context, token).then(result => {
if (!result) {
return result;
}
return {
suggestions: result[ISuggestResultDtoField.completions].map(d => MainThreadLanguageFeatures._inflateSuggestDto(result[ISuggestResultDtoField.defaultRanges], d)),
incomplete: result[ISuggestResultDtoField.isIncomplete] || false,
dispose: () => {
if (typeof result.x === 'number') {
this._proxy.$releaseCompletionItems(handle, result.x);
}
}
};
});
}
};
if (supportsResolveDetails) {
provider.resolveCompletionItem = (suggestion, token) => {
return this._proxy.$resolveCompletionItem(handle, suggestion._id!, token).then(result => {
if (!result) {
return suggestion;
}
let newSuggestion = MainThreadLanguageFeatures._inflateSuggestDto(suggestion.range, result);
return mixin(suggestion, newSuggestion, true);
});
};
}
this._registrations.set(handle, modes.CompletionProviderRegistry.register(selector, provider));
}
// --- parameter hints
$registerSignatureHelpProvider(handle: number, selector: IDocumentFilterDto[], metadata: ISignatureHelpProviderMetadataDto): void {
this._registrations.set(handle, modes.SignatureHelpProviderRegistry.register(selector, <modes.SignatureHelpProvider>{
signatureHelpTriggerCharacters: metadata.triggerCharacters,
signatureHelpRetriggerCharacters: metadata.retriggerCharacters,
provideSignatureHelp: async (model: ITextModel, position: EditorPosition, token: CancellationToken, context: modes.SignatureHelpContext): Promise<modes.SignatureHelpResult | undefined> => {
const result = await this._proxy.$provideSignatureHelp(handle, model.uri, position, context, token);
if (!result) {
return undefined;
}
return {
value: result,
dispose: () => {
this._proxy.$releaseSignatureHelp(handle, result.id);
}
};
}
}));
}
// --- links
$registerDocumentLinkProvider(handle: number, selector: IDocumentFilterDto[], supportsResolve: boolean): void {
const provider: modes.LinkProvider = {
provideLinks: (model, token) => {
return this._proxy.$provideDocumentLinks(handle, model.uri, token).then(dto => {
if (!dto) {
return undefined;
}
return {
links: dto.links.map(MainThreadLanguageFeatures._reviveLinkDTO),
dispose: () => {
if (typeof dto.id === 'number') {
this._proxy.$releaseDocumentLinks(handle, dto.id);
}
}
};
});
}
};
if (supportsResolve) {
provider.resolveLink = (link, token) => {
const dto: ILinkDto = link;
if (!dto.cacheId) {
return link;
}
return this._proxy.$resolveDocumentLink(handle, dto.cacheId, token).then(obj => {
return obj && MainThreadLanguageFeatures._reviveLinkDTO(obj);
});
};
}
this._registrations.set(handle, modes.LinkProviderRegistry.register(selector, provider));
}
// --- colors
$registerDocumentColorProvider(handle: number, selector: IDocumentFilterDto[]): void {
const proxy = this._proxy;
this._registrations.set(handle, modes.ColorProviderRegistry.register(selector, <modes.DocumentColorProvider>{
provideDocumentColors: (model, token) => {
return proxy.$provideDocumentColors(handle, model.uri, token)
.then(documentColors => {
return documentColors.map(documentColor => {
const [red, green, blue, alpha] = documentColor.color;
const color = {
red: red,
green: green,
blue: blue,
alpha
};
return {
color,
range: documentColor.range
};
});
});
},
provideColorPresentations: (model, colorInfo, token) => {
return proxy.$provideColorPresentations(handle, model.uri, {
color: [colorInfo.color.red, colorInfo.color.green, colorInfo.color.blue, colorInfo.color.alpha],
range: colorInfo.range
}, token);
}
}));
}
// --- folding
$registerFoldingRangeProvider(handle: number, selector: IDocumentFilterDto[]): void {
const proxy = this._proxy;
this._registrations.set(handle, modes.FoldingRangeProviderRegistry.register(selector, <modes.FoldingRangeProvider>{
provideFoldingRanges: (model, context, token) => {
return proxy.$provideFoldingRanges(handle, model.uri, context, token);
}
}));
}
// -- smart select
$registerSelectionRangeProvider(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, modes.SelectionRangeRegistry.register(selector, {
provideSelectionRanges: (model, positions, token) => {
return this._proxy.$provideSelectionRanges(handle, model.uri, positions, token);
}
}));
}
// --- call hierarchy
$registerCallHierarchyProvider(handle: number, selector: IDocumentFilterDto[]): void {
this._registrations.set(handle, callh.CallHierarchyProviderRegistry.register(selector, {
prepareCallHierarchy: async (document, position, token) => {
const items = await this._proxy.$prepareCallHierarchy(handle, document.uri, position, token);
if (!items) {
return undefined;
}
return {
dispose: () => {
for (const item of items) {
this._proxy.$releaseCallHierarchy(handle, item._sessionId);
}
},
roots: items.map(MainThreadLanguageFeatures._reviveCallHierarchyItemDto)
};
},
provideOutgoingCalls: async (item, token) => {
const outgoing = await this._proxy.$provideCallHierarchyOutgoingCalls(handle, item._sessionId, item._itemId, token);
if (!outgoing) {
return outgoing;
}
outgoing.forEach(value => {
value.to = MainThreadLanguageFeatures._reviveCallHierarchyItemDto(value.to);
});
return <any>outgoing;
},
provideIncomingCalls: async (item, token) => {
const incoming = await this._proxy.$provideCallHierarchyIncomingCalls(handle, item._sessionId, item._itemId, token);
if (!incoming) {
return incoming;
}
incoming.forEach(value => {
value.from = MainThreadLanguageFeatures._reviveCallHierarchyItemDto(value.from);
});
return <any>incoming;
}
}));
}
// --- configuration
private static _reviveRegExp(regExp: IRegExpDto): RegExp {
return new RegExp(regExp.pattern, regExp.flags);
}
private static _reviveIndentationRule(indentationRule: IIndentationRuleDto): IndentationRule {
return {
decreaseIndentPattern: MainThreadLanguageFeatures._reviveRegExp(indentationRule.decreaseIndentPattern),
increaseIndentPattern: MainThreadLanguageFeatures._reviveRegExp(indentationRule.increaseIndentPattern),
indentNextLinePattern: indentationRule.indentNextLinePattern ? MainThreadLanguageFeatures._reviveRegExp(indentationRule.indentNextLinePattern) : undefined,
unIndentedLinePattern: indentationRule.unIndentedLinePattern ? MainThreadLanguageFeatures._reviveRegExp(indentationRule.unIndentedLinePattern) : undefined,
};
}
private static _reviveOnEnterRule(onEnterRule: IOnEnterRuleDto): OnEnterRule {
return {
beforeText: MainThreadLanguageFeatures._reviveRegExp(onEnterRule.beforeText),
afterText: onEnterRule.afterText ? MainThreadLanguageFeatures._reviveRegExp(onEnterRule.afterText) : undefined,
oneLineAboveText: onEnterRule.oneLineAboveText ? MainThreadLanguageFeatures._reviveRegExp(onEnterRule.oneLineAboveText) : undefined,
action: onEnterRule.action
};
}
private static _reviveOnEnterRules(onEnterRules: IOnEnterRuleDto[]): OnEnterRule[] {
return onEnterRules.map(MainThreadLanguageFeatures._reviveOnEnterRule);
}
$setLanguageConfiguration(handle: number, languageId: string, _configuration: ILanguageConfigurationDto): void {
const configuration: LanguageConfiguration = {
comments: _configuration.comments,
brackets: _configuration.brackets,
wordPattern: _configuration.wordPattern ? MainThreadLanguageFeatures._reviveRegExp(_configuration.wordPattern) : undefined,
indentationRules: _configuration.indentationRules ? MainThreadLanguageFeatures._reviveIndentationRule(_configuration.indentationRules) : undefined,
onEnterRules: _configuration.onEnterRules ? MainThreadLanguageFeatures._reviveOnEnterRules(_configuration.onEnterRules) : undefined,
autoClosingPairs: undefined,
surroundingPairs: undefined,
__electricCharacterSupport: undefined
};
if (_configuration.__characterPairSupport) {
// backwards compatibility
configuration.autoClosingPairs = _configuration.__characterPairSupport.autoClosingPairs;
}
if (_configuration.__electricCharacterSupport && _configuration.__electricCharacterSupport.docComment) {
configuration.__electricCharacterSupport = {
docComment: {
open: _configuration.__electricCharacterSupport.docComment.open,
close: _configuration.__electricCharacterSupport.docComment.close
}
};
}
const languageIdentifier = this._modeService.getLanguageIdentifier(languageId);
if (languageIdentifier) {
this._registrations.set(handle, LanguageConfigurationRegistry.register(languageIdentifier, configuration));
}
}
}
export class MainThreadDocumentSemanticTokensProvider implements modes.DocumentSemanticTokensProvider {
constructor(
private readonly _proxy: ExtHostLanguageFeaturesShape,
private readonly _handle: number,
private readonly _legend: modes.SemanticTokensLegend,
public readonly onDidChange: Event<void> | undefined,
) {
}
public releaseDocumentSemanticTokens(resultId: string | undefined): void {
if (resultId) {
this._proxy.$releaseDocumentSemanticTokens(this._handle, parseInt(resultId, 10));
}
}
public getLegend(): modes.SemanticTokensLegend {
return this._legend;
}
async provideDocumentSemanticTokens(model: ITextModel, lastResultId: string | null, token: CancellationToken): Promise<modes.SemanticTokens | modes.SemanticTokensEdits | null> {
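		// The extension host replies with a binary-encoded semantic tokens DTO:
		// either a full token set or a delta against `lastResultId`. Decode it and
		// return `data` for full results or `edits` for deltas, matching what the
		// editor expects from a DocumentSemanticTokensProvider.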
const nLastResultId = lastResultId ? parseInt(lastResultId, 10) : 0;
const encodedDto = await this._proxy.$provideDocumentSemanticTokens(this._handle, model.uri, nLastResultId, token);
if (!encodedDto) {
return null;
}
if (token.isCancellationRequested) {
return null;
}
const dto = decodeSemanticTokensDto(encodedDto);
if (dto.type === 'full') {
return {
resultId: String(dto.id),
data: dto.data
};
}
return {
resultId: String(dto.id),
edits: dto.deltas
};
}
}
export class MainThreadDocumentRangeSemanticTokensProvider implements modes.DocumentRangeSemanticTokensProvider {
constructor(
private readonly _proxy: ExtHostLanguageFeaturesShape,
private readonly _handle: number,
private readonly _legend: modes.SemanticTokensLegend,
) {
}
public getLegend(): modes.SemanticTokensLegend {
return this._legend;
}
async provideDocumentRangeSemanticTokens(model: ITextModel, range: EditorRange, token: CancellationToken): Promise<modes.SemanticTokens | null> {
const encodedDto = await this._proxy.$provideDocumentRangeSemanticTokens(this._handle, model.uri, range, token);
if (!encodedDto) {
return null;
}
if (token.isCancellationRequested) {
return null;
}
const dto = decodeSemanticTokensDto(encodedDto);
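		// A range request is always answered with a full token set: there is no
		// previous result to apply a delta against, so any other DTO type is a bug.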
if (dto.type === 'full') {
return {
resultId: String(dto.id),
data: dto.data
};
}
throw new Error(`Unexpected`);
}
}
app.component.spec.ts
import { TestBed, async } from '@angular/core/testing';
import { AppComponent } from './app.component';
describe('AppComponent', () => {
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [
AppComponent
],
}).compileComponents();
}));
it('should create the app', async(() => {
const fixture = TestBed.createComponent(AppComponent);
const app = fixture.debugElement.componentInstance;
expect(app).toBeTruthy();
}));
it(`should have as title 'app'`, async(() => {
    const fixture = TestBed.createComponent(AppComponent);
    const app = fixture.debugElement.componentInstance;
    expect(app.title).toEqual('app');
  }));
it('should render title in a h1 tag', async(() => {
const fixture = TestBed.createComponent(AppComponent);
fixture.detectChanges();
const compiled = fixture.debugElement.nativeElement;
expect(compiled.querySelector('h1').textContent).toContain('Welcome to Dynamic-Form!');
}));
});
conv_layers_builder_test.py
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conv layers builder."""
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import bn_layers
from lingvo.core import conv_layers_builder
from lingvo.core import conv_layers_with_time_padding
from lingvo.core import layers
from lingvo.core import test_utils
import numpy as np
class ConvPaddedLayersTest(test_utils.TestCase):
def _ConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, out_dim, filter_shape, conv_last,
causal_conv):
with self.session(use_gpu=True) as sess:
p1 = layers.Conv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, out_dim],
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().Conv2D(
'conv_2d02',
in_dim,
out_dim,
filter_shape,
stride=stride,
dilation=dilation,
activation=activation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
w1 = l1_theta.w
w2 = l2_theta.conv_2d.w
# b1 = l1_theta.b
# b2 = l2_theta.bn_or_bias.b
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
w1_v = sess.run(w1)
v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testConvBasic(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvBnWnTanh(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvGn(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
in_dim = 3
out_dim = 4
filter_shape = [2, 2]
conv_last = False
causal_conv = False
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().Conv2D(
'conv_2d02',
in_dim,
out_dim,
filter_shape,
stride=stride,
dilation=dilation,
activation=activation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[-0.35070014, -1.7821487, 0.8349923, 1.1709788],
[-0.18872532, 0.9702145, 0.5534694, -1.1386856]],
[[0.34970748, -0.5403709, -0.9809327, -2.0930214],
[0.54232424, 1.1565661, 1.0349312, 1.3458138]],
[[0, 0, 0, 0], [0, 0, 0, 0]]]
self.assertAllClose(v, expected_out)
def testConvLastWnTanh(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 2]
conv_last = True
causal_conv = False
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def testConvLastCausal(self):
dilation = [1, 1]
stride = [2, 3]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
out_dim = 3
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,
in_dim, out_dim, filter_shape, conv_last, causal_conv)
def _DepthwiseConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv):
with self.session(use_gpu=True) as sess:
p1 = layers.DepthwiseConv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, depth_multiplier],
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().DepthwiseConv2D(
'conv_2d02',
in_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
w1 = l1_theta.w
w2 = l2_theta.conv_2d.w
# b1 = l1_theta.b
# b2 = l2_theta.bn_or_bias.b
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
w1_v = sess.run([w1])[0]
v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testDepthConvBasic(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
depth_multiplier = 2
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvBnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvGn(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
in_dim = 4
depth_multiplier = 1
filter_shape = [2, 2]
conv_last = False
causal_conv = False
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().DepthwiseConv2D(
'conv_2d02',
in_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[-0.77095497, 0.30285388, -0.05714864, 1.0386012],
[0.74034333, 0.04982221, -0.41769135, -2.9531932],
[-0.2647084, -0.1936804, 0.6598473, 0.42537105]],
[[1.3095646, -0.85996866, 2.2734299, -1.8457952],
[-0.9542263, -0.14199251, 0.51472515, 0.91931283],
[0.47267163, 1.4824618, 0.4548889, 0.93488806]],
[[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]
self.assertAllClose(expected_out, v)
def testDepthConvLastWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 2]
conv_last = True
causal_conv = False
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def testDepthConvLastCausal(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
filter_shape = [2, 1]
conv_last = True
causal_conv = True
self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
filter_shape, conv_last, causal_conv)
def _SeparableConvTestHelper(self, dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier, out_dim,
filter_shape, conv_last, causal_conv,
assert_equality=True):
with self.session(use_gpu=True) as sess:
p1 = layers.SeparableConv2DLayer.Params().Set(
name='conv_2d01',
filter_shape=filter_shape + [in_dim, out_dim],
depth_multiplier=depth_multiplier,
filter_stride=stride,
dilation_rate=dilation,
activation=activation,
batch_norm=batch_norm,
weight_norm=weight_norm,
bias=not batch_norm,
conv_last=conv_last,
causal_convolution=causal_conv)
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=weight_norm)
if batch_norm:
norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(
decay=0.999)
builder_params.norm_layer_tpl = norm_p
else:
builder_params.norm_layer_tpl = None
p2 = builder_params.Instantiate().SeparableConv2D(
'conv_2d02',
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l1 = p1.Instantiate()
l2 = p2.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
l1_theta = l1.theta.Transform(tf.identity)
l2_theta = l2.theta.Transform(tf.identity)
conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)
conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)
tf.logging.info(l1_theta)
tf.logging.info(l2_theta)
l1_num_vars = l1_theta.Flatten()
l2_num_var2 = l2_theta.Flatten()
if len(l1_num_vars) != len(l2_num_var2):
tf.logging.info(
'Mismatched number of vars: l1: %d vars, l2: %d vars',
len(l1_num_vars), len(l2_num_var2))
pointwise_conv_w1 = l1_theta.w
depth_conv_w1 = l1_theta.depthwise_conv.w
pointwise_conv_w2 = l2_theta.conv_1x1.w
depth_conv_w2 = l2_theta.conv_2d.w
# b1 = l1_theta.b
# b2 = l2_theta.bn_or_bias.b
tf.global_variables_initializer().run()
v1, p1 = sess.run([conv_out1, out1_padding])
p_w1_v, d_w1_v = sess.run([pointwise_conv_w1, depth_conv_w1])
v2, p2 = sess.run([conv_out2, out2_padding],
feed_dict={
pointwise_conv_w2: p_w1_v,
depth_conv_w2: d_w1_v
})
if assert_equality:
self.assertAllClose(v1, v2)
self.assertAllClose(p1, p2)
def testSeparableConv2DLayerBasic(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'NONE'
batch_norm = False
weight_norm = False
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 2]
conv_last = False
causal_conv = False
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv)
def testSeparableConvWnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = False
weight_norm = True
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 1]
conv_last = False
causal_conv = True
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv)
def testSeparableConvLastBnWnTanh(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
batch_norm = True
weight_norm = True
in_dim = 3
depth_multiplier = 3
out_dim = 2
filter_shape = [2, 1]
conv_last = True
causal_conv = True
# The new implementation is not equivalent to the old one.
self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,
weight_norm, in_dim, depth_multiplier,
out_dim, filter_shape, conv_last, causal_conv,
assert_equality=False)
def testSeparableConvGn(self):
dilation = [1, 1]
stride = [2, 2]
activation = 'TANH'
in_dim = 4
depth_multiplier = 1
out_dim = 2
filter_shape = [2, 1]
conv_last = True
causal_conv = True
with self.session(use_gpu=True) as sess:
builder_params = conv_layers_builder.Builder.Params().Set(
weight_norm=True)
builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(
num_groups=2)
p = builder_params.Instantiate().SeparableConv2D(
'conv_2d02',
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=stride,
activation=activation,
dilation=dilation,
conv_last=conv_last,
is_causal=causal_conv)
l = p.Instantiate()
conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)
conv_pad = np.full([4, 5], 0.0)
conv_pad[2, 3] = 1.0
conv_pad[2, 4] = 1.0
conv_pad = tf.constant(conv_pad, tf.float32)
conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)
tf.global_variables_initializer().run()
v = sess.run(tf.reduce_sum(conv_out, 0))
expected_out = [[[0.00963847, -0.04019006], [0.36265337, -0.06592329],
[0.65582913, -0.1533944]],
[[0.7512939, -0.7282307], [0.96100605, -1.9509676],
[0.4639647, 0.2485837]], [[0., 0.], [0., 0.], [0., 0.]]]
self.assertAllClose(expected_out, v)
class CausalPoolingLayerTest(test_utils.TestCase, parameterized.TestCase):
"""Tests for CausalPoolingLayer."""
@parameterized.named_parameters(
{
'testcase_name': 'max_pooling',
'pooling_type': 'MAX',
'left_context': 2,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, 0, 2, 4, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling',
'pooling_type': 'AVG',
'left_context': 2,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 1, 3, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'max_pooling_large_window',
'pooling_type': 'MAX',
'left_context': 10,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, 0, 2, 4, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling_large_window',
'pooling_type': 'AVG',
'left_context': 10,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 0, 1, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
}, {
'testcase_name': 'avg_pooling_infinite_window',
'pooling_type': 'AVG',
'left_context': -1,
'inputs': np.array([-2, 0, 2, 4, 0, 0]),
'input_paddings': np.array([0, 0, 0, 0, 1, 1]),
'expected_output': np.array([-2, -1, 0, 1, 0, 0]),
'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),
})
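# Worked check (added for clarity, not in the original test): with AVG pooling and
# left_context=2, each output frame averages the current and previous input, so
# [-2, 0, 2, 4, ...] -> [-2, (-2+0)/2, (0+2)/2, (2+4)/2, ...] = [-2, -1, 1, 3, ...],
# matching the 'avg_pooling' expected_output above.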
def testSimpleCase(self, pooling_type, left_context, inputs, input_paddings,
expected_output, expected_output_padding):
inputs = inputs[np.newaxis, :, np.newaxis, np.newaxis]
input_paddings = input_paddings[np.newaxis, :]
param = conv_layers_builder.CausalPoolingLayer.Params().Set(
name='test_layer', pooling_type=pooling_type, left_context=left_context)
pooling_layer = param.Instantiate()
with self.session(use_gpu=True) as sess:
inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
input_paddings = tf.convert_to_tensor(input_paddings, dtype=tf.float32)
output, output_paddings = pooling_layer.FPropDefaultTheta(
inputs, input_paddings)
tf.global_variables_initializer().run()
output_val, output_paddings_val = sess.run([output, output_paddings])
self.assertAllClose(expected_output, output_val.flatten())
self.assertAllEqual(expected_output_padding, output_paddings_val.flatten())
if __name__ == '__main__':
tf.test.main()
validate.go | package main
import (
"errors"
"github.com/ostrowr/send-me-a-secret/internal/githubapi"
"github.com/ostrowr/send-me-a-secret/internal/rsahelpers"
"github.com/ostrowr/send-me-a-secret/internal/utils"
)
func validate(githubUsername, privateKeyPassword string) error {
client := githubapi.GetGithubClient("")
utils.PrintDefaultf("Validating send-me-a-secret\n")
utils.PrintDefaultf("Fetching public key from GitHub...\n")
publicKey, err := githubapi.GetPublicKeyFromGithubUnauthenticated(client, githubUsername, rsahelpers.IsValidSendMeASecretKey)
utils.FatallyLogOnError("Couldn't fetch public key from GitHub", err)
utils.PrintCyanf("Public key successfully fetched\n\n")
utils.PrintDefaultf("Encrypting a test message using that public key\n")
testMessage := "Hello, my name is Inigo Montoya."
ciphertext, err := rsahelpers.Encrypt(publicKey, []byte(testMessage))
utils.FatallyLogOnError("Couldn't encrypt message", err)
utils.PrintCyanf("Message successfully encrypted\n\n")
utils.PrintDefaultf("Decrypting the test message using your private key\n")
if privateKeyPassword == "" {
var passwordBytes []byte
passwordBytes, err = utils.ReadPassword("Enter passphrase for private key: ")
utils.FatallyLogOnError("Unable to read passphrase", err)
privateKeyPassword = string(passwordBytes)
}
privateKey, err := rsahelpers.ReadPrivateKeyFromFile([]byte(privateKeyPassword))
utils.FatallyLogOnError("Couldn't read private key", err)
decrypted, err := rsahelpers.Decrypt(privateKey, ciphertext)
utils.FatallyLogOnError("Couldn't decrypt message", err)
if string(decrypted) != testMessage {
utils.FatallyLogOnError("", errors.New("messages don't match"))
}
utils.PrintCyanf("Message successfully decrypted\n\n")
utils.PrintGreenf("Validation succeeded! You're good to go.\n")
return nil
}
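// Illustrative sketch (not part of this file): a hypothetical caller, e.g. from a
// CLI subcommand handler; the username and empty password below are placeholders.
//
//	if err := validate("some-github-user", ""); err != nil {
//	    log.Fatal(err)
//	}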
block_impl.rs | //! See <https://clang.llvm.org/docs/Block-ABI-Apple.html#high-level>
use std::os::raw::{c_int};
use crate::data::Unmanaged;
use blocksr::once_escaping;
once_escaping!(pub(crate) ReadEscapingBlock(data: &Unmanaged, error: c_int) -> ());
once_escaping!(pub(crate) WriteEscapingBlock(data: Option<&Unmanaged>, error: c_int) -> ());
// All arguments to this one are passed in via the closure.
once_escaping!(pub(crate) DropBlock() -> ());
///A block that will drop the receiver. This can be used to transfer
/// ownership of the receiver into dispatch.
///
/// # Safety
/// You must verify that:
/// * The block will execute exactly once.
/// * If ObjC executes the block several times, it's UB.
/// * If ObjC executes the block less than once, it is not UB, but it will leak.
pub(crate) unsafe fn drop_block<T: Send + 'static>(t: T) -> DropBlock {
DropBlock::new(move || {
std::mem::drop(t)
})
}
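// Illustrative sketch (not part of this module): a hypothetical call site that
// transfers ownership of a value into dispatch via `drop_block`. The hand-off to
// ObjC below is an assumption; only `drop_block` itself is defined here.
//
// unsafe {
//     let payload = vec![0u8; 64];
//     // `payload` is moved into the block and dropped when ObjC runs it once.
//     let block = drop_block(payload);
//     // ...pass `block` to an ObjC API that is guaranteed to invoke it exactly once...
// }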
godwoken.rs | use crate::blockchain::Script;
use crate::fixed_bytes::Byte65;
use anyhow::{anyhow, Error as JsonError};
use ckb_fixed_hash::H256;
use ckb_jsonrpc_types::{JsonBytes, Uint128, Uint32, Uint64};
use gw_types::{bytes::Bytes, offchain, packed, prelude::*};
use serde::{Deserialize, Serialize};
use std::convert::{TryFrom, TryInto};
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct RawL2Transaction {
pub from_id: Uint32,
pub to_id: Uint32,
pub nonce: Uint32,
pub args: JsonBytes,
}
impl From<RawL2Transaction> for packed::RawL2Transaction {
fn from(tx: RawL2Transaction) -> Self {
let RawL2Transaction {
from_id,
to_id,
nonce,
args,
} = tx;
let args: Bytes = args.into_bytes();
packed::RawL2Transaction::new_builder()
.from_id(u32::from(from_id).pack())
.to_id(u32::from(to_id).pack())
.nonce(u32::from(nonce).pack())
.args(args.pack())
.build()
}
}
impl From<packed::RawL2Transaction> for RawL2Transaction {
fn from(raw_l2_transaction: packed::RawL2Transaction) -> RawL2Transaction {
let from_id: u32 = raw_l2_transaction.from_id().unpack();
let to_id: u32 = raw_l2_transaction.to_id().unpack();
let nonce: u32 = raw_l2_transaction.nonce().unpack();
Self {
from_id: from_id.into(),
to_id: to_id.into(),
nonce: nonce.into(),
args: JsonBytes::from_bytes(raw_l2_transaction.args().unpack()),
}
}
}
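// Round-trip sketch (added for illustration, not in the original source): the
// JSON-RPC form and the molecule-packed form are intended to convert losslessly.
//
// let json = RawL2Transaction {
//     from_id: 2u32.into(),
//     to_id: 3u32.into(),
//     nonce: 0u32.into(),
//     args: JsonBytes::default(),
// };
// let packed: packed::RawL2Transaction = json.clone().into();
// assert_eq!(RawL2Transaction::from(packed), json);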
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct L2Transaction {
pub raw: RawL2Transaction,
pub signature: Byte65,
}
impl From<L2Transaction> for packed::L2Transaction {
fn from(tx: L2Transaction) -> Self {
let L2Transaction { raw, signature } = tx;
packed::L2Transaction::new_builder()
.raw(raw.into())
.signature(signature.into())
.build()
}
}
impl From<packed::L2Transaction> for L2Transaction {
fn from(l2_transaction: packed::L2Transaction) -> L2Transaction {
Self {
raw: l2_transaction.raw().into(),
signature: l2_transaction.signature().into(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct L2TransactionView {
#[serde(flatten)]
pub inner: L2Transaction,
pub hash: H256,
}
impl From<packed::L2Transaction> for L2TransactionView {
fn from(l2_tx: packed::L2Transaction) -> L2TransactionView {
let hash = H256::from(l2_tx.raw().hash());
let inner = L2Transaction::from(l2_tx);
L2TransactionView { inner, hash }
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Default)]
#[serde(rename_all = "snake_case")]
pub struct LogItem {
pub account_id: Uint32,
// The actual type is `u8`
pub service_flag: Uint32,
pub data: JsonBytes,
}
impl From<LogItem> for packed::LogItem {
fn from(json: LogItem) -> packed::LogItem {
let LogItem {
account_id,
service_flag,
data,
} = json;
packed::LogItem::new_builder()
.account_id(account_id.value().pack())
.service_flag((service_flag.value() as u8).into())
.data(data.into_bytes().pack())
.build()
}
}
impl From<packed::LogItem> for LogItem {
fn from(data: packed::LogItem) -> LogItem {
let account_id: u32 = data.account_id().unpack();
let service_flag: u8 = data.service_flag().into();
let data = JsonBytes::from_bytes(data.data().unpack());
LogItem {
account_id: Uint32::from(account_id),
service_flag: Uint32::from(service_flag as u32),
data,
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct TxReceipt {
pub tx_witness_hash: H256,
pub post_state: AccountMerkleState,
pub read_data_hashes: Vec<H256>,
pub logs: Vec<LogItem>,
}
impl From<TxReceipt> for packed::TxReceipt {
fn from(json: TxReceipt) -> packed::TxReceipt {
let TxReceipt {
tx_witness_hash,
post_state,
read_data_hashes,
logs,
} = json;
let tx_witness_hash: [u8; 32] = tx_witness_hash.into();
let read_data_hashes: Vec<_> = read_data_hashes
.into_iter()
.map(|hash| {
let hash: [u8; 32] = hash.into();
hash.pack()
})
.collect();
let logs: Vec<packed::LogItem> = logs.into_iter().map(|item| item.into()).collect();
packed::TxReceipt::new_builder()
.tx_witness_hash(tx_witness_hash.pack())
.post_state(post_state.into())
.read_data_hashes(read_data_hashes.pack())
.logs(logs.pack())
.build()
}
}
impl From<packed::TxReceipt> for TxReceipt {
fn from(data: packed::TxReceipt) -> TxReceipt {
let tx_witness_hash: [u8; 32] = data.tx_witness_hash().unpack();
let post_state: AccountMerkleState = data.post_state().into();
let read_data_hashes: Vec<_> = data
.read_data_hashes()
.into_iter()
.map(|hash| {
let hash: [u8; 32] = hash.unpack();
hash.into()
})
.collect();
let logs: Vec<LogItem> = data.logs().into_iter().map(|item| item.into()).collect();
TxReceipt {
tx_witness_hash: tx_witness_hash.into(),
post_state,
read_data_hashes,
logs,
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub enum ChallengeTargetType {
Transaction,
Withdrawal,
}
impl Default for ChallengeTargetType {
fn default() -> Self {
Self::Transaction
}
}
impl From<ChallengeTargetType> for packed::Byte {
fn from(json: ChallengeTargetType) -> packed::Byte {
match json {
ChallengeTargetType::Transaction => packed::Byte::new(0),
ChallengeTargetType::Withdrawal => packed::Byte::new(1),
}
}
}
impl TryFrom<packed::Byte> for ChallengeTargetType {
type Error = JsonError;
fn try_from(v: packed::Byte) -> Result<ChallengeTargetType, Self::Error> {
match u8::from(v) {
0 => Ok(ChallengeTargetType::Transaction),
1 => Ok(ChallengeTargetType::Withdrawal),
_ => Err(anyhow!("Invalid challenge target type {}", v)),
}
}
}
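// Sketch (illustrative, not in the source): the byte encoding round-trips for the
// two defined variants, and any other byte value is rejected:
//
// assert!(matches!(
//     ChallengeTargetType::try_from(packed::Byte::new(1)),
//     Ok(ChallengeTargetType::Withdrawal)
// ));
// assert!(ChallengeTargetType::try_from(packed::Byte::new(2)).is_err());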
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct ChallengeTarget {
pub block_hash: H256, // hash of challenged block
pub target_index: Uint32, // target index
pub target_type: ChallengeTargetType, // target type
}
impl From<ChallengeTarget> for packed::ChallengeTarget {
fn from(json: ChallengeTarget) -> packed::ChallengeTarget {
let ChallengeTarget {
block_hash,
target_index,
target_type,
} = json;
packed::ChallengeTarget::new_builder()
.block_hash(block_hash.pack())
.target_index(u32::from(target_index).pack())
.target_type(target_type.into())
.build()
}
}
impl From<packed::ChallengeTarget> for ChallengeTarget {
fn from(challenge_target: packed::ChallengeTarget) -> ChallengeTarget {
let target_index: u32 = challenge_target.target_index().unpack();
let target_type: packed::Byte = challenge_target.target_type();
Self {
block_hash: challenge_target.block_hash().unpack(),
target_index: Uint32::from(target_index),
target_type: target_type.try_into().expect("invalid target type"),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct ChallengeWitness {
pub raw_l2block: RawL2Block,
pub block_proof: JsonBytes, // block proof
}
impl From<ChallengeWitness> for packed::ChallengeWitness {
fn from(json: ChallengeWitness) -> packed::ChallengeWitness {
let ChallengeWitness {
raw_l2block,
block_proof,
} = json;
let raw_l2block: packed::RawL2Block = raw_l2block.into();
packed::ChallengeWitness::new_builder()
.raw_l2block(raw_l2block)
.block_proof(block_proof.into_bytes().pack())
.build()
}
}
impl From<packed::ChallengeWitness> for ChallengeWitness {
fn from(data: packed::ChallengeWitness) -> ChallengeWitness {
let raw_l2block: RawL2Block = data.raw_l2block().into();
let block_proof = JsonBytes::from_bytes(data.block_proof().unpack());
Self {
raw_l2block,
block_proof,
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct L2Block {
pub raw: RawL2Block,
pub kv_state: Vec<KVPair>,
pub kv_state_proof: JsonBytes,
pub transactions: Vec<L2Transaction>,
pub block_proof: JsonBytes,
pub withdrawals: Vec<WithdrawalRequest>,
}
impl From<L2Block> for packed::L2Block {
fn from(json: L2Block) -> packed::L2Block {
let L2Block {
raw,
kv_state,
kv_state_proof,
transactions,
block_proof,
withdrawals,
} = json;
let kv_pair_vec: Vec<packed::KVPair> = kv_state.into_iter().map(|k| k.into()).collect();
let packed_kv_state = packed::KVPairVec::new_builder().set(kv_pair_vec).build();
let transaction_vec: Vec<packed::L2Transaction> =
transactions.into_iter().map(|t| t.into()).collect();
let packed_transactions = packed::L2TransactionVec::new_builder()
.set(transaction_vec)
.build();
let withdrawal_requests_vec: Vec<packed::WithdrawalRequest> =
withdrawals.into_iter().map(|w| w.into()).collect();
let packed_withdrawal_requests = packed::WithdrawalRequestVec::new_builder()
.set(withdrawal_requests_vec)
.build();
packed::L2Block::new_builder()
.raw(raw.into())
.kv_state(packed_kv_state)
.kv_state_proof(kv_state_proof.into_bytes().pack())
.transactions(packed_transactions)
.block_proof(block_proof.into_bytes().pack())
.withdrawals(packed_withdrawal_requests)
.build()
}
}
impl From<packed::L2Block> for L2Block {
fn from(l2_block: packed::L2Block) -> L2Block {
Self {
raw: l2_block.raw().into(),
kv_state: l2_block.kv_state().into_iter().map(|k| k.into()).collect(),
kv_state_proof: JsonBytes::from_bytes(l2_block.kv_state_proof().unpack()),
transactions: l2_block
.transactions()
.into_iter()
.map(|t| t.into())
.collect(),
block_proof: JsonBytes::from_bytes(l2_block.block_proof().unpack()),
withdrawals: l2_block
.withdrawals()
.into_iter()
.map(|w| w.into())
.collect(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct RawL2Block {
pub number: Uint64,
pub parent_block_hash: H256,
pub block_producer_id: Uint32,
pub stake_cell_owner_lock_hash: H256,
pub timestamp: Uint64,
pub prev_account: AccountMerkleState,
pub post_account: AccountMerkleState,
pub submit_transactions: SubmitTransactions,
pub submit_withdrawals: SubmitWithdrawals,
// hash(account_root | account_count) of each withdrawal & transaction
pub state_checkpoint_list: Vec<H256>,
}
impl From<RawL2Block> for packed::RawL2Block {
fn from(json: RawL2Block) -> packed::RawL2Block {
let RawL2Block {
number,
parent_block_hash,
block_producer_id,
stake_cell_owner_lock_hash,
timestamp,
prev_account,
post_account,
submit_transactions,
submit_withdrawals,
state_checkpoint_list,
} = json;
let state_checkpoint_list = state_checkpoint_list
.into_iter()
.map(|checkpoint| checkpoint.pack())
.pack();
packed::RawL2Block::new_builder()
.number(u64::from(number).pack())
.parent_block_hash(parent_block_hash.pack())
.block_producer_id(u32::from(block_producer_id).pack())
.stake_cell_owner_lock_hash(stake_cell_owner_lock_hash.pack())
.timestamp(u64::from(timestamp).pack())
.prev_account(prev_account.into())
.post_account(post_account.into())
.submit_transactions(submit_transactions.into())
.submit_withdrawals(submit_withdrawals.into())
.state_checkpoint_list(state_checkpoint_list)
.build()
}
}
impl From<packed::RawL2Block> for RawL2Block {
fn from(raw_l2_block: packed::RawL2Block) -> RawL2Block {
let number: u64 = raw_l2_block.number().unpack();
let block_producer_id: u32 = raw_l2_block.block_producer_id().unpack();
let timestamp: u64 = raw_l2_block.timestamp().unpack();
let state_checkpoint_list = raw_l2_block
.state_checkpoint_list()
.into_iter()
.map(|checkpoint| checkpoint.unpack())
.collect();
Self {
number: number.into(),
parent_block_hash: raw_l2_block.parent_block_hash().unpack(),
block_producer_id: block_producer_id.into(),
stake_cell_owner_lock_hash: raw_l2_block.stake_cell_owner_lock_hash().unpack(),
timestamp: timestamp.into(),
prev_account: raw_l2_block.prev_account().into(),
post_account: raw_l2_block.post_account().into(),
submit_transactions: raw_l2_block.submit_transactions().into(),
submit_withdrawals: raw_l2_block.submit_withdrawals().into(),
state_checkpoint_list,
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct L2BlockView {
pub raw: RawL2Block,
pub kv_state: Vec<KVPair>,
pub kv_state_proof: JsonBytes,
pub transactions: Vec<L2TransactionView>,
pub block_proof: JsonBytes,
pub withdrawal_requests: Vec<WithdrawalRequest>,
pub hash: H256,
}
impl From<packed::L2Block> for L2BlockView {
fn from(l2_block: packed::L2Block) -> L2BlockView {
Self {
hash: H256::from(l2_block.raw().hash()),
raw: l2_block.raw().into(),
kv_state: l2_block.kv_state().into_iter().map(|k| k.into()).collect(),
kv_state_proof: JsonBytes::from_bytes(l2_block.kv_state_proof().unpack()),
transactions: l2_block
.transactions()
.into_iter()
.map(|t| t.into())
.collect(),
block_proof: JsonBytes::from_bytes(l2_block.block_proof().unpack()),
withdrawal_requests: l2_block
.withdrawals()
.into_iter()
.map(|w| w.into())
.collect(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct SubmitTransactions {
pub tx_witness_root: H256,
pub tx_count: Uint32,
// hash(account_root | account_count) before applying all transactions
pub prev_state_checkpoint: H256,
}
impl From<SubmitTransactions> for packed::SubmitTransactions {
fn from(json: SubmitTransactions) -> packed::SubmitTransactions {
let SubmitTransactions {
tx_witness_root,
tx_count,
prev_state_checkpoint,
} = json;
packed::SubmitTransactions::new_builder()
.tx_witness_root(tx_witness_root.pack())
.tx_count(u32::from(tx_count).pack())
.prev_state_checkpoint(prev_state_checkpoint.pack())
.build()
}
}
impl From<packed::SubmitTransactions> for SubmitTransactions {
fn from(submit_transactions: packed::SubmitTransactions) -> SubmitTransactions {
let tx_count: u32 = submit_transactions.tx_count().unpack();
Self {
tx_witness_root: submit_transactions.tx_witness_root().unpack(),
tx_count: tx_count.into(),
prev_state_checkpoint: submit_transactions.prev_state_checkpoint().unpack(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct SubmitWithdrawals {
pub withdrawal_witness_root: H256,
pub withdrawal_count: Uint32,
}
impl From<SubmitWithdrawals> for packed::SubmitWithdrawals {
fn from(json: SubmitWithdrawals) -> packed::SubmitWithdrawals {
let SubmitWithdrawals {
withdrawal_witness_root,
withdrawal_count,
} = json;
packed::SubmitWithdrawals::new_builder()
.withdrawal_witness_root(withdrawal_witness_root.pack())
.withdrawal_count(u32::from(withdrawal_count).pack())
.build()
}
}
impl From<packed::SubmitWithdrawals> for SubmitWithdrawals {
fn from(data: packed::SubmitWithdrawals) -> SubmitWithdrawals {
let withdrawal_count: u32 = data.withdrawal_count().unpack();
Self {
withdrawal_witness_root: data.withdrawal_witness_root().unpack(),
withdrawal_count: withdrawal_count.into(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct AccountMerkleState {
pub merkle_root: H256,
pub count: Uint32,
}
impl From<AccountMerkleState> for packed::AccountMerkleState {
fn from(json: AccountMerkleState) -> packed::AccountMerkleState {
let AccountMerkleState { merkle_root, count } = json;
packed::AccountMerkleState::new_builder()
.merkle_root(merkle_root.pack())
.count(u32::from(count).pack())
.build()
}
}
impl From<packed::AccountMerkleState> for AccountMerkleState {
fn from(account_merkle_state: packed::AccountMerkleState) -> AccountMerkleState {
let count: u32 = account_merkle_state.count().unpack();
Self {
merkle_root: account_merkle_state.merkle_root().unpack(),
count: count.into(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct BlockMerkleState {
pub merkle_root: H256,
pub count: Uint64,
}
impl From<BlockMerkleState> for packed::BlockMerkleState {
fn from(json: BlockMerkleState) -> packed::BlockMerkleState {
let count: u64 = json.count.into();
packed::BlockMerkleState::new_builder()
.merkle_root(json.merkle_root.pack())
.count(count.pack())
.build()
}
}
impl From<packed::BlockMerkleState> for BlockMerkleState {
fn from(block_merkle_state: packed::BlockMerkleState) -> BlockMerkleState {
let count: u64 = block_merkle_state.count().unpack();
Self {
merkle_root: block_merkle_state.merkle_root().unpack(),
count: count.into(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct KVPair {
pub k: H256,
pub v: H256,
}
impl From<KVPair> for packed::KVPair {
fn from(json: KVPair) -> packed::KVPair {
let KVPair { k, v } = json;
packed::KVPair::new_builder()
.k(k.pack())
.v(v.pack())
.build()
}
}
impl From<packed::KVPair> for KVPair {
fn from(kvpair: packed::KVPair) -> KVPair {
Self {
k: kvpair.k().unpack(),
v: kvpair.v().unpack(),
}
}
}
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash, Debug)]
#[serde(rename_all = "snake_case")]
pub struct GlobalState {
pub account: AccountMerkleState,
pub block: BlockMerkleState,
pub reverted_block_root: H256,
pub last_finalized_block_number: Uint64,
pub status: Uint32,
}
impl From<GlobalState> for packed::GlobalState {
fn from(json: GlobalState) -> packed::GlobalState {
let GlobalState {
account,
block,
reverted_block_root,
last_finalized_block_number,
status,
} = json;
let last_finalized_block_number: u64 = last_finalized_block_number.into();
let status: u32 = status.into();
packed::GlobalState::new_builder()
.account(account.into())
.block(block.into())
.reverted_block_root(reverted_block_root.pack())
.last_finalized_block_number(last_finalized_block_number.pack())
.status((status as u8).into())
.build()
}
}
impl From<packed::GlobalState> for GlobalState {
fn from(global_state: packed::GlobalState) -> GlobalState {
let last_finalized_block_number: u64 = global_state.last_finalized_block_number().unpack();
let status: u8 = global_state.status().into();
Self {
account: global_state.account().into(),
block: global_state.block().into(),
reverted_block_root: global_state.reverted_block_root().unpack(),
last_finalized_block_number: last_finalized_block_number.into(),
status: (status as u32).into(),
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct DepositionRequest {
pub script: Script,
pub sudt_script_hash: H256,
pub amount: Uint128,
pub capacity: Uint64,
}
impl From<DepositionRequest> for packed::DepositionRequest {
fn from(json: DepositionRequest) -> packed::DepositionRequest {
let DepositionRequest {
script,
sudt_script_hash,
amount,
capacity,
} = json;
packed::DepositionRequest::new_builder()
.script(script.into())
.sudt_script_hash(sudt_script_hash.pack())
.amount(u128::from(amount).pack())
.capacity(u64::from(capacity).pack())
.build()
}
}
impl From<packed::DepositionRequest> for DepositionRequest {
fn from(deposition_request: packed::DepositionRequest) -> DepositionRequest {
let amount: u128 = deposition_request.amount().unpack();
let capacity: u64 = deposition_request.capacity().unpack();
Self {
script: deposition_request.script().into(),
sudt_script_hash: deposition_request.sudt_script_hash().unpack(),
amount: amount.into(),
capacity: capacity.into(),
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct WithdrawalRequest {
pub raw: RawWithdrawalRequest,
pub signature: Byte65,
}
impl From<WithdrawalRequest> for packed::WithdrawalRequest {
fn from(json: WithdrawalRequest) -> packed::WithdrawalRequest {
let WithdrawalRequest { raw, signature } = json;
packed::WithdrawalRequest::new_builder()
.raw(raw.into())
.signature(signature.into())
.build()
}
}
impl From<packed::WithdrawalRequest> for WithdrawalRequest {
fn from(withdrawal_request: packed::WithdrawalRequest) -> WithdrawalRequest {
Self {
raw: withdrawal_request.raw().into(),
signature: withdrawal_request.signature().into(),
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct RawWithdrawalRequest {
pub nonce: Uint32,
pub capacity: Uint64,
pub amount: Uint128,
// buyer can pay sell_amount and sell_capacity to unlock
pub sell_amount: Uint128,
pub sell_capacity: Uint64,
pub sudt_script_hash: H256,
pub account_script_hash: H256,
// layer1 lock to withdraw after challenge period
pub owner_lock_hash: H256,
// layer1 lock to receive the payment; it must exist on the chain
pub payment_lock_hash: H256,
}
impl From<RawWithdrawalRequest> for packed::RawWithdrawalRequest {
fn from(json: RawWithdrawalRequest) -> packed::RawWithdrawalRequest {
let RawWithdrawalRequest {
nonce,
capacity,
amount,
sell_amount,
sell_capacity,
sudt_script_hash,
account_script_hash,
owner_lock_hash,
payment_lock_hash,
} = json;
packed::RawWithdrawalRequest::new_builder()
.nonce(u32::from(nonce).pack())
.capacity(u64::from(capacity).pack())
.amount(u128::from(amount).pack())
.sell_amount(u128::from(sell_amount).pack())
.sell_capacity(u64::from(sell_capacity).pack())
.sudt_script_hash(sudt_script_hash.pack())
.account_script_hash(account_script_hash.pack())
.owner_lock_hash(owner_lock_hash.pack())
.payment_lock_hash(payment_lock_hash.pack())
.build()
}
}
impl From<packed::RawWithdrawalRequest> for RawWithdrawalRequest {
fn from(raw_withdrawal_request: packed::RawWithdrawalRequest) -> RawWithdrawalRequest {
let nonce: u32 = raw_withdrawal_request.nonce().unpack();
let capacity: u64 = raw_withdrawal_request.capacity().unpack();
let amount: u128 = raw_withdrawal_request.amount().unpack();
let sell_capacity: u64 = raw_withdrawal_request.sell_capacity().unpack();
let sell_amount: u128 = raw_withdrawal_request.sell_amount().unpack();
Self {
nonce: nonce.into(),
capacity: capacity.into(),
amount: amount.into(),
sell_capacity: sell_capacity.into(),
sell_amount: sell_amount.into(),
sudt_script_hash: raw_withdrawal_request.sudt_script_hash().unpack(),
account_script_hash: raw_withdrawal_request.account_script_hash().unpack(),
owner_lock_hash: raw_withdrawal_request.owner_lock_hash().unpack(),
payment_lock_hash: raw_withdrawal_request.payment_lock_hash().unpack(),
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct L2BlockCommittedInfo {
pub number: Uint64,
pub block_hash: H256,
pub transaction_hash: H256,
}
impl From<L2BlockCommittedInfo> for packed::L2BlockCommittedInfo {
fn from(json: L2BlockCommittedInfo) -> packed::L2BlockCommittedInfo {
let L2BlockCommittedInfo {
number,
block_hash,
transaction_hash,
} = json;
packed::L2BlockCommittedInfo::new_builder()
.number(u64::from(number).pack())
.block_hash(block_hash.pack())
.transaction_hash(transaction_hash.pack())
.build()
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct RollupConfig {
pub l1_sudt_script_type_hash: H256,
pub custodian_script_type_hash: H256,
pub deposition_script_type_hash: H256,
pub withdrawal_script_type_hash: H256,
pub challenge_script_type_hash: H256,
pub stake_script_type_hash: H256,
pub l2_sudt_validator_script_type_hash: H256,
pub burn_lock_hash: H256,
pub required_staking_capacity: Uint64,
pub challenge_maturity_blocks: Uint64,
pub finality_blocks: Uint64,
pub reward_burn_rate: Uint32, // * reward_burn_rate / 100
pub allowed_eoa_type_hashes: Vec<H256>, // script code_hashes an EOA (externally owned account) is allowed to use
pub allowed_contract_type_hashes: Vec<H256>, // script code_hashes a contract account is allowed to use
pub compatible_chain_id: Uint32,
}
impl From<RollupConfig> for packed::RollupConfig {
fn from(json: RollupConfig) -> packed::RollupConfig {
let RollupConfig {
l1_sudt_script_type_hash,
custodian_script_type_hash,
deposition_script_type_hash,
withdrawal_script_type_hash,
challenge_script_type_hash,
stake_script_type_hash,
l2_sudt_validator_script_type_hash,
burn_lock_hash,
required_staking_capacity,
challenge_maturity_blocks,
finality_blocks,
reward_burn_rate, // * reward_burn_rate / 100
allowed_eoa_type_hashes, // script code_hashes an EOA (externally owned account) is allowed to use
allowed_contract_type_hashes, // script code_hashes a contract account is allowed to use
compatible_chain_id,
} = json;
let required_staking_capacity: u64 = required_staking_capacity.into();
let challenge_maturity_blocks: u64 = challenge_maturity_blocks.into();
let finality_blocks: u64 = finality_blocks.into();
let reward_burn_rate: u32 = reward_burn_rate.into();
let reward_burn_rate: u8 = reward_burn_rate.try_into().expect("reward burn rate");
packed::RollupConfig::new_builder()
.l1_sudt_script_type_hash(l1_sudt_script_type_hash.pack())
.custodian_script_type_hash(custodian_script_type_hash.pack())
.deposition_script_type_hash(deposition_script_type_hash.pack())
.withdrawal_script_type_hash(withdrawal_script_type_hash.pack())
.challenge_script_type_hash(challenge_script_type_hash.pack())
.stake_script_type_hash(stake_script_type_hash.pack())
.l2_sudt_validator_script_type_hash(l2_sudt_validator_script_type_hash.pack())
.burn_lock_hash(burn_lock_hash.pack())
.required_staking_capacity(required_staking_capacity.pack())
.challenge_maturity_blocks(challenge_maturity_blocks.pack())
.finality_blocks(finality_blocks.pack())
.reward_burn_rate(reward_burn_rate.into())
.allowed_eoa_type_hashes(
allowed_eoa_type_hashes
.into_iter()
.map(|hash| hash.pack())
.pack(),
)
.allowed_contract_type_hashes(
allowed_contract_type_hashes
.into_iter()
.map(|hash| hash.pack())
.pack(),
)
.compatible_chain_id(compatible_chain_id.value().pack())
.build()
}
}
impl From<packed::RollupConfig> for RollupConfig {
fn from(data: packed::RollupConfig) -> RollupConfig {
let required_staking_capacity: u64 = data.required_staking_capacity().unpack();
let challenge_maturity_blocks: u64 = data.challenge_maturity_blocks().unpack();
let finality_blocks: u64 = data.finality_blocks().unpack();
let reward_burn_rate: u8 = data.reward_burn_rate().into();
let compatible_chain_id: u32 = data.compatible_chain_id().unpack();
RollupConfig {
l1_sudt_script_type_hash: data.l1_sudt_script_type_hash().unpack(),
custodian_script_type_hash: data.custodian_script_type_hash().unpack(),
deposition_script_type_hash: data.deposition_script_type_hash().unpack(),
withdrawal_script_type_hash: data.withdrawal_script_type_hash().unpack(),
challenge_script_type_hash: data.challenge_script_type_hash().unpack(),
stake_script_type_hash: data.stake_script_type_hash().unpack(),
l2_sudt_validator_script_type_hash: data.l2_sudt_validator_script_type_hash().unpack(),
burn_lock_hash: data.burn_lock_hash().unpack(),
required_staking_capacity: required_staking_capacity.into(),
challenge_maturity_blocks: challenge_maturity_blocks.into(),
finality_blocks: finality_blocks.into(),
reward_burn_rate: (reward_burn_rate as u32).into(),
allowed_eoa_type_hashes: data
.allowed_eoa_type_hashes()
.into_iter()
.map(|hash| hash.unpack())
.collect(),
allowed_contract_type_hashes: data
.allowed_contract_type_hashes()
.into_iter()
.map(|hash| hash.unpack())
.collect(),
compatible_chain_id: compatible_chain_id.into(),
}
}
}
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub struct RunResult {
// return data
pub return_data: JsonBytes,
// log data
pub logs: Vec<LogItem>,
}
impl From<offchain::RunResult> for RunResult {
fn from(data: offchain::RunResult) -> RunResult {
let offchain::RunResult {
return_data, logs, ..
} = data;
RunResult {
return_data: JsonBytes::from_vec(return_data),
logs: logs.into_iter().map(Into::into).collect(),
}
}
}
ProductTypeDetailsPage.tsx | // @ts-nocheck
import CardSpacer from "@mzawadie/components/CardSpacer";
import { ConfirmButtonTransitionState } from "@mzawadie/components/ConfirmButton";
import Container from "@mzawadie/components/Container";
import ControlledSwitch from "@mzawadie/components/ControlledSwitch";
import Form from "@mzawadie/components/Form";
import Grid from "@mzawadie/components/Grid";
import Metadata from "@mzawadie/components/Metadata/Metadata";
import { MetadataFormData } from "@mzawadie/components/Metadata/types";
import PageHeader from "@mzawadie/components/PageHeader";
import Savebar from "@mzawadie/components/Savebar";
import { ListActions, ReorderEvent, UserError, maybe, sectionNames } from "@mzawadie/core";
import { ChangeEvent, FormChange, SubmitPromise } from "@mzawadie/hooks/useForm";
import useStateFromProps from "@mzawadie/hooks/useStateFromProps";
import { ProductAttributeType, WeightUnitsEnum } from "@mzawadie/types/globalTypes";
import { mapMetadataItemToInput } from "@mzawadie/utils/maps";
import useMetadataChangeTrigger from "@mzawadie/utils/metadata/useMetadataChangeTrigger";
import { Backlink } from "@saleor/macaw-ui";
import React from "react";
import { useIntl } from "react-intl";
import {
ProductTypeDetails_productType,
ProductTypeDetails_taxTypes,
} from "../../types/ProductTypeDetails";
import ProductTypeAttributes from "../ProductTypeAttributes/ProductTypeAttributes";
import ProductTypeDetails from "../ProductTypeDetails/ProductTypeDetails";
import ProductTypeShipping from "../ProductTypeShipping/ProductTypeShipping";
import ProductTypeTaxes from "../ProductTypeTaxes/ProductTypeTaxes";
interface ChoiceType {
label: string;
value: string;
}
export interface ProductTypeForm extends MetadataFormData {
name: string;
hasVariants: boolean;
isShippingRequired: boolean;
taxType: string;
productAttributes: ChoiceType[];
variantAttributes: ChoiceType[];
weight: number;
}
export interface ProductTypeDetailsPageProps {
errors: UserError[];
productType: ProductTypeDetails_productType;
defaultWeightUnit: WeightUnitsEnum;
disabled: boolean;
pageTitle: string;
productAttributeList: ListActions;
saveButtonBarState: ConfirmButtonTransitionState;
taxTypes: ProductTypeDetails_taxTypes[];
variantAttributeList: ListActions;
onAttributeAdd: (type: ProductAttributeType) => void;
onAttributeClick: (id: string) => void;
onAttributeReorder: (event: ReorderEvent, type: ProductAttributeType) => void;
onAttributeUnassign: (id: string) => void;
onBack: () => void;
onDelete: () => void;
onHasVariantsToggle: (hasVariants: boolean) => void;
onSubmit: (data: ProductTypeForm) => SubmitPromise;
}
function handleTaxTypeChange(
event: ChangeEvent,
taxTypes: ProductTypeDetails_taxTypes[],
formChange: FormChange,
displayChange: (name: string) => void
) {
formChange(event);
displayChange(taxTypes.find((taxType) => taxType.taxCode === event.target.value).description);
}
const ProductTypeDetailsPage: React.FC<ProductTypeDetailsPageProps> = ({
defaultWeightUnit,
disabled,
errors,
pageTitle,
productType,
productAttributeList,
saveButtonBarState,
taxTypes,
variantAttributeList,
onAttributeAdd,
onAttributeUnassign,
onAttributeReorder,
onAttributeClick,
onBack,
onDelete,
onHasVariantsToggle,
onSubmit,
}) => {
const intl = useIntl();
const {
isMetadataModified,
isPrivateMetadataModified,
makeChangeHandler: makeMetadataChangeHandler,
} = useMetadataChangeTrigger();
const [taxTypeDisplayName, setTaxTypeDisplayName] = useStateFromProps(
maybe(() => productType.taxType?.description, "")
);
const formInitialData: ProductTypeForm = {
hasVariants:
maybe(() => productType.hasVariants) !== undefined ? productType.hasVariants : false,
isShippingRequired:
maybe(() => productType.isShippingRequired) !== undefined
? productType.isShippingRequired
: false,
metadata: productType?.metadata?.map(mapMetadataItemToInput),
name: maybe(() => productType.name) !== undefined ? productType.name : "",
privateMetadata: productType?.privateMetadata?.map(mapMetadataItemToInput),
productAttributes:
maybe(() => productType.productAttributes) !== undefined
? productType.productAttributes?.map((attribute) => ({
label: attribute.name,
value: attribute.id,
}))
: [],
taxType: maybe(() => productType.taxType.taxCode, ""),
variantAttributes:
maybe(() => productType.variantAttributes) !== undefined
? productType.variantAttributes.map((attribute) => ({
label: attribute.name,
value: attribute.id,
}))
: [],
weight: maybe(() => productType.weight.value),
};
const handleSubmit = (data: ProductTypeForm) => {
const metadata = isMetadataModified ? data.metadata : undefined;
const privateMetadata = isPrivateMetadataModified ? data.privateMetadata : undefined;
return onSubmit({
...data,
metadata,
privateMetadata,
});
};
return (
<Form initial={formInitialData} onSubmit={handleSubmit} confirmLeave>
{({ change, data, hasChanged, submit }) => {
const changeMetadata = makeMetadataChangeHandler(change);
return (
<Container>
<Backlink onClick={onBack}>
{intl.formatMessage(sectionNames.productTypes)}
</Backlink>
<PageHeader title={pageTitle} />
<Grid>
<div>
<ProductTypeDetails
data={data}
disabled={disabled}
errors={errors}
onChange={change}
/>
<CardSpacer />
<ProductTypeTaxes
disabled={disabled}
data={data}
taxTypes={taxTypes}
taxTypeDisplayName={taxTypeDisplayName}
onChange={(event) =>
handleTaxTypeChange(
event,
taxTypes,
change,
setTaxTypeDisplayName
)
}
/>
<CardSpacer />
<ProductTypeAttributes
testId="assignProductsAttributes"
attributes={maybe(() => productType.productAttributes)}
disabled={disabled}
type={ProductAttributeType.PRODUCT}
onAttributeAssign={onAttributeAdd}
onAttributeClick={onAttributeClick}
onAttributeReorder={(event: ReorderEvent) =>
onAttributeReorder(event, ProductAttributeType.PRODUCT)
}
onAttributeUnassign={onAttributeUnassign}
{...productAttributeList}
/>
<CardSpacer />
<ControlledSwitch
checked={data.hasVariants}
disabled={disabled}
label={intl.formatMessage({
defaultMessage: "Product type uses Variant Attributes",
id: "5pHBSU",
description: "switch button",
})}
name="hasVariants"
onChange={(event) => onHasVariantsToggle(event.target.value)}
/>
{data.hasVariants && (
<>
<CardSpacer />
<ProductTypeAttributes
testId="assignVariantsAttributes"
attributes={maybe(() => productType.variantAttributes)}
disabled={disabled}
type={ProductAttributeType.VARIANT}
onAttributeAssign={onAttributeAdd}
onAttributeClick={onAttributeClick}
onAttributeReorder={(event: ReorderEvent) =>
onAttributeReorder(event, ProductAttributeType.VARIANT)
}
onAttributeUnassign={onAttributeUnassign}
{...variantAttributeList}
/>
</>
)}
<CardSpacer />
<Metadata data={data} onChange={changeMetadata} />
</div>
<div>
<ProductTypeShipping
disabled={disabled}
data={data}
weightUnit={productType?.weight?.unit || defaultWeightUnit}
onChange={change}
/>
</div>
</Grid>
<Savebar
onCancel={onBack}
onDelete={onDelete}
onSubmit={submit}
disabled={disabled || !hasChanged}
state={saveButtonBarState}
/>
</Container>
);
}}
</Form>
);
};
ProductTypeDetailsPage.displayName = "ProductTypeDetailsPage";
export default ProductTypeDetailsPage;
backend.rs | use super::error;
use super::types;
pub mod headstr;
pub mod parse_fn;
pub mod parse_token;
pub fn to_string(term: types::Term) -> Result<String, error::Error> {
let (head, setting, bnfs) = term;
let head_str = headstr::head_to_str(head, setting.clone());
let parse_token_fn_str = parse_token::make_parse_token_fn_str(setting.clone());
let parse_fn_fn_str = parse_fn::make_parse_fn_fn_str(setting, &bnfs)?;
Ok(format!(
"{}\n{}\n{}\n",
head_str, parse_fn_fn_str, parse_token_fn_str
))
}
test_control.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import MagicMock
from scaaml.capture.aes.control import CWControl
def test_control():
chip_id = 314159
with CWControl(chip_id=chip_id, scope_io=MagicMock()) as control:
assert control.chip_id == chip_id
assert control._scope_io.tio1 == "serial_rx"
assert control._scope_io.tio2 == "serial_tx"
assert control._scope_io.hs2 == "clkgen"
assert control._scope_io.nrst == "high_z"
query.rs | //! For query parameter extractor documentation, see [`Query`].
use std::{fmt, ops, sync::Arc};
use futures_util::future::{err, ok, Ready};
use serde::de;
use crate::{dev::Payload, error::QueryPayloadError, Error, FromRequest, HttpRequest};
/// Extract typed information from the request's query.
///
/// To extract typed data from the URL query string, the inner type `T` must implement the
/// [`serde::Deserialize`] trait.
///
/// Use [`QueryConfig`] to configure extraction process.
///
/// # Panics
/// A query string consists of unordered `key=value` pairs, therefore it cannot be decoded into any
/// type which depends upon data ordering (eg. tuples). Trying to do so will result in a panic.
///
/// # Examples
/// ```
/// use actix_web::{get, web};
/// use serde::Deserialize;
///
/// #[derive(Debug, Deserialize)]
/// pub enum ResponseType {
/// Token,
/// Code
/// }
///
/// #[derive(Debug, Deserialize)]
/// pub struct AuthRequest {
/// id: u64,
/// response_type: ResponseType,
/// }
///
/// // Deserialize `AuthRequest` struct from query string.
/// // This handler gets called only if the request's query parameters contain both fields.
/// // A valid request path for this handler would be `/?id=64&response_type=Code`.
/// #[get("/")]
/// async fn index(info: web::Query<AuthRequest>) -> String {
/// format!("Authorization request for id={} and type={:?}!", info.id, info.response_type)
/// }
///
/// // To access the entire underlying query struct, use `.into_inner()`.
/// #[get("/debug1")]
/// async fn debug1(info: web::Query<AuthRequest>) -> String {
/// dbg!("Authorization object={:?}", info.into_inner());
/// "OK".to_string()
/// }
///
/// // Or use `.0`, which is equivalent to `.into_inner()`.
/// #[get("/debug2")]
/// async fn debug2(info: web::Query<AuthRequest>) -> String {
/// dbg!("Authorization object={:?}", info.0);
/// "OK".to_string()
/// }
/// ```
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Query<T>(pub T);
impl<T> Query<T> {
/// Unwrap into inner `T` value.
pub fn into_inner(self) -> T {
self.0
}
/// Deserialize `T` from a URL encoded query parameter string.
///
/// ```
/// # use std::collections::HashMap;
/// # use actix_web::web::Query;
/// let numbers = Query::<HashMap<String, u32>>::from_query("one=1&two=2").unwrap();
/// assert_eq!(numbers.get("one"), Some(&1));
/// assert_eq!(numbers.get("two"), Some(&2));
/// assert!(numbers.get("three").is_none());
/// ```
pub fn from_query(query_str: &str) -> Result<Self, QueryPayloadError>
where
T: de::DeserializeOwned,
{
serde_urlencoded::from_str::<T>(query_str)
.map(Self)
.map_err(QueryPayloadError::Deserialize)
}
}
impl<T> ops::Deref for Query<T> {
type Target = T;
fn deref(&self) -> &T {
&self.0
}
}
impl<T> ops::DerefMut for Query<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.0
}
}
impl<T: fmt::Debug> fmt::Debug for Query<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
impl<T: fmt::Display> fmt::Display for Query<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
/// See [here](#usage) for an example of usage as an extractor.
impl<T> FromRequest for Query<T>
where
T: de::DeserializeOwned,
{
type Error = Error;
type Future = Ready<Result<Self, Error>>;
type Config = QueryConfig;
#[inline]
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
let error_handler = req
.app_data::<Self::Config>()
.map(|c| c.err_handler.clone())
.unwrap_or(None);
serde_urlencoded::from_str::<T>(req.query_string())
.map(|val| ok(Query(val)))
.unwrap_or_else(move |e| {
let e = QueryPayloadError::Deserialize(e);
log::debug!(
"Failed during Query extractor deserialization. \
Request path: {:?}",
req.path()
);
let e = if let Some(error_handler) = error_handler {
(error_handler)(e, req)
} else {
e.into()
};
err(e)
})
}
}
/// Query extractor configuration.
///
/// # Examples
/// ```
/// use actix_web::{error, get, web, App, FromRequest, HttpResponse};
/// use serde::Deserialize;
///
/// #[derive(Deserialize)]
/// struct Info {
/// username: String,
/// }
///
/// /// deserialize `Info` from request's querystring
/// #[get("/")]
/// async fn index(info: web::Query<Info>) -> String {
/// format!("Welcome {}!", info.username)
/// }
///
/// // custom `Query` extractor configuration
/// let query_cfg = web::QueryConfig::default()
/// // use custom error handler
/// .error_handler(|err, req| {
/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into()
/// });
///
/// App::new()
/// .app_data(query_cfg)
/// .service(index);
/// ```
#[derive(Clone)]
pub struct QueryConfig {
err_handler: Option<Arc<dyn Fn(QueryPayloadError, &HttpRequest) -> Error + Send + Sync>>,
}
impl QueryConfig {
/// Set custom error handler
pub fn error_handler<F>(mut self, f: F) -> Self
where
F: Fn(QueryPayloadError, &HttpRequest) -> Error + Send + Sync + 'static,
{
self.err_handler = Some(Arc::new(f));
self
}
}
impl Default for QueryConfig {
fn default() -> Self {
QueryConfig { err_handler: None }
}
}
#[cfg(test)]
mod tests {
use actix_http::http::StatusCode;
use derive_more::Display;
use serde::Deserialize;
use super::*;
use crate::error::InternalError;
use crate::test::TestRequest;
use crate::HttpResponse;
#[derive(Deserialize, Debug, Display)]
struct Id {
id: String,
}
#[actix_rt::test]
async fn test_service_request_extract() {
let req = TestRequest::with_uri("/name/user1/").to_srv_request();
assert!(Query::<Id>::from_query(&req.query_string()).is_err());
let req = TestRequest::with_uri("/name/user1/?id=test").to_srv_request();
let mut s = Query::<Id>::from_query(&req.query_string()).unwrap();
assert_eq!(s.id, "test");
assert_eq!(format!("{}, {:?}", s, s), "test, Id { id: \"test\" }");
s.id = "test1".to_string();
let s = s.into_inner();
assert_eq!(s.id, "test1");
}
#[actix_rt::test]
async fn test_request_extract() {
let req = TestRequest::with_uri("/name/user1/").to_srv_request();
let (req, mut pl) = req.into_parts();
assert!(Query::<Id>::from_request(&req, &mut pl).await.is_err());
let req = TestRequest::with_uri("/name/user1/?id=test").to_srv_request();
let (req, mut pl) = req.into_parts();
let mut s = Query::<Id>::from_request(&req, &mut pl).await.unwrap();
assert_eq!(s.id, "test");
assert_eq!(format!("{}, {:?}", s, s), "test, Id { id: \"test\" }");
s.id = "test1".to_string();
let s = s.into_inner();
assert_eq!(s.id, "test1");
}
#[actix_rt::test]
#[should_panic]
async fn test_tuple_panic() {
let req = TestRequest::with_uri("/?one=1&two=2").to_srv_request();
let (req, mut pl) = req.into_parts();
Query::<(u32, u32)>::from_request(&req, &mut pl)
.await
.unwrap();
}
#[actix_rt::test]
async fn test_custom_error_responder() {
let req = TestRequest::with_uri("/name/user1/")
.app_data(QueryConfig::default().error_handler(|e, _| {
let resp = HttpResponse::UnprocessableEntity().finish();
InternalError::from_response(e, resp).into()
}))
.to_srv_request();
let (req, mut pl) = req.into_parts();
let query = Query::<Id>::from_request(&req, &mut pl).await;
assert!(query.is_err());
assert_eq!(
query
.unwrap_err()
.as_response_error()
.error_response()
.status(),
StatusCode::UNPROCESSABLE_ENTITY
);
}
}
vae.py | """Example of an MLP in Myia.
Myia is still a work in progress, and this example may change in the future.
"""
import time
from dataclasses import dataclass
import numpy
import torch
from numpy.random import RandomState
from torchvision import datasets, transforms
import myia.public_api as pub
from myia import ArithmeticData, myia, value_and_grad
from myia.api import to_device
from myia.debug import traceback # noqa
from myia.operations import array_exp, array_pow, random_initialize
###########
# Options #
###########
dtype = "float32"
backend = "pytorch"
# backend = 'relay' # Uncomment to use relay backend
device_type = "cpu"
# device_type = 'cuda' # Uncomment to run on the gpu
backend_options_dict = {
"pytorch": {"device": device_type},
"relay": {"target": device_type, "device_id": 0},
}
backend_options = backend_options_dict[backend]
###############
# Hyperparams #
###############
lr = getattr(numpy, dtype)(0.01)
########
# Data #
########
# This just generates random data so we don't have to load a real dataset,
# but the model will work just as well on a real dataset.
def param(R, *size):
"""Generates a random array using the generator R."""
return numpy.array(R.rand(*size) * 2 - 1, dtype=dtype)
def generate_data(n, batch_size, input_size, target_size, *, seed=87):
"""Generate inputs and targets.
Generates n batches of samples of size input_size, matched with
a single target.
"""
R = RandomState(seed=seed)
return [
(param(R, batch_size, input_size), param(R, batch_size, target_size))
for i in range(n)
]
def mlp_parameters(*layer_sizes, seed=90909):
"""Generates parameters for a MLP given a list of layer sizes."""
R = RandomState(seed=seed)
parameters = []
for i, o in zip(layer_sizes[:-1], layer_sizes[1:]):
W = param(R, i, o)
b = param(R, 1, o)
parameters.append((W, b))
return parameters
#########
# Model #
#########
# We generate a MLP model with some arbitrary number of layers and tanh
# activations.
@dataclass(frozen=True)
class Linear(ArithmeticData):
"""Linear layer."""
W: "Weights array"
b: "Biases vector"
def apply(self, input):
"""Apply the layer."""
return input @ self.W + self.b
@dataclass(frozen=True)
class Tanh(ArithmeticData):
"""Tanh layer."""
def apply(self, input):
"""Apply the layer."""
return numpy.tanh(input)
@dataclass(frozen=True)
class Sequential(ArithmeticData):
"""Sequential layer, applies all sub-layers in order."""
layers: "Tuple of layers"
def apply(self, x):
"""Apply the layer."""
for layer in self.layers:
x = layer.apply(x)
return x
@dataclass(frozen=True)
class VAE(ArithmeticData):
"""Sequential layer, applies all sub-layers in order."""
fc1: "layer fc1"
fc21: "layer fc21"
fc22: "layer fc22"
fc3: "layer fc3"
fc4: "layer fc4"
def encode(self, x):
h1 = pub.relu(self.fc1.apply(x))
return self.fc21.apply(h1), self.fc22.apply(h1)
def reparameterize(self, mu, logvar, rstate):
std = array_exp(0.5 * logvar)
eps, rstate = pub.uniform(rstate, (2, 20), -1.0, 1.0)
return mu + eps * std, rstate
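    # Note: this example draws eps from a uniform distribution; the standard VAE
    # reparameterization uses eps ~ N(0, I). Either way, z = mu + eps * std keeps
    # the sampling step differentiable with respect to mu and logvar.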
def decode(self, z):
h3 = pub.relu(self.fc3.apply(z))
return pub.sigmoid(self.fc4.apply(h3))
def forward(self, x, rstate):
mu, logvar = self.encode(pub.reshape(x, (-1, 784)))
z, rstate = self.reparameterize(mu, logvar, rstate)
return self.decode(z), mu, logvar, rstate
params = (
mlp_parameters(*(784, 400))[0],
mlp_parameters(*(400, 20))[0],
mlp_parameters(*(400, 20))[0],
mlp_parameters(*(20, 400))[0],
mlp_parameters(*(400, 784))[0],
)
model = VAE(
Linear(params[0][0], params[0][1]),
Linear(params[1][0], params[1][1]),
Linear(params[2][0], params[2][1]),
Linear(params[3][0], params[3][1]),
Linear(params[4][0], params[4][1]),
)
model = to_device(model, backend, backend_options, broaden=False)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = pub.binary_cross_entropy(
recon_x, pub.reshape(x, (-1, 784)), reduction="sum"
)
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * pub._sum(1 + logvar - array_pow(mu, 2) - array_exp(logvar))
return BCE + KLD
def cost(model, data, rstate):
recon_batch, mu, logvar, _rstate = model.forward(data, rstate)
loss = loss_function(recon_batch, data, mu, logvar)
return loss.item(), _rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step(model, data, lr, rstate):
"""Returns the loss and parameter gradients.
value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
The 'model' argument can be omitted: by default the derivative wrt
the first argument is returned.
"""
(_cost, rstate), dmodel = value_and_grad(cost, "model")(
model, data, rstate, dout=(1, 1)
)
return _cost, model - lr * dmodel, rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_eval(model, data, rstate):
"""Returns the loss and parameter gradients.
value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
The 'model' argument can be omitted: by default the derivative wrt
the first argument is returned.
"""
return cost(model, data, rstate)
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_init_seed():
"""Returns the loss and parameter gradients.
value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
The 'model' argument can be omitted: by default the derivative wrt
the first argument is returned.
"""
return random_initialize(1)
lr = getattr(numpy, dtype)(0.01)
if __name__ == "__main__":
seed = 123
cuda = False
batch_size = 2
epochs = 1
torch.manual_seed(seed)
device = torch.device("cuda" if cuda else "cpu")
kwargs = {"num_workers": 1, "pin_memory": True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=True,
download=True,
transform=transforms.ToTensor(),
),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
rand_state = step_init_seed()
for _ in range(epochs):
costs = []
t0 = time.time()
for i, (data, _) in enumerate(train_loader):
print("i", i + 1, "/", len(train_loader))
_cost, model, rand_state = step(
model, data.reshape((batch_size, 784)).numpy(), lr, rand_state
)
costs.append(_cost)
costs = [float(c.from_device()) for c in costs]
c = sum(costs) / len(costs)
t = time.time() - t0
print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
test_loader = torch.utils.data.DataLoader(
datasets.MNIST("../data", train=False, transform=transforms.ToTensor()),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
costs = []
t0 = time.time()
for i, (data, _) in enumerate(test_loader):
_cost, rand_state = step_eval(
model, data.reshape((batch_size, 784)).numpy(), rand_state
)
costs.append(_cost)
costs = [float(c.from_device()) for c in costs]
c = sum(costs) / len(costs)
t = time.time() - t0
print(f"Cost: {c:15.10f}\tTime: {t:15.10f}") | # but the model will work just as well on a real dataset.
|
app_auth_test.go | package dao
import (
"context" |
func TestDaoAllAppsInfo(t *testing.T) {
var (
c = context.Background()
)
convey.Convey("AllAppsInfo", t, func(ctx convey.C) {
res, err := d.AllAppsInfo(c)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
}
func TestDaoAllAppsAuth(t *testing.T) {
var (
c = context.Background()
)
convey.Convey("AllAppsAuth", t, func(ctx convey.C) {
res, err := d.AllAppsAuth(c)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
} | "testing"
"github.com/smartystreets/goconvey/convey"
) |
designation.py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Designation(Document):
pass
slo.py | from swift.ipvl.inspect_custom import whoami, whosdaddy
pass # (WIS) print __name__
class StaticLargeObject(object):
    """docstring for StaticLargeObject"""

    def __init__(self, app, conf):
        pass  # (WIS) print "%s %s (%s -> %s)" % (__name__, self.__class__.__name__, whosdaddy(), whoami())
        self.app = app
        self.conf = conf

    def __call__(self, env, start_response):
        pass  # (WIS) print "%s %s\n" % (self.__class__.__name__, env)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return self.__class__.__name__ + " -> " + self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
pass # (WIS) print "%s (%s -> %s)" % (__name__, whosdaddy(), whoami())
conf = global_conf.copy()
conf.update(local_conf)
# max_manifest_segments = int(conf.get('max_manifest_segments',
# DEFAULT_MAX_MANIFEST_SEGMENTS))
# max_manifest_size = int(conf.get('max_manifest_size',
# DEFAULT_MAX_MANIFEST_SIZE))
# min_segment_size = int(conf.get('min_segment_size',
# DEFAULT_MIN_SEGMENT_SIZE))
#
# register_swift_info('slo',
# max_manifest_segments=max_manifest_segments,
# max_manifest_size=max_manifest_size,
# min_segment_size=min_segment_size)
def slo_filter(app):
pass # (WIS) print "%s (%s -> %s)" % (__name__, whosdaddy(), whoami())
return StaticLargeObject(app, conf)
return slo_filter
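# Usage sketch (assuming a PasteDeploy-style pipeline; the section name is
# hypothetical). PasteDeploy calls filter_factory(global_conf, **local_conf)
# once at load time, and the returned slo_filter(app) wraps the next app:
#
#   [filter:slo]
#   use = egg:swift#slo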
| """docstring for StaticLargeObject"""
def __init__(self, app, conf):
pass # (WIS) print "%s %s (%s -> %s)" % (__name__, self.__class__.__name__, whosdaddy(), whoami())
self.app = app
self.conf = conf
def __call__(self, env, start_response):
pass # (WIS) print "%s %s\n" % (self.__class__.__name__, env)
start_response('200 OK', [('Content-Type', 'text/plain')])
return self.__class__.__name__ + " -> " + self.app(env, start_response) |
use-experience-hook.ts | import {useSelector, useDispatch} from 'react-redux';
import {WrappedExperience, ExperienceProps, Experience} from '../interfaces/experience';
import {RootState} from '../reducers';
import {pick} from 'lodash';
import {
searchExperiences,
searchPeople,
searchTags,
cloneExperience,
loadExperiences,
loadExperiencesPostList,
fetchPostsExperience,
addPostsExperience,
createExperience,
fetchDetailExperience,
subscribeExperience,
updateExperience,
deleteExperience,
unsubscribeExperience,
clearExperiences,
fetchTrendingExperience,
} from 'src/reducers/experience/actions';
import {ExperienceState} from 'src/reducers/experience/reducer';
import {fetchUserExperience} from 'src/reducers/user/actions';
export enum ExperienceOwner {
ALL = 'all',
CURRENT_USER = 'current_user',
PROFILE = 'profile',
TRENDING = 'trending',
}
// TODO: wouldn't it be better to rename this to something more general, like useSearchHook?
// It's not obvious that this hook can also be used when we only want to searchPeople.
export const useExperienceHook = () => {
const dispatch = useDispatch();
const {
experiences,
experiencePosts,
trendingExperiences,
selectedExperience,
    searchTags: tags,
    searchPeople: people,
    detail: experience,
hasMore,
meta,
loading,
} = useSelector<RootState, ExperienceState>(state => state.experienceState);
const profileExperiences = useSelector<RootState, WrappedExperience[]>(
state => state.profileState.experience.data,
);
const userExperiences = useSelector<RootState, WrappedExperience[]>(
state => state.userState.experiences,
);
const loadExperience = () => {
dispatch(loadExperiences());
};
const loadExperiencePostList = (
postId: string,
callback: (postsExperiences: Experience[]) => void,
) => {
dispatch(loadExperiencesPostList(postId, callback));
};
const addPostsToExperience = (
postId: string,
listExperiences: string[],
callback: () => void,
) => {
dispatch(addPostsExperience(postId, listExperiences, callback));
};
const loadPostExperience = (experienceId: string) => {
dispatch(fetchPostsExperience(experienceId));
};
const loadNextPostExperience = (experienceId: string) => {
const page = meta.currentPage + 1;
dispatch(fetchPostsExperience(experienceId, page));
};
const loadTrendingExperience = () => {
dispatch(fetchTrendingExperience());
};
const nextPage = async () => {
const page = meta.currentPage + 1;
dispatch(loadExperiences(page));
};
const getExperienceDetail = (experienceId: string | string[]) => {
const id = experienceId as string;
dispatch(fetchDetailExperience(id));
};
const findExperience = async (query: string, page = 1) => {
dispatch(searchExperiences(query, page));
};
const findPeople = (query: string) => {
dispatch(searchPeople(query));
};
const findTags = (query: string) => {
dispatch(searchTags(query));
};
const followExperience = (
experienceId: string,
newExperience: ExperienceProps,
callback?: (id: string) => void,
) => {
const attributes = pick(newExperience, [
'name',
'description',
'allowedTags',
'experienceImageURL',
'prohibitedTags',
'people',
]);
dispatch(cloneExperience(experienceId, attributes, callback));
};
const editExperience = (
experienceId: string,
newExperience: ExperienceProps,
callback?: (id: string) => void,
) => {
const attributes = pick(newExperience, [
'name',
'description',
'allowedTags',
'experienceImageURL',
'prohibitedTags',
'people',
]);
dispatch(updateExperience(experienceId, attributes, callback));
};
const saveExperience = (newExperience: ExperienceProps, callback?: (id: string) => void) => {
dispatch(createExperience(newExperience, callback));
};
const beSubscribeExperience = (experienceId: string, callback?: () => void) => {
dispatch(
subscribeExperience(experienceId, () => {
dispatch(fetchUserExperience());
callback && callback();
}),
);
};
const removeExperience = (experienceId: string, callback?: () => void) => {
dispatch(
deleteExperience(experienceId, () => {
dispatch(fetchUserExperience());
callback && callback();
}),
);
};
const beUnsubscribeExperience = (experienceId: string, callback?: () => void) => {
dispatch(
unsubscribeExperience(experienceId, () => {
dispatch(fetchUserExperience());
callback && callback();
}),
);
};
const clear = () => {
dispatch(clearExperiences());
};
return {
loading,
page: meta.currentPage,
hasMore,
experiences,
experiencePosts,
trendingExperiences,
userExperiences,
profileExperiences,
experience,
selectedExperience,
tags,
people,
loadExperience,
loadExperiencePostList,
addPostsToExperience,
loadPostExperience,
loadNextPostExperience,
nextPage,
searchExperience: findExperience,
searchPeople: findPeople,
searchTags: findTags,
cloneExperience: followExperience,
saveExperience,
getExperienceDetail,
subscribeExperience: beSubscribeExperience,
updateExperience: editExperience,
removeExperience,
unsubscribeExperience: beUnsubscribeExperience,
clearExperiences: clear,
loadTrendingExperience,
};
};
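// Usage sketch (hypothetical component, not part of this module):
//
//   const {experiences, hasMore, loadExperience, nextPage} = useExperienceHook();
//   useEffect(() => {
//     loadExperience();
//   }, []);
//
// nextPage() can then back an infinite-scroll handler while hasMore is true.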
fetcher-fetch.d.ts | import { OperationDefinitionNode } from 'graphql';
import { ReactQueryVisitor } from './visitor';
import { FetcherRenderer } from './fetcher';
export declare class FetchFetcher implements FetcherRenderer {
private visitor;
constructor(visitor: ReactQueryVisitor);
generateFetcherImplementaion(): string;
generateQueryHook(
node: OperationDefinitionNode,
documentVariableName: string,
operationName: string,
operationResultType: string,
    operationVariablesTypes: string,
    hasRequiredVariables: boolean
  ): string;
  generateMutationHook(
    node: OperationDefinitionNode,
    documentVariableName: string,
operationName: string,
operationResultType: string,
operationVariablesTypes: string
): string;
generateFetcherFetch(
node: OperationDefinitionNode,
documentVariableName: string,
operationName: string,
operationResultType: string,
operationVariablesTypes: string,
hasRequiredVariables: boolean
): string;
}
test_cumulative.py | """
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
np.minimum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummax().values,
np.maximum.accumulate(np.array(datetime_series)),
)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummin_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-1", "2000-1-1", "2000-1-1"]
).tz_localize(tz)
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize("tz", [None, "US/Pacific"])
def test_cummax_datetime64(self, tz):
s = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-1", "NaT", "2000-1-3"]
).tz_localize(tz)
)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "NaT", "2000-1-2", "NaT", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_datetime(
["NaT", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-2", "2000-1-3"]
).tz_localize(tz)
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "1 min"])
)
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "1 min", "1 min", "1 min"])
)
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(["NaT", "2 min", "NaT", "1 min", "NaT", "3 min"]))
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "NaT", "2 min", "NaT", "3 min"])
)
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(
pd.to_timedelta(["NaT", "2 min", "2 min", "2 min", "2 min", "3 min"])
)
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummethods_bool(self):
# GH#6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
args = product((a, b, c, d), methods)
for s, method in args:
expected = pd.Series(methods[method](s.values))
result = getattr(s, method)()
tm.assert_series_equal(result, expected)
e = pd.Series([False, True, np.nan, False])
cse = pd.Series([0, 1, np.nan, 1], dtype=object)
cpe = pd.Series([False, 0, np.nan, 0])
cmin = pd.Series([False, False, np.nan, False])
cmax = pd.Series([False, True, np.nan, True])
expecteds = {"cumsum": cse, "cumprod": cpe, "cummin": cmin, "cummax": cmax}
        for method in methods:
            res = getattr(e, method)()
            tm.assert_series_equal(res, expecteds[method])
index.rs | //! Indexing implementations for `[T]`.
use crate::ops;
use crate::ptr;
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, I> ops::Index<I> for [T]
where
I: SliceIndex<[T]>,
{
type Output = I::Output;
#[inline]
fn index(&self, index: I) -> &I::Output {
index.index(self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, I> ops::IndexMut<I> for [T]
where
I: SliceIndex<[T]>,
{
#[inline]
fn index_mut(&mut self, index: I) -> &mut I::Output {
index.index_mut(self)
}
}
#[inline(never)]
#[cold]
#[track_caller]
fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
panic!("range start index {} out of range for slice of length {}", index, len);
}
#[inline(never)]
#[cold]
#[track_caller]
fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
panic!("range end index {} out of range for slice of length {}", index, len);
}
#[inline(never)]
#[cold]
#[track_caller]
fn slice_index_order_fail(index: usize, end: usize) -> ! {
panic!("slice index starts at {} but ends at {}", index, end);
}
#[inline(never)]
#[cold]
#[track_caller]
fn slice_start_index_overflow_fail() -> ! {
panic!("attempted to index slice from after maximum usize");
}
#[inline(never)]
#[cold]
#[track_caller]
fn slice_end_index_overflow_fail() -> ! {
panic!("attempted to index slice up to maximum usize");
}
mod private_slice_index {
use super::ops;
#[stable(feature = "slice_get_slice", since = "1.28.0")]
pub trait Sealed {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for usize {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::Range<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeTo<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeFrom<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeFull {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeInclusive<usize> {}
#[stable(feature = "slice_get_slice", since = "1.28.0")]
impl Sealed for ops::RangeToInclusive<usize> {}
#[stable(feature = "slice_index_with_ops_bound_pair", since = "1.53.0")]
impl Sealed for (ops::Bound<usize>, ops::Bound<usize>) {}
}
/// A helper trait used for indexing operations.
///
/// Implementations of this trait have to promise that if the argument
/// to `get_(mut_)unchecked` is a safe reference, then so is the result.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
#[rustc_on_unimplemented(
on(T = "str", label = "string indices are ranges of `usize`",),
on(
all(any(T = "str", T = "&str", T = "std::string::String"), _Self = "{integer}"),
note = "you can use `.chars().nth()` or `.bytes().nth()`\n\
for more information, see chapter 8 in The Book: \
<https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
),
message = "the type `{T}` cannot be indexed by `{Self}`",
label = "slice indices are of type `usize` or ranges of `usize`"
)]
pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
/// The output type returned by methods.
#[stable(feature = "slice_get_slice", since = "1.28.0")]
type Output: ?Sized;
/// Returns a shared reference to the output at this location, if in
/// bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
fn get(self, slice: &T) -> Option<&Self::Output>;
/// Returns a mutable reference to the output at this location, if in
/// bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
fn get_mut(self, slice: &mut T) -> Option<&mut Self::Output>;
/// Returns a shared reference to the output at this location, without
/// performing any bounds checking.
/// Calling this method with an out-of-bounds index or a dangling `slice` pointer
/// is *[undefined behavior]* even if the resulting reference is not used.
///
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[unstable(feature = "slice_index_methods", issue = "none")]
unsafe fn get_unchecked(self, slice: *const T) -> *const Self::Output;
/// Returns a mutable reference to the output at this location, without
/// performing any bounds checking.
/// Calling this method with an out-of-bounds index or a dangling `slice` pointer
/// is *[undefined behavior]* even if the resulting reference is not used.
///
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[unstable(feature = "slice_index_methods", issue = "none")]
unsafe fn get_unchecked_mut(self, slice: *mut T) -> *mut Self::Output;
/// Returns a shared reference to the output at this location, panicking
/// if out of bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
#[track_caller]
fn index(self, slice: &T) -> &Self::Output;
/// Returns a mutable reference to the output at this location, panicking
/// if out of bounds.
#[unstable(feature = "slice_index_methods", issue = "none")]
#[track_caller]
fn index_mut(self, slice: &mut T) -> &mut Self::Output;
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for usize {
type Output = T;
#[inline]
fn get(self, slice: &[T]) -> Option<&T> {
// SAFETY: `self` is checked to be in bounds.
if self < slice.len() { unsafe { Some(&*self.get_unchecked(slice)) } } else { None }
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
// SAFETY: `self` is checked to be in bounds.
if self < slice.len() { unsafe { Some(&mut *self.get_unchecked_mut(slice)) } } else { None }
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe { slice.as_ptr().add(self) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
// SAFETY: see comments for `get_unchecked` above.
unsafe { slice.as_mut_ptr().add(self) }
}
#[inline]
fn index(self, slice: &[T]) -> &T {
// N.B., use intrinsic indexing
&(*slice)[self]
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut T {
// N.B., use intrinsic indexing
&mut (*slice)[self]
}
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
if self.start > self.end || self.end > slice.len() {
None
} else {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*self.get_unchecked(slice)) }
}
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if self.start > self.end || self.end > slice.len() {
None
} else {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
}
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe { ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: see comments for `get_unchecked` above.
unsafe {
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
}
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
if self.start > self.end {
slice_index_order_fail(self.start, self.end);
} else if self.end > slice.len() {
slice_end_index_len_fail(self.end, slice.len());
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*self.get_unchecked(slice) }
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > self.end {
slice_index_order_fail(self.start, self.end);
} else if self.end > slice.len() {
slice_end_index_len_fail(self.end, slice.len());
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *self.get_unchecked_mut(slice) }
}
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeTo<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
(0..self.end).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..self.end).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
(0..self.end).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..self.end).index_mut(slice)
}
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeFrom<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
(self.start..slice.len()).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(self.start..slice.len()).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..slice.len()).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
if self.start > slice.len() {
slice_start_index_len_fail(self.start, slice.len());
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*self.get_unchecked(slice) }
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > slice.len() {
slice_start_index_len_fail(self.start, slice.len());
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *self.get_unchecked_mut(slice) }
}
}
#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeFull {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
Some(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
Some(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
slice
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
slice
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
slice
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
slice
}
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeInclusive<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { self.into_slice_range().get_unchecked(slice) }
}
#[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
        unsafe { self.into_slice_range().get_unchecked_mut(slice) }
    }
#[inline]
fn index(self, slice: &[T]) -> &[T] {
if *self.end() == usize::MAX {
slice_end_index_overflow_fail();
}
self.into_slice_range().index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if *self.end() == usize::MAX {
slice_end_index_overflow_fail();
}
self.into_slice_range().index_mut(slice)
}
}
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
(0..=self.end).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..=self.end).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..=self.end).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..=self.end).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
(0..=self.end).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..=self.end).index_mut(slice)
}
}
/// Performs bounds-checking of a range.
///
/// This method is similar to [`Index::index`] for slices, but it returns a
/// [`Range`] equivalent to `range`. You can use this method to turn any range
/// into `start` and `end` values.
///
/// `bounds` is the range of the slice to use for bounds-checking. It should
/// be a [`RangeTo`] range that ends at the length of the slice.
///
/// The returned [`Range`] is safe to pass to [`slice::get_unchecked`] and
/// [`slice::get_unchecked_mut`] for slices with the given range.
///
/// [`Range`]: ops::Range
/// [`RangeTo`]: ops::RangeTo
/// [`slice::get_unchecked`]: slice::get_unchecked
/// [`slice::get_unchecked_mut`]: slice::get_unchecked_mut
///
/// # Panics
///
/// Panics if `range` would be out of bounds.
///
/// # Examples
///
/// ```
/// #![feature(slice_range)]
///
/// use std::slice;
///
/// let v = [10, 40, 30];
/// assert_eq!(1..2, slice::range(1..2, ..v.len()));
/// assert_eq!(0..2, slice::range(..2, ..v.len()));
/// assert_eq!(1..3, slice::range(1.., ..v.len()));
/// ```
///
/// Panics when [`Index::index`] would panic:
///
/// ```should_panic
/// #![feature(slice_range)]
///
/// use std::slice;
///
/// slice::range(2..1, ..3);
/// ```
///
/// ```should_panic
/// #![feature(slice_range)]
///
/// use std::slice;
///
/// slice::range(1..4, ..3);
/// ```
///
/// ```should_panic
/// #![feature(slice_range)]
///
/// use std::slice;
///
/// slice::range(1..=usize::MAX, ..3);
/// ```
///
/// [`Index::index`]: ops::Index::index
#[track_caller]
#[unstable(feature = "slice_range", issue = "76393")]
pub fn range<R>(range: R, bounds: ops::RangeTo<usize>) -> ops::Range<usize>
where
R: ops::RangeBounds<usize>,
{
let len = bounds.end;
let start: ops::Bound<&usize> = range.start_bound();
let start = match start {
ops::Bound::Included(&start) => start,
ops::Bound::Excluded(start) => {
start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
}
ops::Bound::Unbounded => 0,
};
let end: ops::Bound<&usize> = range.end_bound();
let end = match end {
ops::Bound::Included(end) => {
end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
}
ops::Bound::Excluded(&end) => end,
ops::Bound::Unbounded => len,
};
if start > end {
slice_index_order_fail(start, end);
}
if end > len {
slice_end_index_len_fail(end, len);
}
ops::Range { start, end }
}
/// Convert pair of `ops::Bound`s into `ops::Range` without performing any bounds checking and (in debug) overflow checking
fn into_range_unchecked(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
use ops::Bound;
let start = match start {
Bound::Included(i) => i,
Bound::Excluded(i) => i + 1,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(i) => i + 1,
Bound::Excluded(i) => i,
Bound::Unbounded => len,
};
start..end
}
/// Convert pair of `ops::Bound`s into `ops::Range`.
/// Returns `None` on overflowing indices.
fn into_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> Option<ops::Range<usize>> {
use ops::Bound;
let start = match start {
Bound::Included(start) => start,
Bound::Excluded(start) => start.checked_add(1)?,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(end) => end.checked_add(1)?,
Bound::Excluded(end) => end,
Bound::Unbounded => len,
};
// Don't bother with checking `start < end` and `end <= len`
// since these checks are handled by `Range` impls
Some(start..end)
}
/// Convert pair of `ops::Bound`s into `ops::Range`.
/// Panics on overflowing indices.
fn into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
use ops::Bound;
let start = match start {
Bound::Included(start) => start,
Bound::Excluded(start) => {
start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
}
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(end) => {
end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
}
Bound::Excluded(end) => end,
Bound::Unbounded => len,
};
// Don't bother with checking `start < end` and `end <= len`
// since these checks are handled by `Range` impls
start..end
}
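// Worked example: for len = 5, the bound pair (Bound::Excluded(0), Bound::Included(2))
// maps to the range 1..3 under all three helpers above; they differ only in how an
// overflowing end such as Bound::Included(usize::MAX) is reported (unchecked
// arithmetic, `None`, or a panic, respectively).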
#[stable(feature = "slice_index_with_ops_bound_pair", since = "1.53.0")]
unsafe impl<T> SliceIndex<[T]> for (ops::Bound<usize>, ops::Bound<usize>) {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&Self::Output> {
into_range(slice.len(), self)?.get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut Self::Output> {
into_range(slice.len(), self)?.get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked(slice) }
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked_mut(slice) }
}
#[inline]
fn index(self, slice: &[T]) -> &Self::Output {
into_slice_range(slice.len(), self).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut Self::Output {
into_slice_range(slice.len(), self).index_mut(slice)
}
}
binary_converter.py | def binaryToInt(string: str, oneChar="1", zeroChar="0"):
out = 0
for i in range(len(string)):
currentDigit = None
if string[len(string) - 1 - i] == oneChar:
currentDigit = 1
elif string[len(string) - 1 - i] == zeroChar:
currentDigit = 0
out += (2**i) * currentDigit
return(out)
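# Worked example: "1011" is read right to left, so
# binaryToInt("1011") = 1*2**0 + 1*2**1 + 0*2**2 + 1*2**3 = 1 + 2 + 0 + 8 = 11.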
if __name__ == "__main__":
print(binaryToInt("1011")) | binaryToInt |
label.rs | use crate::common::YaSerdeAttribute;
use proc_macro2::Ident;
pub fn build_label_name(
label: &Ident,
field_attrs: &YaSerdeAttribute,
default_namespace: &Option<String>,
) -> String {
    let prefix = if default_namespace == &field_attrs.prefix {
        "".to_string()
    } else {
field_attrs
.prefix
.clone()
.map_or("".to_string(), |prefix| prefix + ":")
};
let label = field_attrs
.rename
.get_value()
.unwrap_or_else(|| label.to_string());
format!("{}{}", prefix, label)
}
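// Example (illustrative attribute values): with prefix = Some("ns") and no rename,
// a field `foo` serializes as "ns:foo"; if "ns" is also the default namespace the
// prefix is elided, giving plain "foo".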
editorAccessor.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import {IPosition, ICommonCodeEditor} from 'vs/editor/common/editorCommon';
import strings = require('vs/base/common/strings');
import snippets = require('vs/editor/contrib/snippet/common/snippet');
import {Range} from 'vs/editor/common/core/range';
import {SnippetController} from 'vs/editor/contrib/snippet/common/snippetController';
import emmet = require('emmet');
export interface IGrammarContributions {
getGrammar(mode: string): string;
}
export class EditorAccessor implements emmet.Editor {
private _editor: ICommonCodeEditor;
private _syntaxProfiles: any;
private _excludedLanguages: any;
private _grammars: IGrammarContributions;
private _hasMadeEdits: boolean;
private emmetSupportedModes = ['html', 'xhtml', 'css', 'xml', 'xsl', 'haml', 'jade', 'jsx', 'slim', 'scss', 'sass', 'less', 'stylus', 'styl'];
constructor(editor: ICommonCodeEditor, syntaxProfiles: any, excludedLanguages: String[], grammars: IGrammarContributions) {
this._editor = editor;
this._syntaxProfiles = syntaxProfiles;
this._excludedLanguages = excludedLanguages;
this._hasMadeEdits = false;
this._grammars = grammars;
}
public isEmmetEnabledMode(): boolean {
return this.emmetSupportedModes.indexOf(this.getSyntax()) !== -1;
}
public getSelectionRange(): emmet.Range {
let selection = this._editor.getSelection();
return {
start: this.getOffsetFromPosition(selection.getStartPosition()),
end: this.getOffsetFromPosition(selection.getEndPosition())
};
}
public getCurrentLineRange(): emmet.Range {
let currentLine = this._editor.getSelection().startLineNumber;
return {
start: this.getOffsetFromPosition({ lineNumber: currentLine, column: 1 }),
end: this.getOffsetFromPosition({ lineNumber: currentLine + 1, column: 1 })
};
}
public getCaretPos(): number {
let selectionStart = this._editor.getSelection().getStartPosition();
return this.getOffsetFromPosition(selectionStart);
}
public setCaretPos(pos: number): void {
this.createSelection(pos);
}
public getCurrentLine(): string {
let selectionStart = this._editor.getSelection().getStartPosition();
return this._editor.getModel().getLineContent(selectionStart.lineNumber);
}
public onBeforeEmmetAction(): void {
this._hasMadeEdits = false;
}
public replaceContent(value: string, start: number, end: number, no_indent: boolean): void {
//console.log('value', value);
let startPosition = this.getPositionFromOffset(start);
let endPosition = this.getPositionFromOffset(end);
// test if < or </ are located before the replace range. Either replace these too, or block the expansion
var currentLine = this._editor.getModel().getLineContent(startPosition.lineNumber).substr(0, startPosition.column - 1); // content before the replaced range
var match = currentLine.match(/<[/]?$/);
if (match) {
if (strings.startsWith(value, match[0])) {
startPosition = { lineNumber: startPosition.lineNumber, column: startPosition.column - match[0].length };
} else {
return; // ignore
}
}
// If this is the first edit in this "transaction", push an undo stop before them
if (!this._hasMadeEdits) {
this._hasMadeEdits = true;
this._editor.pushUndoStop();
}
let range = new Range(startPosition.lineNumber, startPosition.column, endPosition.lineNumber, endPosition.column);
let codeSnippet = snippets.CodeSnippet.fromEmmet(value);
SnippetController.get(this._editor).runWithReplaceRange(codeSnippet, range);
}
public onAfterEmmetAction(): void {
// If there were any edits in this "transaction", push an undo stop after them
if (this._hasMadeEdits) {
this._editor.pushUndoStop();
}
}
public getContent(): string {
return this._editor.getModel().getValue();
}
public createSelection(startOffset: number, endOffset?: number): void {
let startPosition = this.getPositionFromOffset(startOffset);
let endPosition = null;
if (!endOffset) {
endPosition = startPosition;
} else {
endPosition = this.getPositionFromOffset(endOffset);
}
let range = new Range(startPosition.lineNumber, startPosition.column, endPosition.lineNumber, endPosition.column);
this._editor.setSelection(range);
this._editor.revealRange(range);
}
public getSyntax(): string {
let position = this._editor.getSelection().getStartPosition();
let modeId = this._editor.getModel().getModeIdAtPosition(position.lineNumber, position.column);
let syntax = modeId.split('.').pop();
if (this._excludedLanguages.indexOf(syntax) !== -1) {
return '';
}
// user can overwrite the syntax using the emmet syntaxProfiles setting
let profile = this.getSyntaxProfile(syntax);
if (profile) {
return profile;
}
if (this.emmetSupportedModes.indexOf(syntax) !== -1) {
return syntax;
}
if (/\b(razor|handlebars)\b/.test(syntax)) { // treat like html
return 'html';
}
if (/\b(typescriptreact|javascriptreact)\b/.test(syntax)) { // treat tsx like jsx
return 'jsx';
}
if (syntax === 'sass-indented') { // map sass-indented to sass
return 'sass';
}
syntax = this.checkParentMode(syntax);
return syntax;
}
private getSyntaxProfile(syntax: string): string {
const profile = this._syntaxProfiles[syntax];
if (profile && typeof profile === 'string') {
return profile;
}
}
private checkParentMode(syntax: string): string {
let languageGrammar = this._grammars.getGrammar(syntax);
if (!languageGrammar) {
return syntax;
}
let languages = languageGrammar.split('.');
let thisLanguage = languages[languages.length - 1];
if (syntax !== thisLanguage || languages.length < 2) {
return syntax;
}
for (let i = 1; i < languages.length; i++) {
const language = languages[languages.length - i];
if (this.emmetSupportedModes.indexOf(language) !== -1) {
return language;
}
}
return syntax;
}
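	// Example (hypothetical grammar string): for a grammar 'text.html.mustache',
	// checkParentMode('mustache') splits the scopes and walks them from the end,
	// returning 'html', the nearest emmet-supported parent mode.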
public getProfileName(): string {
return null;
}
public prompt(title: string): any {
//
}
public getSelection(): string {
let selection = this._editor.getSelection();
let model = this._editor.getModel();
let start = selection.getStartPosition();
let end = selection.getEndPosition();
let range = new Range(start.lineNumber, start.column, end.lineNumber, end.column);
return model.getValueInRange(range);
}
public getFilePath(): string {
return this._editor.getModel().uri.fsPath;
}
private getPositionFromOffset(offset: number): IPosition {
return this._editor.getModel().getPositionAt(offset);
}
private getOffsetFromPosition(position: IPosition): number {
return this._editor.getModel().getOffsetAt(position);
}
}
receivers_builder.go | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder // import "go.opentelemetry.io/collector/service/internal/builder"
import (
"context"
"errors"
"fmt"
"go.uber.org/multierr"
"go.uber.org/zap"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/service/internal/components"
"go.opentelemetry.io/collector/service/internal/fanoutconsumer"
)
var errUnusedReceiver = errors.New("receiver defined but not used by any pipeline")
// builtReceiver is a receiver that is built based on a config. It can have
// a trace and/or a metrics component.
type builtReceiver struct {
logger *zap.Logger
receiver component.Receiver
}
// Start starts the receiver.
func (rcv *builtReceiver) Start(ctx context.Context, host component.Host) error {
return rcv.receiver.Start(ctx, components.NewHostWrapper(host, rcv.logger))
}
// Shutdown stops the receiver.
func (rcv *builtReceiver) Shutdown(ctx context.Context) error {
return rcv.receiver.Shutdown(ctx)
}
// Receivers is a map of receivers created from receiver configs.
type Receivers map[config.ComponentID]*builtReceiver
// ShutdownAll stops all receivers.
func (rcvs Receivers) ShutdownAll(ctx context.Context) error {
var err error
for _, rcv := range rcvs {
err = multierr.Append(err, rcv.Shutdown(ctx))
}
return err
}
// StartAll starts all receivers.
func (rcvs Receivers) StartAll(ctx context.Context, host component.Host) error {
for _, rcv := range rcvs {
rcv.logger.Info("Receiver is starting...")
if err := rcv.Start(ctx, host); err != nil {
return err
}
rcv.logger.Info("Receiver started.")
}
return nil
}
// receiversBuilder builds receivers from config.
type receiversBuilder struct {
config *config.Config
builtPipelines BuiltPipelines
factories map[config.Type]component.ReceiverFactory
}
// BuildReceivers builds Receivers from config.
func BuildReceivers(
settings component.TelemetrySettings,
buildInfo component.BuildInfo,
cfg *config.Config,
builtPipelines BuiltPipelines,
factories map[config.Type]component.ReceiverFactory,
) (Receivers, error) {
rb := &receiversBuilder{cfg, builtPipelines, factories}
receivers := make(Receivers)
for recvID, recvCfg := range cfg.Receivers {
set := component.ReceiverCreateSettings{
TelemetrySettings: component.TelemetrySettings{
Logger: settings.Logger.With(
zap.String(components.ZapKindKey, components.ZapKindReceiver),
zap.String(components.ZapNameKey, recvID.String())),
TracerProvider: settings.TracerProvider,
MeterProvider: settings.MeterProvider,
MetricsLevel: cfg.Telemetry.Metrics.Level,
},
BuildInfo: buildInfo,
}
rcv, err := rb.buildReceiver(context.Background(), set, recvID, recvCfg)
if err != nil {
if errors.Is(err, errUnusedReceiver) {
set.Logger.Info("Ignoring receiver as it is not used by any pipeline")
continue
}
return nil, err
}
receivers[recvID] = rcv
}
return receivers, nil
}
// hasReceiver returns true if the pipeline is attached to specified receiver.
func hasReceiver(pipeline *config.Pipeline, receiverID config.ComponentID) bool {
for _, id := range pipeline.Receivers {
if id == receiverID {
return true
}
}
return false
}
type attachedPipelines map[config.DataType][]*builtPipeline
func (rb *receiversBuilder) findPipelinesToAttach(receiverID config.ComponentID) (attachedPipelines, error) {
// A receiver may be attached to multiple pipelines. Pipelines may consume different
// data types. We need to compile the list of pipelines of each type that must be
// attached to this receiver according to configuration.
pipelinesToAttach := make(attachedPipelines)
// Iterate over all pipelines.
for pipelineID, pipelineCfg := range rb.config.Service.Pipelines {
// Get the first processor of the pipeline.
pipelineProcessor := rb.builtPipelines[pipelineID]
if pipelineProcessor == nil {
return nil, fmt.Errorf("cannot find pipeline %q", pipelineID)
}
// Is this receiver attached to the pipeline?
if hasReceiver(pipelineCfg, receiverID) {
if _, exists := pipelinesToAttach[pipelineID.Type()]; !exists {
pipelinesToAttach[pipelineID.Type()] = make([]*builtPipeline, 0)
}
// Yes, add it to the list of pipelines of corresponding data type.
pipelinesToAttach[pipelineID.Type()] = append(pipelinesToAttach[pipelineID.Type()], pipelineProcessor)
}
}
return pipelinesToAttach, nil
}
func attachReceiverToPipelines(
ctx context.Context,
set component.ReceiverCreateSettings,
factory component.ReceiverFactory,
dataType config.DataType,
id config.ComponentID,
cfg config.Receiver,
rcv *builtReceiver,
builtPipelines []*builtPipeline,
) error {
// There are pipelines of the specified data type that must be attached to
// the receiver. Create the receiver of corresponding data type and make
// sure its output is fanned out to all attached pipelines.
var err error
var createdReceiver component.Receiver
switch dataType {
case config.TracesDataType:
junction := buildFanoutTraceConsumer(builtPipelines)
createdReceiver, err = factory.CreateTracesReceiver(ctx, set, cfg, junction)
case config.MetricsDataType:
junction := buildFanoutMetricConsumer(builtPipelines)
createdReceiver, err = factory.CreateMetricsReceiver(ctx, set, cfg, junction)
case config.LogsDataType:
junction := buildFanoutLogConsumer(builtPipelines)
createdReceiver, err = factory.CreateLogsReceiver(ctx, set, cfg, junction)
default:
err = component.ErrDataTypeIsNotSupported
}
if err != nil {
if errors.Is(err, component.ErrDataTypeIsNotSupported) {
return fmt.Errorf(
"receiver %v does not support %s but it was used in a %s pipeline",
id, dataType, dataType)
}
return fmt.Errorf("cannot create receiver %v: %w", id, err)
}
// Check if the factory really created the receiver.
if createdReceiver == nil {
return fmt.Errorf("factory for %v produced a nil receiver", id)
}
if rcv.receiver != nil {
// The receiver was previously created for this config. This can happen if the
// same receiver type supports more than one data type. In that case we expect
// that CreateTracesReceiver and CreateMetricsReceiver return the same value.
if rcv.receiver != createdReceiver {
return fmt.Errorf(
"factory for %q is implemented incorrectly: "+
"CreateTracesReceiver, CreateMetricsReceiver and CreateLogsReceiver must return "+
"the same receiver pointer when creating receivers of different data types",
id,
)
}
}
rcv.receiver = createdReceiver
set.Logger.Info("Receiver was built.", zap.String("datatype", string(dataType)))
return nil
}
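// Illustrative sketch (hypothetical factory, assumed names): a receiver type
// that serves several data types should cache and return one shared component
// instance, e.g. by having CreateTracesReceiver and CreateMetricsReceiver both
// return f.sharedInstance(cfg), so that the pointer-equality check above passes.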
func (rb *receiversBuilder) buildReceiver(ctx context.Context, set component.ReceiverCreateSettings, id config.ComponentID, cfg config.Receiver) (*builtReceiver, error) {
// First find pipelines that must be attached to this receiver.
pipelinesToAttach, err := rb.findPipelinesToAttach(id)
if err != nil {
return nil, err
}
// Prepare to build the receiver.
factory := rb.factories[id.Type()]
if factory == nil {
return nil, fmt.Errorf("receiver factory not found for: %v", cfg.ID())
}
rcv := &builtReceiver{
logger: set.Logger,
}
	// Now we have a list of pipelines broken down by data type. Iterate over each data type.
for dataType, pipelines := range pipelinesToAttach {
if len(pipelines) == 0 {
// No pipelines of this data type are attached to this receiver.
continue
}
// Attach the corresponding part of the receiver to all pipelines that require
// this data type.
if err = attachReceiverToPipelines(ctx, set, factory, dataType, id, cfg, rcv, pipelines); err != nil {
return nil, err
}
}
if rcv.receiver == nil {
return nil, errUnusedReceiver
}
return rcv, nil
}
func buildFanoutTraceConsumer(pipelines []*builtPipeline) consumer.Traces {
var pipelineConsumers []consumer.Traces
for _, pipeline := range pipelines {
pipelineConsumers = append(pipelineConsumers, pipeline.firstTC)
}
// Create a junction point that fans out to all pipelines.
return fanoutconsumer.NewTraces(pipelineConsumers)
}
func buildFanoutMetricConsumer(pipelines []*builtPipeline) consumer.Metrics {
var pipelineConsumers []consumer.Metrics
for _, pipeline := range pipelines {
pipelineConsumers = append(pipelineConsumers, pipeline.firstMC)
}
// Create a junction point that fans out to all pipelines.
return fanoutconsumer.NewMetrics(pipelineConsumers)
}
func buildFanoutLogConsumer(pipelines []*builtPipeline) consumer.Logs {
var pipelineConsumers []consumer.Logs
for _, pipeline := range pipelines {
pipelineConsumers = append(pipelineConsumers, pipeline.firstLC)
}
// Create a junction point that fans out to all pipelines.
return fanoutconsumer.NewLogs(pipelineConsumers)
}
| BuildReceivers |
galileo.ts | /*
* Oozaru JavaScript game engine
* Copyright (c) 2015-2020, Fat Cerberus
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of miniSphere nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**/
import BufferStream from './buffer-stream.js';
import * as util from './utility.js';
interface Glyph
{
pixelData: Uint8Array;
width: number;
height: number;
u: number;
v: number;
}
let activeDrawTarget: DrawTarget | null = null;
let activeShader: Shader | null = null;
let gl: WebGLRenderingContext;
let screen: HTMLCanvasElement;
export
enum BlendOp
{
Default,
Add,
Average,
CopyAlpha,
CopyRGB,
Invert,
Multiply,
Replace,
Subtract,
}
export
enum DepthOp
{
AlwaysPass,
Equal,
Greater,
GreaterOrEqual,
Less,
LessOrEqual,
NeverPass,
NotEqual,
}
export
interface RGBA
{
r: number;
g: number;
b: number;
a: number;
}
export
interface Rectangle
{
x: number;
y: number;
w: number;
h: number;
}
export
enum ShapeType
{
Fan,
Lines,
LineLoop,
LineStrip,
Points,
Triangles,
TriStrip,
}
export
interface Vertex
{
x: number;
y: number;
z?: number;
u?: number;
v?: number;
color?: RGBA;
}
export default
class Galileo extends null
{
static async initialize(canvas: HTMLCanvasElement)
{
const glOptions = { alpha: false, preserveDrawingBuffer: true };
const webGL = (canvas.getContext('webgl2', glOptions)
|| canvas.getContext('webgl', glOptions)
|| canvas.getContext('experimental-webgl', glOptions)) as WebGLRenderingContext | null;
if (webGL === null)
throw new Error(`Unable to acquire WebGL rendering context`);
webGL.clearColor(0.0, 0.0, 0.0, 1.0);
webGL.clearDepth(1.0);
webGL.blendEquation(webGL.FUNC_ADD);
webGL.blendFunc(webGL.SRC_ALPHA, webGL.ONE_MINUS_SRC_ALPHA);
webGL.depthFunc(webGL.LEQUAL);
webGL.enable(webGL.BLEND);
webGL.enable(webGL.DEPTH_TEST);
webGL.enable(webGL.SCISSOR_TEST);
gl = webGL;
screen = canvas;
DrawTarget.Screen.activate();
}
}
export
class Color
{
r: number;
g: number;
b: number;
a: number;
constructor(r: number, g: number, b: number, a = 1.0)
{
this.r = r;
this.g = g;
this.b = b;
this.a = a;
}
}
export
class DrawTarget
{
private blendOp_ = BlendOp.Default;
private clipping: Rectangle;
private depthOp_ = DepthOp.LessOrEqual;
private frameBuffer: WebGLFramebuffer | null;
private texture: Texture | null;
static get Screen()
{
const surface = Object.create(DrawTarget.prototype) as DrawTarget;
surface.blendOp_ = BlendOp.Default;
surface.clipping = { x: 0, y: 0, w: screen.width, h: screen.height };
surface.depthOp_ = DepthOp.LessOrEqual;
surface.frameBuffer = null;
surface.texture = null;
Object.defineProperty(DrawTarget, 'Screen', {
writable: false,
enumerable: false,
configurable: true,
value: surface,
});
return surface;
}
constructor(texture: Texture)
{
const frameBuffer = gl.createFramebuffer();
const depthBuffer = gl.createRenderbuffer();
if (frameBuffer === null || depthBuffer === null)
throw new Error(`Unable to create WebGL framebuffer object`);
// in order to set up the FBO we need to change the current FB binding, so make sure it gets
// changed back afterwards.
const previousFBO = gl.getParameter(gl.FRAMEBUFFER_BINDING);
gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture.hwTexture, 0);
gl.bindRenderbuffer(gl.RENDERBUFFER, depthBuffer);
gl.renderbufferStorage(gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, texture.width, texture.height);
gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, depthBuffer);
gl.bindFramebuffer(gl.FRAMEBUFFER, previousFBO);
this.clipping = { x: 0, y: 0, w: texture.width, h: texture.height };
this.frameBuffer = frameBuffer;
this.texture = texture;
}
get blendOp()
{
return this.blendOp_;
}
set blendOp(value)
{
this.blendOp_ = value;
if (activeDrawTarget === this)
applyBlendOp(value);
}
get depthOp()
{
return this.depthOp_;
}
set depthOp(value)
{
this.depthOp_ = value;
if (activeDrawTarget === this)
applyDepthOp(value);
}
get height()
{
return this.texture?.height ?? screen.height;
}
get width()
{
return this.texture?.width ?? screen.width;
}
activate()
{
if (activeDrawTarget === this)
return;
gl.bindFramebuffer(gl.FRAMEBUFFER, this.frameBuffer);
if (this.texture !== null)
gl.viewport(0, 0, this.texture.width, this.texture.height);
else
gl.viewport(0, 0, screen.width, screen.height);
gl.scissor(this.clipping.x, this.clipping.y, this.clipping.w, this.clipping.h);
applyBlendOp(this.blendOp_);
applyDepthOp(this.depthOp_);
activeDrawTarget = this;
}
clipTo(x: number, y: number, width: number, height: number)
{
this.clipping.x = x;
this.clipping.y = y;
this.clipping.w = width;
this.clipping.h = height;
if (this === activeDrawTarget)
gl.scissor(x, this.height - y - height, width, height);
}
unclip()
{
this.clipTo(0, 0, this.width, this.height);
}
}
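// Usage sketch (assumed flow): render-to-texture by activating an offscreen target.
//   const target = new DrawTarget(new Texture(256, 256));
//   target.activate();             // subsequent draws land in the texture
//   DrawTarget.Screen.activate();  // back to the backbuffer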
export
class |
{
private atlas: Texture;
private glyphs: Glyph[] = [];
private lineHeight = 0;
private maxWidth = 0;
private numGlyphs = 0;
private stride: number;
private vertexBuffer = new VertexBuffer();
static async fromFile(url: string)
{
const data = await util.fetchRawFile(url);
return new this(data);
}
constructor(rfnData: BufferSource)
{
let stream = new BufferStream(rfnData);
let rfn = stream.readStruct({
signature: 'string/4',
version: 'uint16-le',
numGlyphs: 'uint16-le',
reserved: 'reserve/248',
});
if (rfn.signature !== '.rfn')
throw new Error(`Unable to load RFN font file`);
		if (rfn.version !== 2)
			throw new Error(`Unsupported RFN version '${rfn.version}'`);
if (rfn.numGlyphs <= 0)
throw new Error(`Malformed RFN font (no glyphs)`);
const numAcross = Math.ceil(Math.sqrt(rfn.numGlyphs));
this.stride = 1.0 / numAcross;
for (let i = 0; i < rfn.numGlyphs; ++i) {
let charInfo = stream.readStruct({
width: 'uint16-le',
height: 'uint16-le',
reserved: 'reserve/28',
});
this.lineHeight = Math.max(this.lineHeight, charInfo.height);
this.maxWidth = Math.max(this.maxWidth, charInfo.width);
const pixelData = stream.readBytes(charInfo.width * charInfo.height * 4);
this.glyphs.push({
width: charInfo.width,
height: charInfo.height,
u: i % numAcross / numAcross,
v: 1.0 - Math.floor(i / numAcross) / numAcross,
pixelData,
});
}
this.atlas = new Texture(numAcross * this.maxWidth, numAcross * this.lineHeight);
this.numGlyphs = rfn.numGlyphs;
for (let i = 0; i < this.numGlyphs; ++i) {
const glyph = this.glyphs[i];
const x = i % numAcross * this.maxWidth;
const y = Math.floor(i / numAcross) * this.lineHeight;
this.atlas.upload(glyph.pixelData, x, y, glyph.width, glyph.height);
}
}
get height()
{
return this.lineHeight;
}
drawText(text: string, color: RGBA, matrix: Matrix)
{
if (text === "")
return; // empty string, nothing to render
if (activeShader !== null) {
activeShader.activate(true);
activeShader.transform(matrix);
}
this.atlas.activate(0);
let cp: number | undefined;
let ptr = 0;
let x = 0;
const vertices: Vertex[] = [];
while ((cp = text.codePointAt(ptr++)) !== undefined) {
if (cp > 0xFFFF) // surrogate pair?
++ptr;
cp = cp == 0x20AC ? 128
: cp == 0x201A ? 130
: cp == 0x0192 ? 131
: cp == 0x201E ? 132
: cp == 0x2026 ? 133
: cp == 0x2020 ? 134
: cp == 0x2021 ? 135
: cp == 0x02C6 ? 136
: cp == 0x2030 ? 137
: cp == 0x0160 ? 138
: cp == 0x2039 ? 139
: cp == 0x0152 ? 140
: cp == 0x017D ? 142
: cp == 0x2018 ? 145
: cp == 0x2019 ? 146
: cp == 0x201C ? 147
: cp == 0x201D ? 148
: cp == 0x2022 ? 149
: cp == 0x2013 ? 150
: cp == 0x2014 ? 151
: cp == 0x02DC ? 152
: cp == 0x2122 ? 153
: cp == 0x0161 ? 154
: cp == 0x203A ? 155
: cp == 0x0153 ? 156
: cp == 0x017E ? 158
: cp == 0x0178 ? 159
: cp;
if (cp >= this.numGlyphs)
cp = 0x1A;
const glyph = this.glyphs[cp];
const x1 = x, x2 = x1 + glyph.width;
const y1 = 0, y2 = y1 + glyph.height;
const u1 = glyph.u;
const u2 = u1 + glyph.width / this.maxWidth * this.stride;
const v1 = glyph.v;
const v2 = v1 - glyph.height / this.lineHeight * this.stride;
vertices.push(
{ x: x1, y: y1, u: u1, v: v1, color },
{ x: x2, y: y1, u: u2, v: v1, color },
{ x: x1, y: y2, u: u1, v: v2, color },
{ x: x2, y: y1, u: u2, v: v1, color },
{ x: x1, y: y2, u: u1, v: v2, color },
{ x: x2, y: y2, u: u2, v: v2, color },
);
x += glyph.width;
}
this.vertexBuffer.upload(vertices);
Prim.draw(this.vertexBuffer, null, ShapeType.Triangles);
}
widthOf(text: string)
{
let cp: number | undefined;
let ptr = 0;
let width = 0;
while ((cp = text.codePointAt(ptr++)) !== undefined) {
if (cp > 0xFFFF) // surrogate pair?
++ptr;
cp = cp == 0x20AC ? 128
: cp == 0x201A ? 130
: cp == 0x0192 ? 131
: cp == 0x201E ? 132
: cp == 0x2026 ? 133
: cp == 0x2020 ? 134
: cp == 0x2021 ? 135
: cp == 0x02C6 ? 136
: cp == 0x2030 ? 137
: cp == 0x0160 ? 138
: cp == 0x2039 ? 139
: cp == 0x0152 ? 140
: cp == 0x017D ? 142
: cp == 0x2018 ? 145
: cp == 0x2019 ? 146
: cp == 0x201C ? 147
: cp == 0x201D ? 148
: cp == 0x2022 ? 149
: cp == 0x2013 ? 150
: cp == 0x2014 ? 151
: cp == 0x02DC ? 152
: cp == 0x2122 ? 153
: cp == 0x0161 ? 154
: cp == 0x203A ? 155
: cp == 0x0153 ? 156
: cp == 0x017E ? 158
: cp == 0x0178 ? 159
: cp;
if (cp >= this.numGlyphs)
cp = 0x1A;
const glyph = this.glyphs[cp];
width += glyph.width;
}
return width;
}
wordWrap(text: string, wrapWidth: number)
{
const lines: string[] = [];
let codepoints: number[] = [];
let currentLine = "";
let lineWidth = 0;
let lineFinished = false;
let wordWidth = 0;
let wordFinished = false;
let cp: number | undefined;
let ptr = 0;
while ((cp = text.codePointAt(ptr++)) !== undefined) {
if (cp > 0xFFFF) // surrogate pair?
++ptr;
cp = cp == 0x20AC ? 128
: cp == 0x201A ? 130
: cp == 0x0192 ? 131
: cp == 0x201E ? 132
: cp == 0x2026 ? 133
: cp == 0x2020 ? 134
: cp == 0x2021 ? 135
: cp == 0x02C6 ? 136
: cp == 0x2030 ? 137
: cp == 0x0160 ? 138
: cp == 0x2039 ? 139
: cp == 0x0152 ? 140
: cp == 0x017D ? 142
: cp == 0x2018 ? 145
: cp == 0x2019 ? 146
: cp == 0x201C ? 147
: cp == 0x201D ? 148
: cp == 0x2022 ? 149
: cp == 0x2013 ? 150
: cp == 0x2014 ? 151
: cp == 0x02DC ? 152
: cp == 0x2122 ? 153
: cp == 0x0161 ? 154
: cp == 0x203A ? 155
: cp == 0x0153 ? 156
: cp == 0x017E ? 158
: cp == 0x0178 ? 159
: cp;
if (cp >= this.numGlyphs)
cp = 0x1A;
const glyph = this.glyphs[cp];
switch (cp) {
case 13: case 10: // newline
if (cp === 13 && text.codePointAt(ptr) == 10)
++ptr; // treat CRLF as a single newline
lineFinished = true;
break;
			case 9: // tab (ASCII HT)
codepoints.push(cp);
wordWidth += this.glyphs[32].width * 3;
wordFinished = true;
break;
case 32: // space
codepoints.push(cp);
wordWidth += glyph.width;
wordFinished = true;
break;
default:
codepoints.push(cp);
wordWidth += glyph.width;
break;
}
if (wordFinished || lineFinished) {
currentLine += String.fromCodePoint(...codepoints);
lineWidth += wordWidth;
codepoints.length = 0;
wordWidth = 0;
wordFinished = false;
}
if (lineWidth + wordWidth > wrapWidth || lineFinished) {
lines.push(currentLine);
currentLine = "";
lineWidth = 0;
lineFinished = false;
}
}
currentLine += String.fromCodePoint(...codepoints);
if (currentLine !== "")
lines.push(currentLine);
return lines;
}
}
export
class IndexBuffer
{
hwBuffer: WebGLBuffer | null = null;
length: number = 0;
streamable: boolean;
constructor(indices?: Iterable<number>)
{
this.streamable = indices === undefined;
if (indices !== undefined)
this.upload(indices);
}
activate()
{
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.hwBuffer);
}
upload(indices: Iterable<number>)
{
const values = new Uint16Array(indices);
const hwBuffer = gl.createBuffer();
if (hwBuffer === null)
throw new Error(`Unable to create WebGL index buffer object`);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, hwBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, values, this.streamable ? gl.STREAM_DRAW : gl.STATIC_DRAW);
gl.deleteBuffer(this.hwBuffer);
this.hwBuffer = hwBuffer;
this.length = values.length;
}
}
export
class Matrix
{
values: Float32Array;
static get Identity()
{
return new this().identity();
}
constructor(values?: ArrayLike<number>)
{
if (values !== undefined)
this.values = new Float32Array(values);
else
this.values = new Float32Array(4 * 4);
}
clone()
{
const dolly = new Matrix();
dolly.values.set(this.values);
return dolly;
}
composeWith(other: Matrix)
{
const m1 = this.values;
const m2 = other.values;
// multiply from the left (i.e. `other * this`). this emulates the way Allegro's
// `al_compose_transform()` function works--that is, transformations are logically applied in
// the order they're specified, rather than reversed as in classic OpenGL.
const a00 = m2[0], a01 = m2[1], a02 = m2[2], a03 = m2[3];
const a10 = m2[4], a11 = m2[5], a12 = m2[6], a13 = m2[7];
const a20 = m2[8], a21 = m2[9], a22 = m2[10], a23 = m2[11];
const a30 = m2[12], a31 = m2[13], a32 = m2[14], a33 = m2[15];
const b00 = m1[0], b01 = m1[1], b02 = m1[2], b03 = m1[3];
const b10 = m1[4], b11 = m1[5], b12 = m1[6], b13 = m1[7];
const b20 = m1[8], b21 = m1[9], b22 = m1[10], b23 = m1[11];
const b30 = m1[12], b31 = m1[13], b32 = m1[14], b33 = m1[15];
// multiply the matrices together. funny story: I still don't understand how this
// works. but it does, so...
m1[0] = b00 * a00 + b01 * a10 + b02 * a20 + b03 * a30;
m1[1] = b00 * a01 + b01 * a11 + b02 * a21 + b03 * a31;
m1[2] = b00 * a02 + b01 * a12 + b02 * a22 + b03 * a32;
m1[3] = b00 * a03 + b01 * a13 + b02 * a23 + b03 * a33;
m1[4] = b10 * a00 + b11 * a10 + b12 * a20 + b13 * a30;
m1[5] = b10 * a01 + b11 * a11 + b12 * a21 + b13 * a31;
m1[6] = b10 * a02 + b11 * a12 + b12 * a22 + b13 * a32;
m1[7] = b10 * a03 + b11 * a13 + b12 * a23 + b13 * a33;
m1[8] = b20 * a00 + b21 * a10 + b22 * a20 + b23 * a30;
m1[9] = b20 * a01 + b21 * a11 + b22 * a21 + b23 * a31;
m1[10] = b20 * a02 + b21 * a12 + b22 * a22 + b23 * a32;
m1[11] = b20 * a03 + b21 * a13 + b22 * a23 + b23 * a33;
m1[12] = b30 * a00 + b31 * a10 + b32 * a20 + b33 * a30;
m1[13] = b30 * a01 + b31 * a11 + b32 * a21 + b33 * a31;
m1[14] = b30 * a02 + b31 * a12 + b32 * a22 + b33 * a32;
m1[15] = b30 * a03 + b31 * a13 + b32 * a23 + b33 * a33;
return this;
}
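	// Compose-order sketch: with the left-multiply convention above, transformations
	// apply in the order written, e.g.
	//   const m = Matrix.Identity.translate(10, 0).scale(2, 2);
	// maps the origin to (20, 0) -- translated first, then scaled.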
identity()
{
this.values.set([
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0,
]);
return this;
}
ortho(left: number, top: number, right: number, bottom: number, near = -1.0, far = 1.0)
{
const deltaX = right - left;
const deltaY = top - bottom;
const deltaZ = far - near;
const projection = new Matrix();
const values = projection.values;
values[0] = 2.0 / deltaX;
values[5] = 2.0 / deltaY;
values[10] = 2.0 / deltaZ;
values[15] = 1.0;
values[12] = -(right + left) / deltaX;
values[13] = -(top + bottom) / deltaY;
values[14] = -(far + near) / deltaZ;
return this.composeWith(projection);
}
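	// Typical 2D setup (sketch): a y-down, pixel-space projection for a render target:
	//   shader.project(Matrix.Identity.ortho(0, 0, target.width, target.height));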
perspective(left: number, top: number, right: number, bottom: number, near: number, far: number)
{
const deltaX = right - left;
const deltaY = top - bottom;
const deltaZ = far - near;
const projection = new Matrix();
const values = projection.values;
values[0] = 2.0 * near / deltaX;
values[5] = 2.0 * near / deltaY;
values[8] = (right + left) / deltaX;
values[9] = (top + bottom) / deltaY;
values[10] = -(far + near) / deltaZ;
values[11] = -1.0;
values[14] = -2.0 * far * near / deltaZ;
values[15] = 0.0;
return this.composeWith(projection);
}
rotate(theta: number, vX: number, vY: number, vZ: number)
{
const cos = Math.cos(theta);
const sin = Math.sin(theta);
const siv = 1.0 - cos;
const rotation = new Matrix();
const values = rotation.values;
values[0] = (siv * vX * vX) + cos;
values[1] = (siv * vX * vY) + (vZ * sin);
values[2] = (siv * vX * vZ) - (vY * sin);
values[4] = (siv * vX * vY) - (vZ * sin);
values[5] = (siv * vY * vY) + cos;
values[6] = (siv * vZ * vY) + (vX * sin);
values[8] = (siv * vX * vZ) + (vY * sin);
values[9] = (siv * vY * vZ) - (vX * sin);
values[10] = (siv * vZ * vZ) + cos;
values[15] = 1.0;
return this.composeWith(rotation);
}
scale(sX: number, sY: number, sZ = 1.0)
{
this.values[0] *= sX;
this.values[4] *= sX;
this.values[8] *= sX;
this.values[12] *= sX;
this.values[1] *= sY;
this.values[5] *= sY;
this.values[9] *= sY;
this.values[13] *= sY;
this.values[2] *= sZ;
this.values[6] *= sZ;
this.values[10] *= sZ;
this.values[14] *= sZ;
return this;
}
translate(tX: number, tY: number, tZ = 0.0)
{
this.values[12] += tX;
this.values[13] += tY;
this.values[14] += tZ;
return this;
}
}
export
class Prim extends null
{
static clear()
{
gl.disable(gl.SCISSOR_TEST);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.SCISSOR_TEST);
}
static draw(vertexBuffer: VertexBuffer, indexBuffer: IndexBuffer | null, type: ShapeType, offset = 0, numVertices?: number)
{
const drawMode = type === ShapeType.Fan ? gl.TRIANGLE_FAN
: type === ShapeType.Lines ? gl.LINES
: type === ShapeType.LineLoop ? gl.LINE_LOOP
: type === ShapeType.LineStrip ? gl.LINE_STRIP
: type === ShapeType.Points ? gl.POINTS
: type === ShapeType.TriStrip ? gl.TRIANGLE_STRIP
: gl.TRIANGLES;
vertexBuffer.activate();
if (indexBuffer !== null) {
if (numVertices === undefined)
numVertices = indexBuffer.length - offset;
indexBuffer.activate();
gl.drawElements(drawMode, numVertices, gl.UNSIGNED_SHORT, offset);
}
else {
if (numVertices === undefined)
numVertices = vertexBuffer.length - offset;
gl.drawArrays(drawMode, offset, numVertices);
}
}
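	// Usage sketch (hypothetical vertex data, assumes a shader is already active):
	//   const vbo = new VertexBuffer();
	//   vbo.upload(quadVertices);  // e.g. 4 vertices describing a quad
	//   texture.activate(0);
	//   Prim.draw(vbo, null, ShapeType.TriStrip);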
static rerez(width: number, height: number)
{
screen.width = width;
screen.height = height;
if (width <= 400 && height <= 300) {
screen.style.width = `${width * 2}px`;
screen.style.height = `${height * 2}px`;
}
else {
screen.style.width = `${width}px`;
screen.style.height = `${height}px`;
}
if (activeDrawTarget === DrawTarget.Screen)
gl.viewport(0, 0, screen.width, screen.height);
}
}
export
class Shader
{
program: WebGLProgram;
deferredValues: { [x: string]: { type: string, value: any } } = {};
modelView: Matrix;
projection: Matrix;
uniformIDs: { [x: string]: WebGLUniformLocation | null } = {};
constructor(vertexSource: string, fragmentSource: string)
{
const program = gl.createProgram();
const vertShader = gl.createShader(gl.VERTEX_SHADER);
const fragShader = gl.createShader(gl.FRAGMENT_SHADER);
if (program === null || vertShader === null || fragShader === null)
throw new Error(`Unable to create WebGL shader program object`);
// compile vertex and fragment shaders and check for errors
gl.shaderSource(vertShader, vertexSource);
gl.shaderSource(fragShader, fragmentSource);
gl.compileShader(vertShader);
if (!gl.getShaderParameter(vertShader, gl.COMPILE_STATUS)) {
const message = gl.getShaderInfoLog(vertShader);
throw new Error(`Couldn't compile vertex shader...\n${message}`);
}
gl.compileShader(fragShader);
if (!gl.getShaderParameter(fragShader, gl.COMPILE_STATUS)) {
const message = gl.getShaderInfoLog(fragShader);
throw new Error(`Couldn't compile fragment shader...\n${message}`);
}
// link the individual shaders into a program, check for errors
gl.attachShader(program, vertShader);
gl.attachShader(program, fragShader);
gl.bindAttribLocation(program, 0, 'al_pos');
gl.bindAttribLocation(program, 1, 'al_color');
gl.bindAttribLocation(program, 2, 'al_texcoord');
gl.linkProgram(program);
if (!gl.getProgramParameter(program, gl.LINK_STATUS)) {
const message = gl.getProgramInfoLog(program);
throw new Error(`Couldn't link shader program...\n${message}`);
}
this.program = program;
this.projection = Matrix.Identity;
this.modelView = Matrix.Identity;
let transformation = this.modelView.clone()
.composeWith(this.projection);
this.setMatrixValue('al_projview_matrix', transformation);
this.setIntValue('al_tex', 0);
}
activate(useTexture: boolean)
{
if (activeShader !== this) {
gl.useProgram(this.program);
for (const name of Object.keys(this.deferredValues)) {
const entry = this.deferredValues[name];
const slot = this.uniformIDs[name];
let size: number;
switch (entry.type) {
case 'bool':
gl.uniform1i(slot, entry.value ? 1 : 0);
break;
case 'float':
gl.uniform1f(slot, entry.value);
break;
case 'floatArray':
gl.uniform1fv(slot, entry.value);
break;
case 'floatVec':
size = entry.value.length;
size === 4 ? gl.uniform4fv(slot, entry.value)
: size === 3 ? gl.uniform3fv(slot, entry.value)
: size === 2 ? gl.uniform2fv(slot, entry.value)
: gl.uniform1fv(slot, entry.value);
break;
case 'int':
gl.uniform1i(slot, entry.value);
break;
case 'intArray':
gl.uniform1iv(slot, entry.value);
break;
case 'intVec':
size = entry.value.length;
size === 4 ? gl.uniform4iv(slot, entry.value)
: size === 3 ? gl.uniform3iv(slot, entry.value)
: size === 2 ? gl.uniform2iv(slot, entry.value)
: gl.uniform1iv(slot, entry.value);
break;
case 'matrix':
gl.uniformMatrix4fv(slot, false, entry.value.values);
break;
}
}
this.deferredValues = {};
activeShader = this;
}
this.setBoolValue('al_use_tex', useTexture);
}
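	// Deferral sketch ('al_alpha' is a hypothetical uniform name): values set while
	// this shader is inactive are cached in deferredValues and flushed by activate():
	//   shader.setFloatValue('al_alpha', 0.5);  // deferred if another shader is active
	//   shader.activate(true);                  // uploads the cached value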
project(matrix: Matrix)
{
this.projection = matrix.clone();
let transformation = this.modelView.clone()
.composeWith(this.projection);
this.setMatrixValue('al_projview_matrix', transformation);
}
setBoolValue(name: string, value: boolean)
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this)
gl.uniform1i(location, value ? 1 : 0);
else
this.deferredValues[name] = { type: 'bool', value };
}
setFloatArray(name: string, values: number[])
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this)
gl.uniform1fv(location, values);
else
this.deferredValues[name] = { type: 'floatArray', value: values };
}
setFloatValue(name: string, value: number)
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this)
gl.uniform1f(location, value);
else
this.deferredValues[name] = { type: 'float', value };
}
setFloatVec(name: string, values: number[])
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this) {
const size = values.length;
size === 4 ? gl.uniform4fv(location, values)
: size === 3 ? gl.uniform3fv(location, values)
: size === 2 ? gl.uniform2fv(location, values)
: gl.uniform1fv(location, values);
}
else {
this.deferredValues[name] = { type: 'floatVec', value: values };
}
}
setIntArray(name: string, values: number[])
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this)
gl.uniform1iv(location, values);
else
this.deferredValues[name] = { type: 'intArray', value: values };
}
setIntValue(name: string, value: number)
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this)
gl.uniform1i(location, value);
else
this.deferredValues[name] = { type: 'int', value };
}
setIntVec(name: string, values: number[])
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this) {
const size = values.length;
size === 4 ? gl.uniform4iv(location, values)
: size === 3 ? gl.uniform3iv(location, values)
: size === 2 ? gl.uniform2iv(location, values)
: gl.uniform1iv(location, values);
}
else {
this.deferredValues[name] = { type: 'intVec', value: values };
}
}
setMatrixValue(name: string, value: Matrix)
{
let location = this.uniformIDs[name];
if (location === undefined) {
location = gl.getUniformLocation(this.program, name);
this.uniformIDs[name] = location;
}
if (activeShader === this)
gl.uniformMatrix4fv(location, false, value.values);
else
this.deferredValues[name] = { type: 'matrix', value };
}
transform(matrix: Matrix)
{
this.modelView = matrix.clone();
let transformation = this.modelView.clone()
.composeWith(this.projection);
this.setMatrixValue('al_projview_matrix', transformation);
}
}
export
class Shape
{
type: ShapeType;
vertices: VertexBuffer;
indices: IndexBuffer | null;
constructor(vertexBuffer: VertexBuffer, indexBuffer: IndexBuffer | null, type: ShapeType)
{
this.type = type;
this.vertices = vertexBuffer;
this.indices = indexBuffer;
}
draw()
{
Prim.draw(this.vertices, this.indices, this.type);
}
}
export
class Texture
{
hwTexture: WebGLTexture;
width: number;
height: number;
constructor(image: HTMLImageElement);
constructor(width: number, height: number, content?: BufferSource | RGBA);
constructor(arg1: HTMLImageElement | number, arg2?: number, arg3?: BufferSource | RGBA)
{
const hwTexture = gl.createTexture();
if (hwTexture === null)
throw new Error(`Unable to create WebGL texture object`);
this.hwTexture = hwTexture;
const oldBinding = gl.getParameter(gl.TEXTURE_BINDING_2D);
gl.bindTexture(gl.TEXTURE_2D, this.hwTexture);
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
if (arg1 instanceof HTMLImageElement) {
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, arg1);
this.width = arg1.width;
this.height = arg1.height;
}
else {
this.width = arg1;
this.height = arg2 as number;
if (arg3 instanceof ArrayBuffer || ArrayBuffer.isView(arg3)) {
const buffer = arg3 instanceof ArrayBuffer ? arg3 : arg3.buffer;
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, this.width, this.height, 0, gl.RGBA, gl.UNSIGNED_BYTE,
new Uint8Array(buffer));
}
else {
let pixels = new Uint32Array(this.width * this.height);
if (arg3 !== undefined)
pixels.fill((arg3.a * 255 << 24) + (arg3.b * 255 << 16) + (arg3.g * 255 << 8) + (arg3.r * 255));
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, this.width, this.height, 0, gl.RGBA, gl.UNSIGNED_BYTE,
new Uint8Array(pixels.buffer));
}
}
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.bindTexture(gl.TEXTURE_2D, oldBinding);
}
activate(textureUnit = 0)
{
gl.activeTexture(gl.TEXTURE0 + textureUnit);
gl.bindTexture(gl.TEXTURE_2D, this.hwTexture);
}
upload(content: BufferSource, x = 0, y = 0, width = this.width, height = this.height)
{
const pixelData = ArrayBuffer.isView(content)
? new Uint8Array(content.buffer)
: new Uint8Array(content);
gl.bindTexture(gl.TEXTURE_2D, this.hwTexture);
gl.texSubImage2D(gl.TEXTURE_2D, 0, x, this.height - y - height, width, height, gl.RGBA, gl.UNSIGNED_BYTE, pixelData);
}
}
export
class VertexBuffer
{
hwBuffer: WebGLBuffer | null = null;
length: number = 0;
streamable: boolean;
constructor(vertices?: ArrayLike<Vertex>)
{
this.streamable = vertices === undefined;
if (vertices !== undefined)
this.upload(vertices);
}
activate()
{
gl.bindBuffer(gl.ARRAY_BUFFER, this.hwBuffer);
gl.enableVertexAttribArray(0);
gl.enableVertexAttribArray(1);
gl.enableVertexAttribArray(2);
gl.vertexAttribPointer(0, 4, gl.FLOAT, false, 40, 0);
gl.vertexAttribPointer(1, 4, gl.FLOAT, false, 40, 16);
gl.vertexAttribPointer(2, 2, gl.FLOAT, false, 40, 32);
}
upload(vertices: ArrayLike<Vertex>)
{
const data = new Float32Array(10 * vertices.length);
for (let i = 0, len = vertices.length; i < len; ++i) {
const vertex = vertices[i];
data[0 + i * 10] = vertex.x;
data[1 + i * 10] = vertex.y;
data[2 + i * 10] = vertex.z ?? 0.0;
data[3 + i * 10] = 1.0;
data[4 + i * 10] = vertex.color?.r ?? 1.0;
data[5 + i * 10] = vertex.color?.g ?? 1.0;
data[6 + i * 10] = vertex.color?.b ?? 1.0;
data[7 + i * 10] = vertex.color?.a ?? 1.0;
data[8 + i * 10] = vertex.u ?? 0.0;
data[9 + i * 10] = vertex.v ?? 0.0;
}
const hwBuffer = gl.createBuffer();
if (hwBuffer === null)
throw new Error(`Unable to create WebGL vertex buffer object`);
gl.bindBuffer(gl.ARRAY_BUFFER, hwBuffer);
gl.bufferData(gl.ARRAY_BUFFER, data, this.streamable ? gl.STREAM_DRAW : gl.STATIC_DRAW);
gl.deleteBuffer(this.hwBuffer);
this.hwBuffer = hwBuffer;
this.length = vertices.length;
}
}
function applyBlendOp(op: BlendOp)
{
switch (op) {
case BlendOp.Default:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
break;
case BlendOp.Add:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.ONE, gl.ONE);
break;
case BlendOp.Average:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.CONSTANT_COLOR, gl.CONSTANT_COLOR);
gl.blendColor(0.5, 0.5, 0.5, 0.5);
break;
case BlendOp.CopyAlpha:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFuncSeparate(gl.ZERO, gl.ONE, gl.ONE, gl.ZERO);
break;
case BlendOp.CopyRGB:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFuncSeparate(gl.ONE, gl.ZERO, gl.ZERO, gl.ONE);
break;
case BlendOp.Invert:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.ZERO, gl.ONE_MINUS_SRC_COLOR);
break;
case BlendOp.Multiply:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.DST_COLOR, gl.ZERO);
break;
case BlendOp.Replace:
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.ONE, gl.ZERO);
break;
case BlendOp.Subtract:
gl.blendEquation(gl.FUNC_REVERSE_SUBTRACT);
gl.blendFunc(gl.ONE, gl.ONE);
break;
default:
// something went horribly wrong if we got here; just set the blender to output
// nothing so the user can see something went awry.
gl.blendEquation(gl.FUNC_ADD);
gl.blendFunc(gl.ZERO, gl.ZERO);
}
}
function applyDepthOp(op: DepthOp)
{
const depthFunc = op === DepthOp.AlwaysPass ? gl.ALWAYS
: op === DepthOp.Equal ? gl.EQUAL
: op === DepthOp.Greater ? gl.GREATER
: op === DepthOp.GreaterOrEqual ? gl.GEQUAL
: op === DepthOp.Less ? gl.LESS
: op === DepthOp.LessOrEqual ? gl.LEQUAL
: op === DepthOp.NotEqual ? gl.NOTEQUAL
: gl.NEVER;
gl.depthFunc(depthFunc);
}
| Font |
task_index.go | // Copyright 2013-2020 Aerospike, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aerospike
import (
"regexp"
"strconv"
"strings"
)
// IndexTask is used to poll for long running create index completion.
type IndexTask struct {
*baseTask
namespace string
indexName string
}
// NewIndexTask initializes a task with fields needed to query server nodes.
func | (cluster *Cluster, namespace string, indexName string) *IndexTask {
return &IndexTask{
baseTask: newTask(cluster),
namespace: namespace,
indexName: indexName,
}
}
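// Usage sketch (assumed cluster handle and index names):
//
//	task := NewIndexTask(cluster, "test", "idx_bin")
//	if err := <-task.OnComplete(); err != nil {
//		// index creation failed or polling errored
//	}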
// IsDone queries all nodes for task completion status.
func (tski *IndexTask) IsDone() (bool, error) {
command := "sindex/" + tski.namespace + "/" + tski.indexName
nodes := tski.cluster.GetNodes()
complete := false
r := regexp.MustCompile(`\.*load_pct=(\d+)\.*`)
for _, node := range nodes {
responseMap, err := node.requestInfoWithRetry(&tski.cluster.infoPolicy, 5, command)
if err != nil {
return false, err
}
for _, response := range responseMap {
find := "load_pct="
index := strings.Index(response, find)
if index < 0 {
if tski.retries.Get() > 20 {
complete = true
}
continue
}
			matchRes := r.FindStringSubmatch(response)
			if matchRes == nil {
				// "load_pct=" was present but no digits followed; skip this response
				continue
			}
			// the capture group is all digits, so Atoi cannot fail here
			pct, _ := strconv.Atoi(matchRes[1])
if pct >= 0 && pct < 100 {
return false, nil
}
complete = true
}
}
return complete, nil
}
// OnComplete returns a channel that will be closed as soon as the task is finished.
// If an error is encountered during operation, an error will be sent on the channel.
func (tski *IndexTask) OnComplete() chan error {
return tski.onComplete(tski)
}
| NewIndexTask |
ex012.js | // How do we get the current system time into the code? Create a variable.
var agora = new Date()
var hora = agora.getHours()
console.log(`Agora são exatamente ${hora} horas.`)
if (hora < 12){
console.log("Bom dia!")
} else if(hora <= 18){
console.log("Boa tarde!")
} else { | console.log("Boa noite!")
} | |
fake.go | /*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
| package fake
import (
context "context"
fake "knative.dev/eventing/pkg/client/injection/informers/factory/fake"
containersource "knative.dev/eventing/pkg/client/injection/informers/sources/v1/containersource"
controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
)
var Get = containersource.Get
func init() {
injection.Fake.RegisterInformer(withInformer)
}
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
f := fake.Get(ctx)
inf := f.Sources().V1().ContainerSources()
return context.WithValue(ctx, containersource.Key{}, inf), inf.Informer()
} | |
train_resnext50_hmdb51.py | # import apex - !!!! INCLUDE THIS IMPORT IF YOU WANT TO USE MIXED PRECISION TRAINING !!!!
import torch
import os
import sys
import torch.optim as optim
import torch.nn as nn
from datetime import datetime
from tqdm import tqdm
from pathlib import Path
# Make sure that the project root is on sys.path (i.e., the parent folder containing 'dynamic_image_networks').
sys.path.append(str(Path('../../..').resolve()))
# ---------------------------------------------------------------
# Model / dataset choice
# ---------------------------------------------------------------
from dynamic_image_networks.hmdb51.models.resnext50_temppool import get_model
from dynamic_image_networks.hmdb51.dataloaders.hmdb51_dataloader import get_train_loader
from dynamic_image_networks.hmdb51.utilities.calculate_training_metrics import calculate_accuracy
from dynamic_image_networks.hmdb51.utilities.logger import initialize_logger
from dynamic_image_networks.hmdb51.utilities.meters import AverageMeter
def main():
# ============================================================================================
# Setup
# ============================================================================================
# ---------------------------------------------------------------
# Random seeds
# ---------------------------------------------------------------
torch.manual_seed(590238490)
torch.backends.cudnn.benchmark = True
# ---------------------------------------------------------------
# GPU
# ---------------------------------------------------------------
device = torch.device("cuda:0")
fp16 = False
if fp16:
|
# ---------------------------------------------------------------
# Training settings
# ---------------------------------------------------------------
batch_size = 32
num_epochs = 60
num_workers = 6
max_segment_size = 10
save_best_models = True
image_augmentation = False
# ----------------------------------------------------------------------------
# Get the model
# ----------------------------------------------------------------------------
net = get_model(num_classes=51)
net.to(device)
# ----------------------------------------------------------------------------
# Initialize optimizer and loss function
# ----------------------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=3e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)
if fp16:
net, optimizer = apex.amp.initialize(net, optimizer, opt_level="O1")
# ---------------------------------------------------------------
# Logging set-up
# ---------------------------------------------------------------
# File-name
file_name = ''.join(os.path.basename(__file__).split('.py')[:-1])
logger = initialize_logger(file_name, log_dir='./logs/')
# ============================================================================================
# Train
# ============================================================================================
time_start = datetime.now()
fold_i = 1
# ---------------------------------------------------------------
# Load dataloaders
# ---------------------------------------------------------------
train_loader, validation_loader = get_train_loader(fold_id=fold_i,
batch_size=batch_size,
num_workers=num_workers,
image_augmenation=image_augmentation,
segment_size=max_segment_size)
logger.info('Starting Training on Fold: {}\n'.format(fold_i))
best_val_loss = float('inf')
best_val_acc = 0
for epoch_i in range(num_epochs):
# ---------------------------------------------------------------
# Training and validation loop
# ---------------------------------------------------------------
avg_loss, avg_acc = training_loop('train', net, device, train_loader,
optimizer, criterion, fp16)
avg_val_loss, avg_val_acc = training_loop('val', net, device, validation_loader,
None, criterion, fp16)
if scheduler:
scheduler.step(avg_val_loss)
# ---------------------------------------------------------------
# Track the best model
# ---------------------------------------------------------------
if avg_val_loss < best_val_loss:
best_val_loss = avg_val_loss
if save_best_models:
logger.info('Saving model because of best loss...')
os.makedirs('./saved_models/', exist_ok=True)
torch.save(net.state_dict(),
'./saved_models/{}_fold_{}_best_loss_state.pt'.format(file_name, fold_i))
if avg_val_acc > best_val_acc:
best_val_acc = avg_val_acc
if save_best_models:
logger.info('Saving model because of best acc...')
os.makedirs('./saved_models/', exist_ok=True)
torch.save(net.state_dict(),
'./saved_models/{}_fold_{}_best_acc_state.pt'.format(file_name, fold_i))
# ---------------------------------------------------------------
# Log the training status
# ---------------------------------------------------------------
time_elapsed = datetime.now() - time_start
output_msg = 'Fold {}, Epoch: {}/{}\n' \
'---------------------\n' \
'train loss: {:.6f}, val loss: {:.6f}\n' \
'train acc: {:.6f}, val acc: {:.6f}\n' \
'best val loss: {:.6f}, best val acc: {:.6f}\n' \
'time elapsed: {}\n'. \
format(fold_i, epoch_i, num_epochs - 1,
avg_loss, avg_val_loss,
avg_acc, avg_val_acc,
best_val_loss, best_val_acc,
str(time_elapsed).split('.')[0])
logger.info(output_msg)
logger.info('Finished Training')
def training_loop(phase, net, device, dataloader, optimizer, criterion, fp16):
loss_meter = AverageMeter()
acc_meter = AverageMeter()
# Set the model into the appropriate mode.
if phase == 'train':
net.train()
elif phase == 'val':
net.eval()
else:
raise ValueError
    # Enable gradient computation only for the training phase.
with torch.set_grad_enabled(phase == 'train'):
for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
x, y, = data
x, y = x.to(device, non_blocking=True), y.to(device, non_blocking=True)
# Prediction.
y_pred = net(x).float()
# Loss and step.
loss = criterion(y_pred, y)
if phase == 'train':
optimizer.zero_grad()
if fp16 is True:
with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
# Metrics
batch_size = len(y)
loss_meter.add(loss.item(), batch_size)
acc_meter.add(calculate_accuracy(y_pred, y), batch_size)
avg_loss = loss_meter.get_average()
avg_acc = acc_meter.get_average()
return avg_loss, avg_acc
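# Shape sketch (hypothetical sizes): the dataloader is expected to yield (x, y)
# batches, e.g. x of shape [B, C, H, W] dynamic images and integer labels y of
# shape [B]; training_loop returns the epoch's (avg_loss, avg_acc).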
if __name__ == '__main__':
main()
| print('!!! MIXED PRECISION TRAINING IS ENABLED -- ONLY USE FOR VOLTA AND TURING GPUs!!!') |
categories.component.ts |
import { Component, OnInit } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import { ApiService } from 'src/app/services/api.service';
import { CartService } from 'src/app/services/cart.service';
import { UtilService } from 'src/app/services/util.service';
import { uniqBy } from 'lodash';
import { Options } from 'ng5-slider';
@Component({
selector: 'app-categories',
templateUrl: './categories.component.html',
styleUrls: ['./categories.component.scss']
})
export class | implements OnInit {
tabID: any;
catID: any;
subId: any;
dummys = Array(20);
limit: any;
maxLimit: any;
haveSub: boolean = false;
categories: any[] = [];
dummyProducts: any[] = [];
products: any[] = [];
filter: any = '1';
loaded: boolean;
banners: any[] = [];
dummyBanners = Array(5);
myCarouselOptions = {
loop: false,
margin: 20,
nav: false,
dots: true,
autoplay: true,
responsive: {
0: {
items: 1,
},
600: {
items: 1
},
800: {
items: 1
},
1000: {
items: 1
}
}
}
options: Options = {
floor: 0,
ceil: 100,
showTicks: true
};
min: any;
max: any;
constructor(
private router: Router,
private route: ActivatedRoute,
public api: ApiService,
public util: UtilService,
public cart: CartService
) {
this.init();
}
init() {
console.log('current route', this.router.url);
if (this.router.url.includes('/sub/')) {
      console.log('its sub category');
this.haveSub = true;
this.catID = this.route.snapshot.paramMap.get('id');
this.subId = this.route.snapshot.paramMap.get('sub_id');
} else if (this.router.url.includes('/categories/')) {
console.log('it category');
this.catID = this.route.snapshot.paramMap.get('id');
}
this.limit = 1;
this.loaded = false;
this.categories = [];
this.banners = [];
this.products = [];
this.getCates();
this.getBanners();
}
getCates() {
this.api.get('categories').then((datas: any) => {
if (datas && datas.data && datas.data.length) {
const list = datas.data.filter(x => x.status === '1');
console.log('not selected');
this.categories = [];
list.forEach(element => {
const obj = {
id: element.id,
name: element.name,
sub: []
};
this.categories.push(obj);
});
console.log('categories', this.categories);
this.getSubCates();
}
}, error => {
console.log(error);
this.util.errorMessage(this.util.translate('Something went wrong'));
}).catch(error => {
console.log(error);
this.util.errorMessage(this.util.translate('Something went wrong'));
})
}
getSubCates() {
this.api.get('subcate').then((datas: any) => {
console.log(datas);
if (datas && datas.data && datas.data.length) {
const list = datas.data.filter(x => x.status === '1');
this.categories.forEach((element, index) => {
list.forEach(sub => {
if (element.id === sub.cate_id) {
this.categories[index].sub.push(sub);
}
});
});
console.log('all cates', this.categories);
if (this.haveSub === false) {
const index = this.categories.findIndex(x => x.id === this.catID);
console.log('index', index);
this.subId = this.categories[index].sub[0].id;
console.log('sub id-----', this.subId);
}
this.getProducts();
}
}, error => {
console.log(error);
this.util.errorMessage(this.util.translate('Something went wrong'));
}).catch(error => {
console.log(error);
this.util.errorMessage(this.util.translate('Something went wrong'));
});
}
getProducts() {
const stores = {
id: localStorage.getItem('city')
};
    this.api.post('stores/getByCity', stores).then((storeRes: any) => {
      if (storeRes && storeRes.status === 200 && storeRes.data && storeRes.data.length) {
        this.util.active_store = [...new Set(storeRes.data.map(item => item.uid))];
const param = {
id: this.catID,
cid: localStorage.getItem('city'),
sid: this.subId,
limit: this.limit * 12
};
console.log('parma', param);
this.dummys = Array(20);
this.products = [];
this.api.post('products/getByCSID', param).then((cates: any) => {
console.log(cates);
this.dummys = [];
if (cates && cates.status === 200 && cates.data && cates.data.length) {
this.maxLimit = (this.limit * 12) - 1;
console.log('Max Limit0000', this.maxLimit);
console.log('products', cates.data);
const products = cates.data;
window.scrollTo(0, 0);
this.products = products.filter(x => x.status === '1' && this.util.active_store.includes(x.store_id));
            this.products = uniqBy(this.products, 'id');
this.max = Math.max(...this.products.map(o => o.original_price), 0);
console.log('maxValueOfPrice', this.max);
            this.min = Math.min.apply(null, this.products.map(item => item.original_price));
console.log('min', this.min);
this.products.forEach((info: any) => {
if (info.variations && info.size === '1' && info.variations !== '') {
              if (((x) => { try { JSON.parse(x); return true; } catch (e) { return false; } })(info.variations)) {
                info.variations = JSON.parse(info.variations);
info['variant'] = 0;
} else {
info.variations = [];
info['variant'] = 1;
}
} else {
info.variations = [];
info['variant'] = 1;
}
if (this.cart.itemId.includes(info.id)) {
const index = this.cart.cart.filter(x => x.id === info.id);
info['quantiy'] = index[0].quantiy;
} else {
info['quantiy'] = 0;
}
});
this.dummyProducts = this.products;
this.onChange(this.filter);
this.dummys = [];
} else {
this.dummys = [];
}
if (this.loaded) {
this.loaded = false;
}
}, error => {
console.log(error);
this.dummys = [];
this.dummyProducts = [];
this.products = [];
this.util.toast('error', this.util.translate('Error'), this.util.translate('wrong input'));
});
} else {
this.dummys = [];
this.dummyProducts = [];
this.products = [];
this.router.navigate(['']);
this.util.toast('error', this.util.translate('Error'), this.util.translate('wrong input'));
}
}).catch((error) => {
console.log('error-/>', error);
console.log(error);
this.dummys = [];
this.dummyProducts = [];
this.products = [];
this.util.toast('error', this.util.translate('Error'), this.util.translate('wrong input'));
});
}
ngOnInit(): void {
}
catChange(val) {
console.log(val);
this.catID = val;
}
goToShopDetail() {
this.router.navigate(['/shop-detail']);
}
onChange(value) {
this.filter = value;
    switch (+this.filter) { // coerce: the bound filter value may arrive as a string
case 1:
console.log('its rating');
// this.products = this.products.sort((a, b) => parseInt(b.total_rating) - parseInt(a.total_rating));
this.products = this.products.sort((a, b) =>
parseFloat(b.total_rating) < parseFloat(a.total_rating) ? -1
: (parseFloat(b.total_rating) > parseFloat(a.total_rating) ? 1 : 0));
break;
case 2:
console.log('its low to high');
this.products = this.products.sort((a, b) =>
parseFloat(a.original_price) < parseFloat(b.original_price) ? -1
: (parseFloat(a.original_price) > parseFloat(b.original_price) ? 1 : 0));
break;
case 3:
        console.log('its high to low');
this.products = this.products.sort((a, b) =>
parseFloat(b.original_price) < parseFloat(a.original_price) ? -1
: (parseFloat(b.original_price) > parseFloat(a.original_price) ? 1 : 0));
break;
case 4:
console.log('its a - z');
this.products = this.products.sort((a, b) => {
if (a.name < b.name) { return -1; }
if (a.name > b.name) { return 1; }
return 0;
});
break;
case 5:
console.log('its z - a');
this.products = this.products.sort((a, b) => {
if (a.name > b.name) { return -1; }
if (a.name < b.name) { return 1; }
return 0;
});
break;
case 6:
console.log('its % off');
this.products = this.products.sort((a, b) =>
parseFloat(b.discount) < parseFloat(a.discount) ? -1
: (parseFloat(b.discount) > parseFloat(a.discount) ? 1 : 0));
break;
default:
break;
}
}
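  // Filter values (derived from the cases above): 1 rating desc, 2 price low->high,
  // 3 price high->low, 4 name A-Z, 5 name Z-A, 6 discount desc; e.g. onChange(2)
  // re-sorts this.products ascending by original_price.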
singleProduct(item) {
console.log('-->', item);
    const name = item.name.replace(/[^a-zA-Z0-9]/g, '-').toLowerCase();
this.router.navigate(['product', name, item.id]);
}
addToCart(item, index) {
console.log(item);
this.products[index].quantiy = 1;
this.cart.addItem(item);
}
add(product, index) {
console.log(product);
if (this.products[index].quantiy > 0) {
this.products[index].quantiy = this.products[index].quantiy + 1;
this.cart.addQuantity(this.products[index].quantiy, product.id);
}
}
remove(product, index) {
console.log(product, index);
if (this.products[index].quantiy === 1) {
this.products[index].quantiy = 0;
this.cart.removeItem(product.id)
} else {
this.products[index].quantiy = this.products[index].quantiy - 1;
this.cart.addQuantity(this.products[index].quantiy, product.id);
}
}
loadData() {
this.limit = this.limit + 1;
this.loaded = true;
this.getProducts();
}
getBanners() {
this.dummyBanners = Array(30);
this.api.get('banners').then((data: any) => {
console.log(data);
this.dummyBanners = [];
this.banners = [];
if (data && data.status === 200 && data.data && data.data.length) {
data.data.forEach(element => {
if (element && element.status === '1') {
if (element.position === '0') {
this.banners.push(element);
}
}
});
console.log('top', this.banners);
console.log('detect changes');
}
}, error => {
console.log(error);
this.dummyBanners = [];
});
}
openLink(item) {
console.log(item);
if (item.type === '0') {
console.log('open category');
const name = this.categories.filter(x => x.id === item.link);
let cateName: any = '';
if (name && name.length) {
cateName = name[0].name;
}
      const routeName = cateName.replace(/[^a-zA-Z0-9]/g, '-').toLowerCase();
this.router.navigate([]).then(result => { window.open('categories/' + item.link + '/' + routeName, '_blank'); });
} else if (item.type === '1') {
console.log('open product');
      const name = item.message.replace(/[^a-zA-Z0-9]/g, '-').toLowerCase();
this.router.navigate(['product', name, item.link]);
} else {
console.log('open link');
window.open(item.link, '_blank');
}
}
onUserChange(event) {
console.log(event);
const products = [];
    this.dummyProducts.forEach(element => {
      if (parseFloat(element.original_price) >= event.value && parseFloat(element.original_price) <= event.highValue) {
        products.push(element);
      }
    });
    this.products = products;
}
}
| CategoriesComponent |
settings.py | """
Django settings for apply project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '--v$_^*0r5(ok1^2sxdm4w_wwskvuv-z0tcop+yf1-m@+7p#5i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gunicorn',
'bootstrapform',
'yard',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'apply.urls'
WSGI_APPLICATION = 'apply.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'yard', # Or path to database file if using sqlite3.
'USER': 'frankie', # Not used with sqlite3.
'PASSWORD': 'frankie', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
DOCS_URL = BASE_DIR + '/yard/static/docs/'
LOGGING = { |
LOGIN_REDIRECT_URL="/" | 'version': 1,
} |
application_exception_test.go | /*
* Copyright 2019-present Facebook, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"testing"
)
func TestApplicationException(t *testing.T) {
exc := NewApplicationException(UNKNOWN_APPLICATION_EXCEPTION, "")
if exc.Error() != "" |
if exc.TypeID() != UNKNOWN_APPLICATION_EXCEPTION {
t.Fatalf("Expected type UNKNOWN for exception but found '%d'", exc.TypeID())
}
exc = NewApplicationException(WRONG_METHOD_NAME, "junk_method")
if exc.Error() != "junk_method" {
t.Fatalf("Expected 'junk_method' for exception but found '%s'", exc.Error())
}
if exc.TypeID() != WRONG_METHOD_NAME {
t.Fatalf("Expected type WRONG_METHOD_NAME for exception but found '%d'", exc.TypeID())
}
}
| {
t.Fatalf("Expected empty string for exception but found '%s'", exc.Error())
} |
typed_list_deserialization_strategy.py | from typing import Type, List
from jivago.inject import typing_meta_helper
from jivago.lang.annotations import Override
from jivago.lang.stream import Stream
from jivago.serialization.deserialization_strategy import DeserializationStrategy, T
TYPES_WHICH_DESERIALIZE_TO_LISTS = ('List', 'Iterable', 'Collection')
class TypedListDeserializationStrategy(DeserializationStrategy):
def __init__(self, deserializer: "Deserializer"):
self.deserializer = deserializer
@Override
def can_handle_deserialization(self, declared_type: type) -> bool:
        return typing_meta_helper.is_typing_meta_collection(declared_type, TYPES_WHICH_DESERIALIZE_TO_LISTS)

    @Override
    def deserialize(self, obj: list, declared_type: Type[List[T]]) -> list:
        list_content_type = declared_type.__args__[0]
        return Stream(obj).map(lambda x: self.deserializer.deserialize(x, list_content_type)).toList()
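# Usage sketch (assumption: a Deserializer instance wired with this
# strategy is available as `deserializer`):
#   deserializer.deserialize([1, 2, 3], List[int])  # -> [1, 2, 3]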
memory.go | /**
* Author: Wang P
* Version: 1.0.0
 * Date: 2021/4/13 6:34 PM
 * Description: The request queue has multiple producers but a single consumer that drains at a fixed rate, so the in-memory queue is built on a Fan-In style RateLimiter.
**/
package mq
import (
"errors"
"fmt"
"github.com/weitrue/Seckill/infrastructure/mq/mqi"
"github.com/weitrue/Seckill/infrastructure/pool/worker/taski"
"github.com/weitrue/Seckill/infrastructure/services/local/ratelimiter"
"github.com/spf13/viper"
)
type memoryQueue struct {
queue ratelimiter.RateLimiter
}
func MqFactory(name string) (mqi.Queue, error) {
rate := viper.GetInt64(fmt.Sprintf("queue.%s.rate", name))
size := viper.GetInt64(fmt.Sprintf("queue.%s.size", name))
q, err := ratelimiter.NewRateLimiter(size, rate, ratelimiter.FanIn)
if err != nil {
	return nil, err
}
return &memoryQueue{queue: q}, nil
}
func (mq *memoryQueue) Produce(task taski.Task) error {
if ok := mq.queue.Push(task); !ok {
return errors.New("queue producer error")
}
return nil
}
func (mq *memoryQueue) Consume() (taski.Task, error) {
task, ok := mq.queue.Pop()
if !ok {
return nil, errors.New("queue consumer error")
}
return task, nil
}
func (mq *memoryQueue) Close() error {
return mq.queue.Close()
}
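// Usage sketch (assumptions: viper keys "queue.order.rate" and
// "queue.order.size" are configured, and `task` implements taski.Task):
//
//	q, err := MqFactory("order")
//	if err != nil {
//		// handle error
//	}
//	if err := q.Produce(task); err != nil {
//		// handle error
//	}
//	next, err := q.Consume()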
|
hook.go | // Code generated by entc, DO NOT EDIT.
package hook
import (
"context"
"fmt"
"github.com/gobench-io/gobench/ent"
)
// The ApplicationFunc type is an adapter to allow the use of ordinary
// function as Application mutator.
type ApplicationFunc func(context.Context, *ent.ApplicationMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f ApplicationFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.ApplicationMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ApplicationMutation", m)
}
return f(ctx, mv)
}
// The CounterFunc type is an adapter to allow the use of ordinary
// function as Counter mutator.
type CounterFunc func(context.Context, *ent.CounterMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f CounterFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.CounterMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.CounterMutation", m)
}
return f(ctx, mv)
}
// The GaugeFunc type is an adapter to allow the use of ordinary
// function as Gauge mutator.
type GaugeFunc func(context.Context, *ent.GaugeMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f GaugeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.GaugeMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GaugeMutation", m)
}
return f(ctx, mv)
}
// The GraphFunc type is an adapter to allow the use of ordinary
// function as Graph mutator.
type GraphFunc func(context.Context, *ent.GraphMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f GraphFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.GraphMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GraphMutation", m)
}
return f(ctx, mv)
}
// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.GroupMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
}
return f(ctx, mv)
}
// The HistogramFunc type is an adapter to allow the use of ordinary
// function as Histogram mutator.
type HistogramFunc func(context.Context, *ent.HistogramMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f HistogramFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.HistogramMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.HistogramMutation", m)
}
return f(ctx, mv)
}
// The MetricFunc type is an adapter to allow the use of ordinary
// function as Metric mutator.
type MetricFunc func(context.Context, *ent.MetricMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f MetricFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.MetricMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetricMutation", m)
}
return f(ctx, mv)
}
// The TagFunc type is an adapter to allow the use of ordinary
// function as Tag mutator.
type TagFunc func(context.Context, *ent.TagMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f TagFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.TagMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TagMutation", m)
}
return f(ctx, mv)
}
// Condition is a hook condition function.
type Condition func(context.Context, ent.Mutation) bool
// And groups conditions with the AND operator.
func And(first, second Condition, rest ...Condition) Condition {
return func(ctx context.Context, m ent.Mutation) bool {
if !first(ctx, m) || !second(ctx, m) {
return false
}
for _, cond := range rest {
if !cond(ctx, m) {
return false
}
}
return true
}
}
// Or groups conditions with the OR operator.
func Or(first, second Condition, rest ...Condition) Condition {
return func(ctx context.Context, m ent.Mutation) bool {
if first(ctx, m) || second(ctx, m) {
return true
}
for _, cond := range rest {
if cond(ctx, m) {
return true
}
}
return false
}
}
// Not negates a given condition.
func Not(cond Condition) Condition {
return func(ctx context.Context, m ent.Mutation) bool {
return !cond(ctx, m)
}
}
// HasOp is a condition testing mutation operation.
func HasOp(op ent.Op) Condition {
return func(_ context.Context, m ent.Mutation) bool {
return m.Op().Is(op)
}
}
// HasAddedFields is a condition validating `.AddedField` on fields.
func HasAddedFields(field string, fields ...string) Condition {
return func(_ context.Context, m ent.Mutation) bool {
if _, exists := m.AddedField(field); !exists {
return false
}
for _, field := range fields {
if _, exists := m.AddedField(field); !exists {
return false
}
}
return true
}
}
// HasClearedFields is a condition validating `.FieldCleared` on fields.
func HasClearedFields(field string, fields ...string) Condition {
return func(_ context.Context, m ent.Mutation) bool {
if exists := m.FieldCleared(field); !exists {
return false
}
for _, field := range fields {
if exists := m.FieldCleared(field); !exists {
return false
}
}
return true
}
}
// HasFields is a condition validating `.Field` on fields.
func HasFields(field string, fields ...string) Condition {
return func(_ context.Context, m ent.Mutation) bool {
if _, exists := m.Field(field); !exists {
return false
}
for _, field := range fields {
if _, exists := m.Field(field); !exists {
return false
}
}
return true
}
}
// If executes the given hook under condition.
//
// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
//
func If(hk ent.Hook, cond Condition) ent.Hook {
return func(next ent.Mutator) ent.Mutator {
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if cond(ctx, m) {
return hk(next).Mutate(ctx, m)
}
return next.Mutate(ctx, m)
})
}
}
// On executes the given hook only for the given operation.
//
// hook.On(Log, ent.Delete|ent.Create)
//
func On(hk ent.Hook, op ent.Op) ent.Hook {
return If(hk, HasOp(op))
}
// Unless skips the given hook only for the given operation.
//
// hook.Unless(Log, ent.Update|ent.UpdateOne)
//
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
return If(hk, Not(HasOp(op)))
}
// Reject returns a hook that rejects all operations that match op.
//
// func (T) Hooks() []ent.Hook {
// return []ent.Hook{
// Reject(ent.Delete|ent.Update),
// }
// }
//
func Reject(op ent.Op) ent.Hook {
hk := func(ent.Mutator) ent.Mutator {
return ent.MutateFunc(func(_ context.Context, m ent.Mutation) (ent.Value, error) {
return nil, fmt.Errorf("%s operation is not allowed", m.Op())
})
}
return On(hk, op)
}
// Chain acts as a list of hooks and is effectively immutable.
// Once created, it will always hold the same set of hooks in the same order.
type Chain struct {
hooks []ent.Hook
}
// NewChain creates a new chain of hooks.
func NewChain(hooks ...ent.Hook) Chain {
return Chain{append([]ent.Hook(nil), hooks...)}
}
// Hook chains the list of hooks and returns the final hook.
func (c Chain) Hook() ent.Hook {
return func(mutator ent.Mutator) ent.Mutator {
for i := len(c.hooks) - 1; i >= 0; i-- {
mutator = c.hooks[i](mutator)
}
return mutator
}
}
// Append extends a chain, adding the specified hooks
// as the last ones in the mutation flow.
func (c Chain) Append(hooks ...ent.Hook) Chain {
newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
newHooks = append(newHooks, c.hooks...)
newHooks = append(newHooks, hooks...)
return Chain{newHooks}
}
// Extend extends a chain, adding the specified chain's hooks
// as the last ones in the mutation flow.
func (c Chain) Extend(chain Chain) Chain {
return c.Append(chain.hooks...)
}
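// Usage sketch (assumptions: auditHook and validateHook are ent.Hook
// values defined elsewhere, and client is an *ent.Client):
//
//	chain := NewChain(auditHook, validateHook)
//	client.Use(chain.Hook())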
api_op_ListImages.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ecr
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws" | )
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesRequest
type ListImagesInput struct {
_ struct{} `type:"structure"`
// The filter key and value with which to filter your ListImages results.
Filter *ListImagesFilter `locationName:"filter" type:"structure"`
// The maximum number of image results returned by ListImages in paginated output.
// When this parameter is used, ListImages only returns maxResults results in
// a single page along with a nextToken response element. The remaining results
// of the initial request can be seen by sending another ListImages request
// with the returned nextToken value. This value can be between 1 and 1000.
// If this parameter is not used, then ListImages returns up to 100 results
// and a nextToken value, if applicable.
MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
// The nextToken value returned from a previous paginated ListImages request
// where maxResults was used and the results exceeded the value of that parameter.
// Pagination continues from the end of the previous results that returned the
// nextToken value. This value is null when there are no more results to return.
//
// This token should be treated as an opaque identifier that is only used to
// retrieve the next items in a list and not for other programmatic purposes.
NextToken *string `locationName:"nextToken" type:"string"`
// The AWS account ID associated with the registry that contains the repository
// in which to list images. If you do not specify a registry, the default registry
// is assumed.
RegistryId *string `locationName:"registryId" type:"string"`
// The repository with image IDs to be listed.
//
// RepositoryName is a required field
RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
}
// String returns the string representation
func (s ListImagesInput) String() string {
return awsutil.Prettify(s)
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListImagesInput) Validate() error {
invalidParams := aws.ErrInvalidParams{Context: "ListImagesInput"}
if s.MaxResults != nil && *s.MaxResults < 1 {
invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1))
}
if s.RepositoryName == nil {
invalidParams.Add(aws.NewErrParamRequired("RepositoryName"))
}
if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 2))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesResponse
type ListImagesOutput struct {
_ struct{} `type:"structure"`
// The list of image IDs for the requested repository.
ImageIds []ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
// The nextToken value to include in a future ListImages request. When the results
// of a ListImages request exceed maxResults, this value can be used to retrieve
// the next page of results. This value is null when there are no more results
// to return.
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
func (s ListImagesOutput) String() string {
return awsutil.Prettify(s)
}
const opListImages = "ListImages"
// ListImagesRequest returns a request value for making API operation for
// Amazon EC2 Container Registry.
//
// Lists all the image IDs for a given repository.
//
// You can filter images based on whether or not they are tagged by setting
// the tagStatus parameter to TAGGED or UNTAGGED. For example, you can filter
// your results to return only UNTAGGED images and then pipe that result to
// a BatchDeleteImage operation to delete them. Or, you can filter your results
// to return only TAGGED images to list all of the tags in your repository.
//
// // Example sending a request using ListImagesRequest.
// req := client.ListImagesRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages
func (c *Client) ListImagesRequest(input *ListImagesInput) ListImagesRequest {
op := &aws.Operation{
Name: opListImages,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &aws.Paginator{
InputTokens: []string{"nextToken"},
OutputTokens: []string{"nextToken"},
LimitToken: "maxResults",
TruncationToken: "",
},
}
if input == nil {
input = &ListImagesInput{}
}
req := c.newRequest(op, input, &ListImagesOutput{})
return ListImagesRequest{Request: req, Input: input, Copy: c.ListImagesRequest}
}
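// Example: list only untagged images (a sketch; "my-repo" is a
// placeholder repository name, and TagStatusUntagged is assumed to be
// the tag-status enum value in this package):
//
//	req := client.ListImagesRequest(&ListImagesInput{
//		RepositoryName: aws.String("my-repo"),
//		Filter:         &ListImagesFilter{TagStatus: TagStatusUntagged},
//	})
//	resp, err := req.Send(context.TODO())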
// ListImagesRequest is the request type for the
// ListImages API operation.
type ListImagesRequest struct {
*aws.Request
Input *ListImagesInput
Copy func(*ListImagesInput) ListImagesRequest
}
// Send marshals and sends the ListImages API request.
func (r ListImagesRequest) Send(ctx context.Context) (*ListImagesResponse, error) {
r.Request.SetContext(ctx)
err := r.Request.Send()
if err != nil {
return nil, err
}
resp := &ListImagesResponse{
ListImagesOutput: r.Request.Data.(*ListImagesOutput),
response: &aws.Response{Request: r.Request},
}
return resp, nil
}
// NewListImagesPaginator returns a paginator for ListImages.
// Use Next method to get the next page, and CurrentPage to get the current
// response page from the paginator. Next will return false, if there are
// no more pages, or an error was encountered.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over pages.
// req := client.ListImagesRequest(input)
// p := ecr.NewListImagesRequestPaginator(req)
//
// for p.Next(context.TODO()) {
// page := p.CurrentPage()
// }
//
// if err := p.Err(); err != nil {
// return err
// }
//
func NewListImagesPaginator(req ListImagesRequest) ListImagesPaginator {
return ListImagesPaginator{
Pager: aws.Pager{
NewRequest: func(ctx context.Context) (*aws.Request, error) {
var inCpy *ListImagesInput
if req.Input != nil {
tmp := *req.Input
inCpy = &tmp
}
newReq := req.Copy(inCpy)
newReq.SetContext(ctx)
return newReq.Request, nil
},
},
}
}
// ListImagesPaginator is used to paginate the request. This can be done by
// calling Next and CurrentPage.
type ListImagesPaginator struct {
aws.Pager
}
func (p *ListImagesPaginator) CurrentPage() *ListImagesOutput {
return p.Pager.CurrentPage().(*ListImagesOutput)
}
// ListImagesResponse is the response type for the
// ListImages API operation.
type ListImagesResponse struct {
*ListImagesOutput
response *aws.Response
}
// SDKResponseMetdata returns the response metadata for the
// ListImages request.
func (r *ListImagesResponse) SDKResponseMetdata() *aws.Response {
return r.response
}
0001_initial.py | # Generated by Django 2.2 on 2020-10-29 04:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ConstructionSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_construction_system', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Material',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_material', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_origin', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_project', models.CharField(max_length=255)),
('use', models.CharField(max_length=255)),
('builded_surface', models.IntegerField()),
('living_area', models.IntegerField()),
('tier', models.IntegerField()),
('useful_life', models.IntegerField()),
],
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_section', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_unit', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='MaterialSchemeProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('provider_distance', models.IntegerField()),
('construction_system_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.ConstructionSystem')),
('material_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Material')),
('origin_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Origin')),
('project_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Project')),
('unit_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='projects_api.Unit')),
],
),
]
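# Apply with (assumption: the app is registered under the label
# "projects_api", as the ForeignKey targets above suggest):
#   python manage.py migrate projects_api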