file_name | prefix | suffix | middle
---|---|---|---|
events.py
|
import logging
from troposphere.events import Rule, Target
from buildlib.helpers.client_helper import ClientHelper
class EventsHelper(object):
    def __init__(self, template, project, session=None):
        self.client = ClientHelper.get_client('events', session)
        self.project = project
        self.template = template
|
        return self.template.add_resource(Rule(
            '{0}Rule'.format(name_prefix),
            State=state,
            Targets=targets,
            ScheduleExpression=schedule_expression,
            **kwargs
        ))
    def create_target(self, arn, target_id, name_prefix=''):
        return Target(
            '{0}Target'.format(name_prefix),
            Arn=arn,
            Id=target_id
        )
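# Usage sketch (illustrative only; the template, project name, ARN and logical names below are
# hypothetical): wire a Lambda target into a scheduled rule with the helpers above.
# helper = EventsHelper(template, 'example-project')
# target = helper.create_target('arn:aws:lambda:region:account:function:nightly-job', 'NightlyTarget', name_prefix='Nightly')
# helper.create_cron_rule('cron(0 2 * * ? *)', [target], name_prefix='Nightly')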
|
    def create_cron_rule(self, schedule_expression, targets, state='ENABLED', name_prefix='', **kwargs):
|
http.go
|
package core
import (
"context"
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/http"
"net/http/pprof"
"regexp"
"time"
anzlog "github.com/anz-bank/sysl-go/log"
"github.com/anz-bank/pkg/health"
"github.com/anz-bank/sysl-go/config"
"github.com/anz-bank/sysl-go/handlerinitialiser"
"github.com/anz-bank/sysl-go/metrics"
"github.com/anz-bank/sysl-go/status"
"github.com/go-chi/chi"
"github.com/prometheus/client_golang/prometheus"
)
const (
defaultGracefulStopTimeout = 3 * time.Minute
)
type Manager interface {
EnabledHandlers() []handlerinitialiser.HandlerInitialiser
LibraryConfig() *config.LibraryConfig
AdminServerConfig() *config.CommonHTTPServerConfig
PublicServerConfig() *config.UpstreamConfig
// AddAdminHTTPMiddleware can return nil if you do not have any additional middleware for the admin endpoint
AddAdminHTTPMiddleware() func(ctx context.Context, r chi.Router)
}
func configureAdminServerListener(ctx context.Context, hl Manager, promRegistry *prometheus.Registry, healthServer *health.HTTPServer, mWare []func(handler http.Handler) http.Handler) (StoppableServer, error) {
// validate hl manager configuration
if hl.AdminServerConfig() == nil {
return nil, errors.New("missing adminserverconfig")
}
if hl.LibraryConfig() == nil {
return nil, errors.New("missing libraryconfig")
}
rootAdminRouter, adminRouter := configureRouters(hl.AdminServerConfig().BasePath, mWare)
adminTLSConfig, err := config.MakeTLSConfig(ctx, hl.AdminServerConfig().Common.TLS)
if err != nil {
return nil, err
}
// Define meta-service endpoints:
statusService := status.Service{
BuildMetadata: buildMetadata,
Config: hl.LibraryConfig(),
Services: hl.EnabledHandlers(),
}
adminRouter.Route("/-", func(r chi.Router) {
if hl.AddAdminHTTPMiddleware() != nil {
hl.AddAdminHTTPMiddleware()(ctx, adminRouter)
}
r.Route("/status", func(r chi.Router) {
status.WireRoutes(r, &statusService)
})
if promRegistry != nil {
r.Route("/metrics", func(r chi.Router) {
r.Get("/", metrics.Handler(promRegistry).(http.HandlerFunc))
})
}
registerProfilingHandler(ctx, hl.LibraryConfig(), r)
})
adminRouter.Route("/", func(r chi.Router) {
if healthServer != nil {
healthServer.RegisterWith(r)
}
})
listenAdmin := prepareServerListener(ctx, rootAdminRouter, adminTLSConfig, *hl.AdminServerConfig(), "REST Admin Server")
return listenAdmin, nil
}
func configurePublicServerListener(ctx context.Context, hl Manager, mWare []func(handler http.Handler) http.Handler, hooks *Hooks) (StoppableServer, error) {
rootPublicRouter, publicRouter := configureRouters(hl.PublicServerConfig().HTTP.BasePath, mWare)
publicTLSConfig, err := config.MakeTLSConfig(ctx, hl.PublicServerConfig().HTTP.Common.TLS)
if err != nil {
return nil, err
}
for _, h := range hl.EnabledHandlers() {
h.WireRoutes(ctx, publicRouter)
}
if len(hl.EnabledHandlers()) == 0 {
anzlog.Info(ctx, "No service handlers enabled by config.")
}
prepareServerListenerFn := prepareServerListener
if hooks != nil && hooks.StoppableServerBuilder != nil {
prepareServerListenerFn = hooks.StoppableServerBuilder
}
listenPublic := prepareServerListenerFn(ctx, rootPublicRouter, publicTLSConfig, hl.PublicServerConfig().HTTP, "REST Public Server")
return listenPublic, nil
}
func registerProfilingHandler(ctx context.Context, cfg *config.LibraryConfig, parentRouter chi.Router) {
if cfg.Profiling {
anzlog.Info(ctx, "Register profiling handlers")
parentRouter.Group(func(r chi.Router) {
r.HandleFunc("/pprof", pprof.Index)
r.Handle("/allocs", pprof.Handler("allocs"))
r.Handle("/block", pprof.Handler("block"))
r.HandleFunc("/cmdline", pprof.Cmdline)
r.Handle("/goroutine", pprof.Handler("goroutine"))
r.Handle("/heap", pprof.Handler("heap"))
r.Handle("/mutex", pprof.Handler("mutex"))
r.HandleFunc("/profile", pprof.Profile)
r.Handle("/threadcreate", pprof.Handler("threadcreate"))
r.HandleFunc("/symbol", pprof.Symbol)
r.HandleFunc("/trace", pprof.Trace)
})
} else {
anzlog.Info(ctx, "Skip register profiling handler due to profiling disabled")
}
}
type httpServer struct {
ctx context.Context
cfg config.CommonHTTPServerConfig
server *http.Server
gracefulStopTimeout time.Duration
name string
}
func (s httpServer) Start() error {
var err error
if s.cfg.Common.TLS != nil {
anzlog.Infof(s.ctx, "TLS configuration present. Preparing to serve HTTPS for address: %s:%d%s", s.cfg.Common.HostName, s.cfg.Common.Port, s.cfg.BasePath)
err = s.server.ListenAndServeTLS("", "")
} else {
anzlog.Infof(s.ctx, "no TLS configuration present. Preparing to serve HTTP for address: %s:%d%s", s.cfg.Common.HostName, s.cfg.Common.Port, s.cfg.BasePath)
err = s.server.ListenAndServe()
}
if err != http.ErrServerClosed {
return err
}
return nil
}
func (s httpServer) GracefulStop() error {
// If the underlying HTTP server does not have timeouts set to sufficiently small values,
// and there are still some laggardly requests being processed, we may wait for an
// unreasonably long time to stop gracefully. To avoid that, set a limit on the
// maximum amount of time we're willing to wait. If we time out, give up and just do
// a hard stop.
var timeout time.Duration
if s.gracefulStopTimeout != 0 {
timeout = s.gracefulStopTimeout
} else {
timeout = defaultGracefulStopTimeout
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
err := s.server.Shutdown(ctx)
if err == context.DeadlineExceeded {
anzlog.Infof(s.ctx, "warning: GracefulStop timed out for HTTP server, hard-stopping HTTP server")
return s.server.Close()
}
return err
}
func (s httpServer) Stop() error {
return s.server.Close()
}
func (s httpServer) GetName() string {
return s.name
}
func prepareServerListener(ctx context.Context, rootRouter http.Handler, tlsConfig *tls.Config, httpConfig config.CommonHTTPServerConfig, name string) StoppableServer
|
func makeNewServer(ctx context.Context, router http.Handler, tlsConfig *tls.Config, serverConfig config.CommonHTTPServerConfig, serverLogger *log.Logger) *http.Server {
listenAddr := fmt.Sprintf("%s:%d", serverConfig.Common.HostName, serverConfig.Common.Port)
return &http.Server{
Addr: listenAddr,
Handler: router,
TLSConfig: tlsConfig,
ReadTimeout: serverConfig.ReadTimeout,
WriteTimeout: serverConfig.WriteTimeout,
ReadHeaderTimeout: 10 * time.Second,
IdleTimeout: 5 * time.Second,
MaxHeaderBytes: http.DefaultMaxHeaderBytes,
ErrorLog: serverLogger,
BaseContext: func(net.Listener) context.Context { return ctx },
}
}
func configureRouters(basePath string, mWare []func(handler http.Handler) http.Handler) (rootRouter, router *chi.Mux) {
if basePath == "" {
basePath = "/"
}
rootRouter = chi.NewRouter()
rootRouter.Use(mWare...)
router = rootRouter.Route(basePath, nil).(*chi.Mux)
return rootRouter, router
}
// SelectBasePath chooses between a specified base path and a dynamically chosen one.
func SelectBasePath(fromSpec, dynamic string) string {
switch fromSpec {
case "": // fromSpec not specified
switch dynamic {
case "": // dynamic not specified
return "/"
default:
return dynamic
}
default: // fromSpec specified
switch dynamic {
case "": // dynamic not specified
return fromSpec
default:
return dynamic
}
}
}
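// For reference, the resulting behaviour is:
// SelectBasePath("", "") == "/", SelectBasePath("/api", "") == "/api",
// SelectBasePath("", "/v1") == "/v1", and SelectBasePath("/api", "/v1") == "/v1".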
|
{
re := regexp.MustCompile(`TLS handshake error from .* EOF`) // Avoid spurious TLS errors from load balancer
writer := &TLSLogFilter{anzlog.GetLogger(ctx), re}
serverLogger := log.New(writer, "HTTPServer ", log.LstdFlags|log.Llongfile)
server := makeNewServer(ctx, rootRouter, tlsConfig, httpConfig, serverLogger)
anzlog.Infof(ctx, "configured listener for address: %s:%d%s", httpConfig.Common.HostName, httpConfig.Common.Port, httpConfig.BasePath)
return httpServer{
ctx: ctx,
cfg: httpConfig,
server: server,
name: name,
}
}
|
relations_request_builder.go
|
package relations
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/odataerrors"
id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d "github.com/microsoftgraph/msgraph-sdk-go/models/microsoft/graph/termstore"
i4fe8289e3b4f6886a7f69dbbf5fa801f35f28bb42b783e77ad1f69234bb8efff "github.com/microsoftgraph/msgraph-sdk-go/groups/item/sites/item/termstore/groups/item/sets/item/relations/count"
)
// RelationsRequestBuilder provides operations to manage the relations property of the microsoft.graph.termStore.set entity.
type RelationsRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// RelationsRequestBuilderGetOptions options for Get
type RelationsRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Request query parameters
Q *RelationsRequestBuilderGetQueryParameters;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// RelationsRequestBuilderGetQueryParameters indicates which terms have been pinned or reused directly under the set.
type RelationsRequestBuilderGetQueryParameters struct {
// Include count of items
Count *bool;
// Expand related entities
Expand []string;
// Filter items by property values
Filter *string;
// Order items by property values
Orderby []string;
// Search items by search phrases
Search *string;
// Select properties to be returned
Select []string;
// Skip the first n items
Skip *int32;
// Show only the first n items
Top *int32;
}
// RelationsRequestBuilderPostOptions options for Post
type RelationsRequestBuilderPostOptions struct {
//
Body id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.Relationable;
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// NewRelationsRequestBuilderInternal instantiates a new RelationsRequestBuilder and sets the default values.
func NewRelationsRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*RelationsRequestBuilder) {
m := &RelationsRequestBuilder{
}
m.urlTemplate = "{+baseurl}/groups/{group_id}/sites/{site_id}/termStore/groups/{group_id1}/sets/{set_id}/relations{?top,skip,search,filter,count,orderby,select,expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewRelationsRequestBuilder instantiates a new RelationsRequestBuilder and sets the default values.
func NewRelationsRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*RelationsRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewRelationsRequestBuilderInternal(urlParams, requestAdapter)
}
func (m *RelationsRequestBuilder) Count()(*i4fe8289e3b4f6886a7f69dbbf5fa801f35f28bb42b783e77ad1f69234bb8efff.CountRequestBuilder) {
return i4fe8289e3b4f6886a7f69dbbf5fa801f35f28bb42b783e77ad1f69234bb8efff.NewCountRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
// CreateGetRequestInformation indicates which terms have been pinned or reused directly under the set.
func (m *RelationsRequestBuilder) CreateGetRequestInformation(options *RelationsRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
|
if options != nil && options.Q != nil {
requestInfo.AddQueryParameters(*(options.Q))
}
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// CreatePostRequestInformation create new navigation property to relations for groups
func (m *RelationsRequestBuilder) CreatePostRequestInformation(options *RelationsRequestBuilderPostOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", options.Body)
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Get indicates which terms have been pinned or reused directly under the set.
func (m *RelationsRequestBuilder) Get(options *RelationsRequestBuilderGetOptions)(id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.RelationCollectionResponseable, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings {
"4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
"5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.CreateRelationCollectionResponseFromDiscriminatorValue, nil, errorMapping)
if err != nil {
return nil, err
}
return res.(id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.RelationCollectionResponseable), nil
}
// Post create new navigation property to relations for groups
func (m *RelationsRequestBuilder) Post(options *RelationsRequestBuilderPostOptions)(id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.Relationable, error) {
requestInfo, err := m.CreatePostRequestInformation(options);
if err != nil {
return nil, err
}
errorMapping := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ErrorMappings {
"4XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
"5XX": i7df4e557a1198b9abe14a17b40c7ac7db49b0d3050c749c3169541cb6f012b8b.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.CreateRelationFromDiscriminatorValue, nil, errorMapping)
if err != nil {
return nil, err
}
return res.(id62b8df0892707d421d6e0a5aefa589248c11f95794bf4122483a0ef812fad7d.Relationable), nil
}
| |
server.py
|
import json
from shadowray.config.v2ray import SERVER_FILE
from shadowray.config.v2ray import SERVER_KEY_FROM_SUBSCRIBE, SERVER_KEY_FROM_ORIGINAL
class Server:
    def __init__(self, filename=None):
        self.__servers = json.loads('{"servers_subscribe": [] ,"servers_original": []}')
        self.__filename = SERVER_FILE
        if filename is not None:
            f = open(filename, 'r')
            self.__servers = json.load(f)
            f.close()
            self.__filename = filename
    def save(self, filename=None):
        if filename is None:
            filename = self.__filename
        f = open(filename, 'w')
        f.write(json.dumps(self.__servers))
        f.close()
    def add(self, protocol, config, ps, key, host):
        self.__servers[key].append({
            "protocol": protocol,
            "config": config,
            "ps": ps,
            "host": host
        })
    def get(self, index):
        if self.__servers is None:
            return None
        return self.__servers[index]
    def get_servers(self):
        return self.__servers
    @property
    def original_servers_number(self):
        return len(self.__servers[SERVER_KEY_FROM_ORIGINAL])
    @property
    def subscribe_servers_number(self):
        return len(self.__servers[SERVER_KEY_FROM_SUBSCRIBE])
    @property
    def servers_number(self):
        return self.subscribe_servers_number + self.original_servers_number
    def get_server(self, index):
        if index >= self.servers_number:
            print("Index out of range.")
            return None
        if index < self.original_servers_number:
            return self.__servers[SERVER_KEY_FROM_ORIGINAL][index]
        else:
            return self.__servers[SERVER_KEY_FROM_SUBSCRIBE][index - self.original_servers_number]
    def get_config(self, index):
        if index >= self.servers_number:
            print("Index out of range.")
            return None
        if index < self.original_servers_number:
            return self.__servers[SERVER_KEY_FROM_ORIGINAL][index]['config']
        else:
            return self.__servers[SERVER_KEY_FROM_SUBSCRIBE][index - self.original_servers_number]['config']
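    # Index mapping sketch (illustrative): original servers come first, so with 2 original and
    # 3 subscribed servers, get_server(3) returns the second subscribed entry (index 3 - 2 = 1).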
|
    def clear(self, key):
        self.__servers[key].clear()
| |
medicion.js
|
//--------------------------------------------------------------------------
// Icasus project <https://gestionproyectos.us.es/projects/r2h2-icasus/>
// File: public/js/medicion.js
// Developers: Juanan Ruiz ([email protected]), Jesus Martin Corredera ([email protected]),
// Joaquín Valonero Zaera ([email protected])
//--------------------------------------------------------------------------
// Functions for the medicion.tpl template
// -------------------------------------------------------
// Estimation direction of the indicator/data item
var inverso = $("#med_datos").data("inverso");
var limite = $("#limite").data("limite");
var meta = $("#meta").data("meta");
// Variables: minimum and maximum allowed values
var valor_min = $("#valors").data("valor_min");
var valor_max = $("#valors").data("valor_max");
// Recording start and end dates
var grabacion_inicio = $("#gi").data("grabacion_inicio");
var grabacion_fin = $("#gf").data("grabacion_fin");
actualizaGrafica();
function actualizaGrafica() {
// Pie chart panel
$("#container").each(function () {
var contenedor = $(this).attr('id');
var nomIndicador = $(this).data("nombre_indicador");
var id_indicador = $(this).data("id_indicador");
var id_medicion = $(this).data("id_medicion");
// Variables to store the name and total of the requested measurement
var medicion, total = 0;
var urlApi = "api_publica.php?metodo=get_valores_con_timestamp&id=" + id_indicador;
// container for the chart data
var chartSerie = new HighchartSerie();
$.ajax({
url: urlApi,
type: "GET",
dataType: "json",
success: onDataReceived
});
function onDataReceived(datos) {
// Find the measurement so that we can then get its total
while (!medicion) {
datos.forEach(function (dato) {
if (dato.id_medicion == id_medicion) {
medicion = dato.medicion;
}
});
}
datos.forEach(function (dato) {
if (dato.etiqueta_mini && dato.id_medicion == id_medicion) {
chartSerie.add(dato);
}
// Store the total
if (medicion == dato.medicion && dato.id_unidad == 0) {
total = parseFloat(dato.valor);
}
});
// Round the total
total = Highcharts.numberFormat(total, 2);
// Ask chartSerie for the data series
var dataseries = chartSerie.getPieSerie();
// Pie chart
pintaGrafico({
chart: {
renderTo: contenedor,
events: {}
},
credits: {
enabled: false
},
title: {
text: nomIndicador,
style: {"fontSize": "14px"}
},
subtitle: {
text: 'Medición: ' + medicion + ' Total: ' + total
},
exporting: {
filename: nomIndicador
},
xAxis: {
type: 'category'
},
yAxis: {
title: {
text: 'Valores'
},
labels: {
format: '{value:,.2f}'
}
},
plotOptions: {
series: {
dataLabels: {
enabled: true,
format: '{y:,.2f} ({percentage:,.2f} %)'
}
}
},
tooltip: {
pointFormat: '<span style="color:{point.color}">\u25CF</span> {series.name}: <b>{point.y:,.2f} ({point.percentage:,.2f} %)</b><br/>'
},
series: dataseries
});
}
});
}
// Editing the measurement
function fila_editar(medicion, id_valor)
{
$('#valors').load("index.php?page=medicion_ajax&modulo=editarfila&ajax=true&id_medicion=" + medicion + "&id_valor=" + id_valor);
}
function fila_grabar(id_valor, medicion)
{
var value = $('[name=v_' + id_valor + ']').val();
var fecha_actual = new Date();
var fecha_inicio = new Date(grabacion_inicio);
var fecha_fin = new Date(grabacion_fin);
value = value.replace(',', '.');
$('#intervalo').text('[' + valor_min + ', ' + valor_max + '].');
$('#periodo').text('(' + fecha_inicio.toLocaleDateString() + ' al ' + fecha_fin.toLocaleDateString() + ').');
// Check that we are within the recording period
if ((fecha_actual >= fecha_inicio) && (fecha_actual <= fecha_fin)) {
if (value !== '')
{
if (isNaN(value) === false)
{
// If there is a [min, max] interval
if ($.isNumeric(valor_min) && $.isNumeric(valor_max)) {
if (value < valor_min || value > valor_max) {
$('#dialogo_valor_intervalo').modal('show');
}
else {
$.ajax({
type: "POST",
url: "index.php?page=medicion_ajax&modulo=grabarfila&ajax=true",
data: {"id_valor": id_valor, "valor": value},
success: function (response) {
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
$('#grafica').load("index.php?page=medicion_ajax&modulo=grafica&ajax=true&id_medicion=" + medicion);
}
});
}
}
// If there is only a minimum value
else if ($.isNumeric(valor_min) && !$.isNumeric(valor_max)) {
if (value < valor_min) {
$('#dialogo_valor_intervalo').modal('show');
}
else {
$.ajax({
type: "POST",
url: "index.php?page=medicion_ajax&modulo=grabarfila&ajax=true",
data: {"id_valor": id_valor, "valor": value},
success: function (response) {
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
$('#grafica').load("index.php?page=medicion_ajax&modulo=grafica&ajax=true&id_medicion=" + medicion);
}
});
}
}
// If there is only a maximum value
else if ($.isNumeric(valor_max) && !$.isNumeric(valor_min)) {
if (value > valor_max) {
$('#dialogo_valor_intervalo').modal('show');
}
else {
$.ajax({
type: "POST",
url: "index.php?page=medicion_ajax&modulo=grabarfila&ajax=true",
data: {"id_valor": id_valor, "valor": value},
success: function (response) {
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
$('#grafica').load("index.php?page=medicion_ajax&modulo=grafica&ajax=true&id_medicion=" + medicion);
}
});
}
}
// If no restriction on the values is defined
else {
$.ajax({
type: "POST",
url: "index.php?page=medicion_ajax&modulo=grabarfila&ajax=true",
data: {"id_valor": id_valor, "valor": value},
success: function (response) {
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
$('#grafica').load("index.php?page=medicion_ajax&modulo=grafica&ajax=true&id_medicion=" + medicion);
}
});
}
}
else if (value === "---")
{
$.post("index.php?page=medicion_ajax&modulo=anularvalor&ajax=true", {id_valor: id_valor}, function () {
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
$('#grafica').load("index.php?page=medicion_ajax&modulo=grafica&ajax=true&id_medicion=" + medicion);
});
}
else
{
$('#dialogo_valor_num').modal('show');
}
}
else
{
$('#dialogo_valor_nulo').modal('show');
}
}
else {
$('#dialogo_valor_periodo').modal('show');
}
}
function fila_cancelar(medicion)
{
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
}
function etiqueta_editar(medicion, content)
{
$('#' + content).load("index.php?page=medicion_ajax&modulo=editaretiqueta&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
}
function observaciones_editar(medicion, content)
{
$('#' + content).load("index.php?page=medicion_ajax&modulo=editarobservaciones&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
}
function etiqueta_editar_grabar(content, medicion, tag)
{
var value = $("[name=" + tag + "]").val();
if (value === '') {
$('#dialogo_etiqueta_nula').modal('show');
}
else {
$.post("index.php?page=medicion_ajax&modulo=grabaretiqueta&ajax=true", {id_medicion: medicion, contenedor: content, valor: value}, function () {
// $('#' + content).load("index.php?page=medicion_ajax&modulo=cancelaretiqueta&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
location.reload();
});
}
}
function observaciones_editar_grabar(content, medicion, tag)
{
var value = $("[name=" + tag + "]").val();
$.post("index.php?page=medicion_ajax&modulo=grabarobservaciones&ajax=true", {id_medicion: medicion, contenedor: content, valor: value}, function () {
$('#' + content).load("index.php?page=medicion_ajax&modulo=cancelarobservaciones&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
});
}
function etiqueta_editar_cancelar(content, medicion)
{
$('#' + content).load("index.php?page=medicion_ajax&modulo=cancelaretiqueta&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
}
function observaciones_ed
|
on)
{
$('#' + content).load("index.php?page=medicion_ajax&modulo=cancelarobservaciones&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
}
function fecha_editar(medicion, content)
{
$('#' + content).load("index.php?page=medicion_ajax&modulo=editaretiqueta&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
}
function fecha_grabar(medicion, content)
{
var dia = $("[name=" + content + "Day]").val();
var mes = $("[name=" + content + "Month]").val();
var year = $("[name=" + content + "Year]").val();
var value = year + "-" + mes + "-" + dia;
$.post("index.php?page=medicion_ajax&modulo=grabaretiqueta&ajax=true", {id_medicion: medicion, contenedor: content, valor: value}, function () {
$('#' + content).load("index.php?page=medicion_ajax&modulo=cancelaretiqueta&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
// If the recording period dates change, update the global values
if (content === 'gi') {
grabacion_inicio = value;
}
if (content === 'gf') {
grabacion_fin = value;
}
});
}
function fecha_cancelar(content, medicion)
{
$('#' + content).load("index.php?page=medicion_ajax&modulo=cancelaretiqueta&ajax=true&id_medicion=" + medicion + "&contenedor=" + content);
}
function referencia_editar(id, medicion)
{
$('#referencia_' + id).load("index.php?page=medicion_ajax&modulo=editarvalorreferencia&ajax=true&id_referencia=" + id + "&id_medicion=" + medicion);
}
function referencia_grabar(id, medicion, nombre_ref)
{
var value = $("[name=input_referencia_" + id + "]").val();
value = value.replace(',', '.');
$('#intervalo').text('[' + valor_min + ', ' + valor_max + '].');
if (value !== '')
{
if (isNaN(value) === false)
{
// Validate and save
if (validar_referencia(nombre_ref, value))
{
// If there is a [min, max] interval
if ($.isNumeric(valor_min) && $.isNumeric(valor_max)) {
if (value < valor_min || value > valor_max) {
$('#dialogo_valor_intervalo').modal('show');
}
else {
$.post("index.php?page=medicion_ajax&modulo=grabarvalorreferencia&ajax=true", {id_referencia: id, valor: value}, function () {
// If it is the limit or the target, update it
actualizar_estimacion(nombre_ref, value);
$('#referencia_' + id).load("index.php?page=medicion_ajax&modulo=cancelarvalorreferencia&ajax=true&id=" + id + "&id_medicion=" + medicion);
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
});
}
}
// If there is no [min, max] interval
else {
$.post("index.php?page=medicion_ajax&modulo=grabarvalorreferencia&ajax=true", {id_referencia: id, valor: value}, function () {
// If it is the limit or the target, update it
actualizar_estimacion(nombre_ref, value);
$('#referencia_' + id).load("index.php?page=medicion_ajax&modulo=cancelarvalorreferencia&ajax=true&id=" + id + "&id_medicion=" + medicion);
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
});
}
}
else {
$('#dialogo_valor_referencia').modal('show');
}
}
else if (value === "---")
{
$.post("index.php?page=medicion_ajax&modulo=anularvalorreferencia&ajax=true", {id_referencia: id}, function () {
// If it is the limit or the target, update it
actualizar_estimacion(nombre_ref, null);
$('#referencia_' + id).load("index.php?page=medicion_ajax&modulo=cancelarvalorreferencia&ajax=true&id=" + id + "&id_medicion=" + medicion);
$('#valors').load("index.php?page=medicion_ajax&modulo=cancelarfila&ajax=true&id_medicion=" + medicion);
});
}
else
{
$('#dialogo_valor_num').modal('show');
}
}
else
{
$('#dialogo_valor_nulo').modal('show');
}
}
// Validates that a reference value is correct when it is a target or a limit
function validar_referencia(nombre_ref, value) {
var validado = true;
// The indicator/data item has a descending estimation
if (inverso) {
// We are saving the limit
if (nombre_ref.indexOf('mite') !== -1) {
if (value <= meta && meta !== null) {
validado = false;
}
}
// We are saving the target
if (nombre_ref.indexOf('eta') !== -1) {
if (value >= limite && limite !== null) {
validado = false;
}
}
}
// The indicator/data item has an ascending estimation
else {
// We are saving the limit
if (nombre_ref.indexOf('mite') !== -1) {
if (value >= meta && meta !== null) {
validado = false;
}
}
// We are saving the target
if (nombre_ref.indexOf('eta') !== -1) {
if (value <= limite && limite !== null) {
validado = false;
}
}
}
return validado;
}
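// Example (illustrative): with ascending estimation (inverso is falsy) the limit must stay below
// the target, so saving a limit of 80 while the target (meta) is 70 is rejected; with descending
// estimation (inverso truthy) the relation is reversed and the limit must stay above the target.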
// Updates the global target and limit values after they have been saved
function actualizar_estimacion(nombre_ref, value) {
// The limit was saved
if (nombre_ref.indexOf('mite') !== -1) {
limite = value;
}
// The target was saved
if (nombre_ref.indexOf('eta') !== -1) {
meta = value;
}
}
function referencia_cancelar(id, medicion)
{
$('#referencia_' + id).load("index.php?page=medicion_ajax&modulo=cancelarvalorreferencia&ajax=true&id=" + id + "&id_medicion=" + medicion);
}
// Function that draws our chart
function pintaGrafico(chartOptions) {
$(document).ready(function () {
// Add a click event handler to the chart
chartOptions.chart.events.click = function () {
hs.htmlExpand(document.getElementById(chartOptions.chart.renderTo), {
width: 9999,
height: 9999,
allowWidthReduction: true
}, {
chartOptions: chartOptions
});
};
var chart = new Highcharts.Chart(chartOptions);
});
}
// Creates a new chart inside a Highslide popup
var i = 0; // Popup counter
hs.zIndexCounter = 2000; // z-index of the popup
hs.Expander.prototype.onAfterExpand = function () {
if (this.custom.chartOptions) {
var chartOptions = this.custom.chartOptions;
chartOptions.chart.height = 600;
chartOptions.chart.renderTo = $('.highslide-body')[i];
chartOptions.chart.events.click = function () {
};
var hsChart = new Highcharts.Chart(chartOptions);
i++;
}
};
// Chart refresh
$('#tab_med_datos').click(function () {
actualizaGrafica();
});
// Value tables
tablas_valores = $('#tabla_valores').DataTable({
"bPaginate": false,
"bSort": false,
fixedHeader: true,
dom: "<'row'<'col-sm-12'tr>>"
});
// Readjust the DataTables headers when switching tabs
$('a[data-toggle="tab"]').on('shown.bs.tab', function (e) {
tablas_valores.fixedHeader.adjust();
});
// Readjust the DataTables headers on scroll
$('.table-responsive').on('scroll', function () {
tablas_valores.fixedHeader.adjust();
});
// Validations
$('#page-wrapper').on('keyup', '.actualizar_dato', function () {
var actualizar_dato = $(this);
var valor = $(this).val();
valor = valor.replace(',', '.');
if ($.isNumeric(valor) || valor === '---') {
actualizar_dato.css("border-color", "green");
}
else {
actualizar_dato.css("border-color", "red");
}
});
$('#page-wrapper').on('keyup', '.actualizar_etiqueta', function () {
var actualizar_etiqueta = $(this);
var valor = $(this).val();
if (valor.length === 0) {
actualizar_etiqueta.css("border-color", "red");
}
else {
actualizar_etiqueta.css("border-color", "green");
}
});
|
itar_cancelar(content, medici
|
flash_f405_f411_f412_f413.rs
|
#![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! FLASH
//!
//! Used by: stm32f405, stm32f411, stm32f412, stm32f413
#[cfg(not(feature = "nosync"))]
pub use crate::stm32f4::peripherals::flash_v2::Instance;
pub use crate::stm32f4::peripherals::flash_v2::{RegisterBlock, ResetValues};
pub use crate::stm32f4::peripherals::flash_v2::{ACR, CR, KEYR, OPTCR, OPTKEYR, SR};
/// Access functions for the FLASH peripheral instance
pub mod FLASH {
use super::ResetValues;
#[cfg(not(feature = "nosync"))]
use super::Instance;
#[cfg(not(feature = "nosync"))]
const INSTANCE: Instance = Instance {
addr: 0x40023c00,
_marker: ::core::marker::PhantomData,
};
/// Reset values for each field in FLASH
pub const reset: ResetValues = ResetValues {
ACR: 0x00000000,
KEYR: 0x00000000,
OPTKEYR: 0x00000000,
SR: 0x00000000,
CR: 0x80000000,
OPTCR: 0x00000014,
};
#[cfg(not(feature = "nosync"))]
#[allow(renamed_and_removed_lints)]
#[allow(private_no_mangle_statics)]
#[no_mangle]
static mut FLASH_TAKEN: bool = false;
/// Safe access to FLASH
///
/// This function returns `Some(Instance)` if this instance is not
/// currently taken, and `None` if it is. This ensures that if you
/// do get `Some(Instance)`, you are ensured unique access to
/// the peripheral and there cannot be data races (unless other
/// code uses `unsafe`, of course). You can then pass the
/// `Instance` around to other functions as required. When you're
/// done with it, you can call `release(instance)` to return it.
///
/// `Instance` itself dereferences to a `RegisterBlock`, which
/// provides access to the peripheral's registers.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn take() -> Option<Instance> {
external_cortex_m::interrupt::free(|_| unsafe {
if FLASH_TAKEN {
None
} else {
FLASH_TAKEN = true;
Some(INSTANCE)
}
})
}
/// Release exclusive access to FLASH
///
/// This function allows you to return an `Instance` so that it
/// is available to `take()` again. This function will panic if
/// you return a different `Instance` or if this instance is not
/// already taken.
#[cfg(not(feature = "nosync"))]
#[inline]
pub fn
|
(inst: Instance) {
external_cortex_m::interrupt::free(|_| unsafe {
if FLASH_TAKEN && inst.addr == INSTANCE.addr {
FLASH_TAKEN = false;
} else {
panic!("Released a peripheral which was not taken");
}
});
}
/// Unsafely steal FLASH
///
/// This function is similar to take() but forcibly takes the
/// Instance, marking it as taken regardless of its previous
/// state.
#[cfg(not(feature = "nosync"))]
#[inline]
pub unsafe fn steal() -> Instance {
FLASH_TAKEN = true;
INSTANCE
}
}
/// Raw pointer to FLASH
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const FLASH: *const RegisterBlock = 0x40023c00 as *const _;
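// Usage sketch (illustrative only): acquire the peripheral with take(), use it, then hand it back
// with release() so other code can take() it again.
// if let Some(flash) = FLASH::take() {
//     // ... access registers through the Instance, which dereferences to a RegisterBlock ...
//     FLASH::release(flash);
// }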
|
release
|
functions.py
|
from math import sqrt
# function with a str parameter annotation (annotations are not enforced, so passing 3 still works)
def my_function(a: str):
    print(a)
my_function(3)
# function with a return type annotation
def my_function2(a: str) -> str:
    return a
print(my_function2(3))
# import sqrt from math and use it
print(sqrt(9.4323))
# import alias from math
# from math import sqrt as square_root
# function with list parameter
def my_function3(a: list):
    for i in a:
        print(i)
my_function3([1, 2, 3, 4, 5])
# function with dictionary parameter
def my_function4(a: dict):
|
my_function4({'a': 1, 'b': 2, 'c': 3})
# function with tuple parameter
def my_function5(a: tuple):
    for i in a:
        print(i)
my_function5(('a', 'b', 'c', 'd'))
# function with set parameter
def my_function6(a: set):
    for i in a:
        print(i)
my_function6({'a', 'b', 'c', 'd'})
# function with function parameter
def my_function7(a: callable):
    a()
# async function example (placeholder for an async http request)
async def my_function8(a: callable):
    a()
# my_function8(lambda: print('hello'))
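# Running the coroutine requires an event loop; a minimal sketch using the standard library:
# import asyncio
# asyncio.run(my_function8(lambda: print('hello')))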
|
    for key, value in a.items():
        print(key, value)
|
nsd.go
|
package nsd
import (
"bufio"
"bytes"
"fmt"
"net"
"os/exec"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/config"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
)
type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error)
// NSD is used to store configuration values
type NSD struct {
Binary string
Timeout config.Duration
UseSudo bool
Server string
ConfigFile string
run runner
}
var defaultBinary = "/usr/sbin/nsd-control"
var defaultTimeout = config.Duration(time.Second)
var sampleConfig = `
## Address of server to connect to, optionally ':port'. Defaults to the
## address in the nsd config file.
server = "127.0.0.1:8953"
## If running as a restricted user you can prepend sudo for additional access:
# use_sudo = false
## The default location of the nsd-control binary can be overridden with:
# binary = "/usr/sbin/nsd-control"
## The default location of the nsd config file can be overridden with:
# config_file = "/etc/nsd/nsd.conf"
## The default timeout of 1s can be overridden with:
# timeout = "1s"
`
// Description displays what this plugin is about
func (s *NSD) Description() string {
return "A plugin to collect stats from the NSD authoritative DNS name server"
}
// SampleConfig displays configuration instructions
func (s *NSD) SampleConfig() string {
return sampleConfig
}
// Shell out to nsd_stat and return the output
func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) {
cmdArgs := []string{"stats_noreset"}
if Server != "" {
host, port, err := net.SplitHostPort(Server)
if err == nil {
Server = host + "@" + port
}
cmdArgs = append([]string{"-s", Server}, cmdArgs...)
}
if ConfigFile != "" {
cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...)
}
cmd := exec.Command(cmdName, cmdArgs...)
if useSudo {
cmdArgs = append([]string{cmdName}, cmdArgs...)
cmd = exec.Command("sudo", cmdArgs...)
}
var out bytes.Buffer
cmd.Stdout = &out
err := internal.RunTimeout(cmd, time.Duration(timeout))
if err != nil {
return &out, fmt.Errorf("error running nsd-control: %s (%s %v)", err, cmdName, cmdArgs)
}
return &out, nil
}
// Gather collects stats from nsd-control and adds them to the Accumulator
func (s *NSD) Gather(acc telegraf.Accumulator) error {
out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ConfigFile)
if err != nil {
return fmt.Errorf("error gathering metrics: %s", err)
}
// Process values
fields := make(map[string]interface{})
fieldsServers := make(map[string]map[string]interface{})
scanner := bufio.NewScanner(out)
for scanner.Scan() {
cols := strings.Split(scanner.Text(), "=")
// Check split correctness
if len(cols) != 2
|
stat := cols[0]
value := cols[1]
fieldValue, err := strconv.ParseFloat(value, 64)
if err != nil {
acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v",
stat, value))
continue
}
if strings.HasPrefix(stat, "server") {
statTokens := strings.Split(stat, ".")
if len(statTokens) > 1 {
serverID := strings.TrimPrefix(statTokens[0], "server")
if _, err := strconv.Atoi(serverID); err == nil {
serverTokens := statTokens[1:]
field := strings.Join(serverTokens[:], "_")
if fieldsServers[serverID] == nil {
fieldsServers[serverID] = make(map[string]interface{})
}
fieldsServers[serverID][field] = fieldValue
}
}
} else {
field := strings.Replace(stat, ".", "_", -1)
fields[field] = fieldValue
}
}
acc.AddFields("nsd", fields, nil)
for thisServerID, thisServerFields := range fieldsServers {
thisServerTag := map[string]string{"server": thisServerID}
acc.AddFields("nsd_servers", thisServerFields, thisServerTag)
}
return nil
}
func init() {
inputs.Add("nsd", func() telegraf.Input {
return &NSD{
run: nsdRunner,
Binary: defaultBinary,
Timeout: defaultTimeout,
UseSudo: false,
Server: "",
ConfigFile: "",
}
})
}
|
{
continue
}
|
client.go
|
package ssl
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"github.com/project-flogo/core/data/coerce"
"github.com/project-flogo/core/support/log"
)
const ConfigSchema = `
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"caFile": {
"type": "string"
},
"certFile": {
"type": "string"
},
"keyFile": {
"type": "string"
},
"skipVerify": {
"type": "boolean"
},
"useSystemCert": {
"type": "boolean"
}
}
}`
type Config struct {
CAFile string `json:"caFile"`
CertFile string `json:"certFile"`
KeyFile string `json:"keyFile"`
SkipVerify bool `json:"skipVerify"`
UseSystemCert bool `json:"useSystemCert"`
}
func (i *Config) ToMap() map[string]interface{} {
return map[string]interface{}{
"caFile": i.CAFile,
"certFile": i.CertFile,
"keyFile": i.KeyFile,
"skipVerify": i.SkipVerify,
"useSystemCert": i.UseSystemCert,
}
}
func (i *Config) FromMap(values map[string]interface{}) error {
var err error
i.CAFile, err = coerce.ToString(values["caFile"])
if err != nil {
return err
}
i.CertFile, err = coerce.ToString(values["certFile"])
if err != nil {
return err
}
i.KeyFile, err = coerce.ToString(values["keyFile"])
if err != nil {
return err
}
i.SkipVerify, err = coerce.ToBool(values["skipVerify"])
if err != nil {
return err
}
i.UseSystemCert, err = coerce.ToBool(values["useSystemCert"])
if err != nil {
return err
}
return nil
}
func NewClientTLSConfig(config *Config) (*tls.Config, error) {
tlsConfig := &tls.Config{
|
InsecureSkipVerify: config.SkipVerify,
}
var caCertPool *x509.CertPool
if config.UseSystemCert {
caCertPool, _ = x509.SystemCertPool()
if caCertPool == nil {
log.RootLogger().Warnf("unable to get system cert pool, using empty pool")
}
}
if caCertPool == nil {
caCertPool = x509.NewCertPool()
}
if config.CAFile != "" {
caCert, err := ioutil.ReadFile(config.CAFile)
if err != nil {
return nil, fmt.Errorf("unable to read CAfile '%s' : %v", config.CAFile, err)
}
caCertPool.AppendCertsFromPEM(caCert)
}
tlsConfig.RootCAs = caCertPool
if config.CertFile != "" && config.KeyFile != "" {
cert, err := tls.LoadX509KeyPair(config.CertFile, config.KeyFile)
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{cert}
}
return tlsConfig, nil
}
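// Usage sketch (illustrative; file names are hypothetical): populate Config and derive a client *tls.Config.
// cfg := &Config{CAFile: "ca.pem", CertFile: "client.pem", KeyFile: "client-key.pem"}
// tlsCfg, err := NewClientTLSConfig(cfg)
// if err != nil {
//     // handle error
// }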
|
//MinVersion: tls.VersionTLS12,
|
material-module.ts
|
import { NgModule } from '@angular/core';
import { ScrollingModule } from '@angular/cdk/scrolling';
import { MatButtonModule } from '@angular/material/button';
import { MatCardModule } from '@angular/material/card';
import { MatIconModule } from '@angular/material/icon';
import { MatRippleModule } from '@angular/material/core';
import { MatToolbarModule } from '@angular/material/toolbar';
import { NoopAnimationsModule } from '@angular/platform-browser/animations';
|
MatButtonModule,
MatIconModule,
MatRippleModule,
NoopAnimationsModule,
MatCardModule,
MatToolbarModule,
ScrollingModule
]
})
export class MaterialModule {}
|
@NgModule({
exports: [
|
Commute.js
|
import React, { Component } from 'react';
import moment from 'moment';
import throttle from 'lodash.throttle';
import AddressPicker from './AddressPicker';
import Chart from './Chart';
import D3Wrapper from './D3Wrapper';
import DatePicker from './DatePicker';
import DirectionsLoader from './DirectionsLoader';
const intervalMinutes = 30;
function getDuration(response) {
return response.routes[0].legs[0].duration_in_traffic.value;
}
export default class
|
extends Component {
constructor(props) {
super(props);
let defaultDate = moment();
const endOfDay = defaultDate.clone().set({
hour: 20,
minute: 0
});
if (defaultDate.isAfter(endOfDay)) {
defaultDate.add(1, 'days');
}
this.state = {
enableToday: moment().isBefore(endOfDay),
homeAddress: localStorage.homeAddress,
workAddress: localStorage.workAddress,
currentDate: defaultDate,
beginDate: new Date(),
endDate: new Date(),
directionsResults: {}
};
this.directionsLoader = new DirectionsLoader(new this.props.GoogleMaps.DirectionsService);
this.directionsLoader.setAddresses(this.state.homeAddress, this.state.workAddress);
}
componentDidMount() {
if (!this.props.debugData) {
this.loadForDate(this.state.currentDate);
}
}
componentDidUpdate(prevProps, prevState) {
if (this.props.debugData) return;
if (!prevState.currentDate.isSame(this.state.currentDate) ||
prevState.homeAddress !== this.state.homeAddress ||
prevState.workAddress !== this.state.workAddress) {
this.directionsLoader.clear();
this.setState({ directionsResults: {} });
this.directionsLoader.setAddresses(this.state.homeAddress, this.state.workAddress);
this.loadForDate(this.state.currentDate);
localStorage.setItem('homeAddress', this.state.homeAddress || '');
localStorage.setItem('workAddress', this.state.workAddress || '');
}
}
loadForDate(date) {
// queue loading all data for the given date. ensures we don't load anything before the current
// time, which is not allowed by the google maps api for obvious reasons
const now = moment();
const remainder = intervalMinutes - now.minute() % intervalMinutes;
now.add(remainder, 'minutes');
let beginOfDay = date.clone().set({
hour: 6,
minute: 0
});
const endOfDay = date.clone().set({
hour: 20,
minute: 1
});
let middleOfDay = date.clone().set({
hour: 13,
minute: 0
});
if(now.isBefore(middleOfDay)) {
beginOfDay = moment.max(now, beginOfDay);
this.setState({ beginDate: beginOfDay.clone() });
this.load(beginOfDay, middleOfDay, true);
}
middleOfDay.add(intervalMinutes, 'minutes');
middleOfDay = moment.max(now, middleOfDay);
if(!now.isBefore(middleOfDay)) {
beginOfDay = middleOfDay.clone();
this.setState({ beginDate: beginOfDay });
}
if (middleOfDay.isBefore(endOfDay)) {
this.load(middleOfDay, endOfDay, false);
}
this.setState({ endDate: endOfDay });
}
load(startDate, endDate, navigateToWork) {
for (var date = startDate; !date.isAfter(endDate); date.add(intervalMinutes, 'minutes')) {
this.directionsLoader.loadRouteAtDate(date.toDate(), navigateToWork, this.loaded.bind(this));
}
}
loaded(date, trafficModel, response, status) {
if (status === 'OK') {
const model = this.state.directionsResults[date] || {};
model[trafficModel] = response;
console.log(`Duration for ${date} for ${trafficModel} is ${response.routes[0].legs[0].duration_in_traffic.text}`);
this.setState({directionsResults: {...this.state.directionsResults, [date]: model}});
} else {
console.log(`loading error: ${status}!`);
}
}
getData() {
if (this.props.debugData) return this.props.debugData;
const data = [];
for (const date in this.state.directionsResults) {
const raw = this.state.directionsResults[date];
if (raw.bestguess && raw.pessimistic && raw.optimistic) {
const datum = {
date: date
};
datum.bestguess = getDuration(raw.bestguess);
datum.pessimistic = getDuration(raw.pessimistic);
datum.optimistic = getDuration(raw.optimistic);
data.push(datum);
}
}
return data;
}
render() {
const data = this.getData();
const beginDate = this.props.debugData ? new Date(this.props.debugData[0].date) : this.state.beginDate;
const endDate = this.props.debugData ? new Date(this.props.debugData[this.props.debugData.length - 1].date) : this.state.endDate;
const setDate = date => this.setState({ currentDate: date });
const setHomeAddress = address => this.setState({ homeAddress: address });
const setWorkAddress = address => this.setState({ workAddress: address });
return (
<div>
<div className='row mx-2 mt-2'>
<form className='form form-inline'>
<DatePicker enableToday={this.state.enableToday} setDate={setDate} defaultDate={this.state.currentDate} />
<AddressPicker
name='Home'
defaultAddress={this.state.homeAddress}
setAddress={setHomeAddress} />
<AddressPicker
name='Work'
defaultAddress={this.state.workAddress}
setAddress={setWorkAddress} />
</form>
</div>
<div className='row'>
<D3Wrapper
chart={Chart}
height={600}
beginDate={beginDate}
endDate={endDate}
data={data} />
</div>
</div>
);
}
};
|
Commute
|
line_info.rs
|
use std::ffi::OsStr;
use std::path::{Component, Path};
use crate::prelude::*;
use rustc_span::{
FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
};
use cranelift_codegen::binemit::CodeOffset;
use cranelift_codegen::machinst::MachSrcLoc;
use gimli::write::{
Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
UnitEntryId,
};
// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
let mut iter = path.components();
let file_name = match iter.next_back() {
Some(Component::Normal(p)) => p,
component => {
panic!(
"Path component {:?} of path {} is an invalid filename",
component,
path.display()
);
}
};
let parent = iter.as_path();
(parent, file_name)
}
// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
#[cfg(unix)]
{
use std::os::unix::ffi::OsStrExt;
return path.as_bytes();
}
#[cfg(not(unix))]
{
return path.to_str().unwrap().as_bytes();
}
}
pub(crate) const MD5_LEN: usize = 16;
pub fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
if hash.kind == SourceFileHashAlgorithm::Md5 {
let mut buf = [0u8; MD5_LEN];
buf.copy_from_slice(hash.hash_bytes());
Some(FileInfo {
timestamp: 0,
size: 0,
md5: buf,
})
} else {
None
}
}
fn line_program_add_file(
line_program: &mut LineProgram,
line_strings: &mut LineStringTable,
file: &SourceFile,
) -> FileId {
match &file.name {
FileName::Real(path) => {
let (dir_path, file_name) = split_path_dir_and_file(path.stable_name());
let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
let file_name = osstr_as_utf8_bytes(file_name);
let dir_id = if !dir_name.is_empty() {
let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
line_program.add_directory(dir_name)
} else {
line_program.default_directory()
};
let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
let info = make_file_info(file.src_hash);
line_program.file_has_md5 &= info.is_some();
line_program.add_file(file_name, dir_id, info)
}
// FIXME give more appropriate file names
filename => {
let dir_id = line_program.default_directory();
let dummy_file_name = LineString::new(
filename.to_string().into_bytes(),
line_program.encoding(),
line_strings,
);
line_program.add_file(dummy_file_name, dir_id, None)
}
}
}
impl<'tcx> DebugContext<'tcx> {
pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
let file_id = line_program_add_file(
&mut self.dwarf.unit.line_program,
&mut self.dwarf.line_strings,
&loc.file,
);
let entry = self.dwarf.unit.get_mut(entry_id);
entry.set(
gimli::DW_AT_decl_file,
AttributeValue::FileIndex(Some(file_id)),
);
entry.set(
gimli::DW_AT_decl_line,
AttributeValue::Udata(loc.line as u64),
);
// FIXME: probably omit this
entry.set(
gimli::DW_AT_decl_column,
AttributeValue::Udata(loc.col.to_usize() as u64),
);
}
pub(super) fn create_debug_lines(
&mut self,
isa: &dyn cranelift_codegen::isa::TargetIsa,
symbol: usize,
entry_id: UnitEntryId,
context: &Context,
function_span: Span,
source_info_set: &indexmap::IndexSet<SourceInfo>,
) -> CodeOffset {
let tcx = self.tcx;
let line_program = &mut self.dwarf.unit.line_program;
let func = &context.func;
let line_strings = &mut self.dwarf.line_strings;
let mut last_span = None;
let mut last_file = None;
let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
if let Some(last_span) = last_span {
if span == last_span {
line_program.generate_row();
return;
}
}
last_span = Some(span);
// Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
// In order to have a good line stepping behavior in debugger, we overwrite debug
// locations of macro expansions with that of the outermost expansion site
// (unless the crate is being compiled with `-Z debug-macros`).
|
span
} else {
// Walk up the macro expansion chain until we reach a non-expanded span.
// We also stop at the function body level because no line stepping can occur
// at the level above that.
rustc_span::hygiene::walk_chain(span, function_span.ctxt())
};
let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
Ok(SourceFileAndLine { sf: file, line }) => {
let line_pos = file.line_begin_pos(span.lo());
(
file,
u64::try_from(line).unwrap() + 1,
u64::from((span.lo() - line_pos).to_u32()) + 1,
)
}
Err(file) => (file, 0, 0),
};
// line_program_add_file is very slow.
// Optimize for the common case of the current file not being changed.
let current_file_changed = if let Some(last_file) = &last_file {
// If the allocations are not equal, then the files may still be equal, but that
// is not a problem, as this is just an optimization.
!rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
} else {
true
};
if current_file_changed {
let file_id = line_program_add_file(line_program, line_strings, &file);
line_program.row().file = file_id;
last_file = Some(file.clone());
}
line_program.row().line = line;
line_program.row().column = col;
line_program.generate_row();
};
line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
let mut func_end = 0;
if let Some(ref mcr) = &context.mach_compile_result {
for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
line_program.row().address_offset = u64::from(start);
if !loc.is_default() {
let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
create_row_for_span(line_program, source_info.span);
} else {
create_row_for_span(line_program, function_span);
}
func_end = end;
}
line_program.end_sequence(u64::from(func_end));
func_end = mcr.buffer.total_size();
} else {
let encinfo = isa.encoding_info();
let mut blocks = func.layout.blocks().collect::<Vec<_>>();
blocks.sort_by_key(|block| func.offsets[*block]); // Ensure inst offsets always increase
for block in blocks {
for (offset, inst, size) in func.inst_offsets(block, &encinfo) {
let srcloc = func.srclocs[inst];
line_program.row().address_offset = u64::from(offset);
if !srcloc.is_default() {
let source_info =
*source_info_set.get_index(srcloc.bits() as usize).unwrap();
create_row_for_span(line_program, source_info.span);
} else {
create_row_for_span(line_program, function_span);
}
func_end = offset + size;
}
}
line_program.end_sequence(u64::from(func_end));
}
assert_ne!(func_end, 0);
let entry = self.dwarf.unit.get_mut(entry_id);
entry.set(
gimli::DW_AT_low_pc,
AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
);
entry.set(
gimli::DW_AT_high_pc,
AttributeValue::Udata(u64::from(func_end)),
);
self.emit_location(entry_id, function_span);
func_end
}
}
|
let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
|
stats_client.go
|
package collector
import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
)
type clientStats []struct {
val func(*client) float64
vec *prometheus.GaugeVec
}
// ClientStats creates a new stats collector which is able to
// expose the client metrics of a nsqd node to Prometheus. The
// client metrics are reported per topic and per channel.
//
// If there are too many clients, it could cause a timeout of the
// Prometheus collection process. So be sure the number of clients
// is small enough when using this collector.
func ClientStats(namespace string) StatsCollector
|
func (cs clientStats) collect(s *stats, out chan<- prometheus.Metric) {
for _, topic := range s.Topics {
for _, channel := range topic.Channels {
for _, client := range channel.Clients {
labels := prometheus.Labels{
"type": "client",
"topic": topic.Name,
"channel": channel.Name,
"deflate": strconv.FormatBool(client.Deflate),
"snappy": strconv.FormatBool(client.Snappy),
"tls": strconv.FormatBool(client.TLS),
"client_id": client.ID,
"hostname": client.Hostname,
"version": client.Version,
"remote_address": client.RemoteAddress,
}
for _, c := range cs {
c.vec.With(labels).Set(c.val(client))
c.vec.Collect(out)
}
}
}
}
}
|
{
labels := []string{"type", "topic", "channel", "deflate", "snappy", "tls", "client_id", "hostname", "version", "remote_address"}
return clientStats{
{
// TODO: Give state a descriptive name instead of a number.
val: func(c *client) float64 { return float64(c.State) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "state",
Help: "State of client",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.FinishCount) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "finish_count",
Help: "Finish count",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.MessageCount) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "message_count",
Help: "Queue message count",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.ReadyCount) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "ready_count",
Help: "Ready count",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.InFlightCount) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "in_flight_count",
Help: "In flight count",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.RequeueCount) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "requeue_count",
Help: "Requeue count",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.ConnectTime) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "connect_ts",
Help: "Connect timestamp",
}, labels),
},
{
val: func(c *client) float64 { return float64(c.SampleRate) },
vec: prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Name: "sample_rate",
Help: "Sample Rate",
}, labels),
},
}
}
|
main.go
|
/*
* WARNING! All changes made in this file will be lost!
* Created from 'scheme.tl' by 'mtprotoc'
*
* Copyright (c) 2021-present, Teamgram Studio (https://teamgram.io).
* All rights reserved.
*
* Author: teamgramio ([email protected])
*/
package main
import (
"github.com/teamgram/marmota/pkg/commands"
"github.com/teamgram/teamgram-server/app/service/biz/chat/internal/server"
)
func
|
() {
commands.Run(server.New())
}
|
main
|
StatusWidget.js
|
(function ($) {
AjaxSolr.StatusWidget = AjaxSolr.AbstractWidget.extend({
init: function(){
var self = this;
var statusID = getURLParameter("status");
if (statusID!=null && statusID!="null" && jQuery.trim(statusID)!=""){
var statusRequester=jQuery.getJSON(self.server_URL+"/"+statusID);
statusRequester.done(function(response){
self.loadStatus(response);
});
statusRequester.error(function(response){
alert("ERROR: We could'nt find the configuration state that you are requesting. ("+statusID+")");
});
}
},
loadStatus:function(json){
var self = this;
self.status=json;
coreURL=self.status.generalSettings.core;
private_key=self.status.generalSettings.key;
Manager.solrUrl=server+"/"+coreURL+"/";
reloadModel();
if (typeof self.reinit != "undefined")
for (var i=0;i<self.reinit.length;i++){
Manager.widgets[self.reinit[i]].init();
}
for (var widg in self.status.widgets){
var widget = Manager.widgets[widg];
if ( typeof widget.uploadStatus !="undefined"){
widget.uploadStatus(self.status.widgets[widg]);
|
previousStatus:"",
previousID:"",
setSize: function(size){
var self=this;
var s=size.split("x");
self.iframe_w=s[0];
self.iframe_h=s[1];
},
iframe_w:640,
iframe_h:480,
_fillElements:function(elements,url){
var self = this;
for (var i=0;i<elements.length;i++){
if (elements[i][0].localName=="textarea")
elements[i].html ("<iframe width=\""+self.iframe_w+"\" height=\""+self.iframe_h+"\" src=\""+url+"&embedded=true\" frameborder=\"0\" allowfullscreen></iframe>");
if (elements[i][0].localName=="input")
elements[i].attr ("value",url);
}
},
fillElementsWithCodeToEmbed: function(elements){
var self = this;
var json=self.getStatus();
if (json==self.previousStatus)
self._fillElements(elements,window.location.href.toString().split("?")[0]+"?status="+self.previousID);
else{
var form= "<form id=\"tmpstatusform233\" method=\"POST\" name=\"comment_form\" controller=\"comment\" action=\""+self.uploader_URL+"save_settings\">";
form += " <textarea name=\"json_settings\">";
form += json;
form += " </textarea><input type=\"submit\" value=\"Submit\"></form>";
$("body").append(form);
$('#tmpstatusform233').ajaxForm( {
success: function(responseText) {
//alert(responseText);
// textarea.html ("<iframe width=\"640\" height=\"480\" src=\""+window.location.href.toString().split("?")[0]+"?status="+responseText+"&embedded=true\" frameborder=\"0\" allowfullscreen></iframe>");
self._fillElements(elements,window.location.href.toString().split("?")[0]+"?status="+responseText);
},
error: function() {
alert("Sorry, We couldn't save the full status of PINV, the link and code to embed only include the proteins queried. ");
self._fillElements(elements,Manager.widgets["requester"].getURL());
// textarea.html ("<iframe width=\"640\" height=\"480\" src=\""+Manager.widgets["requester"].getURL()+"&embedded=true\" frameborder=\"0\" allowfullscreen></iframe>");
},
complete: function() {
$('#tmpstatusform233').remove();
}
});
$('#tmpstatusform233').submit();
}
},
getStatus:function(){
var self = this;
var widgetsStatus={};
for (var widg in Manager.widgets){
var widget = Manager.widgets[widg];
if ( typeof widget.status2JSON !="undefined"){
var st=widget.status2JSON();
if (st!=STATUS.NO_APPLICABLE)
widgetsStatus[widg]=st;
}
}
self.status={"generalSettings":{"core":coreURL},"widgets":widgetsStatus};
if ( typeof private_key != "undefined" && private_key != null && private_key != "null")
self.status.generalSettings["key"]=private_key;
return JSON.stringify(self.status);
},
status2JSON:function(){
return STATUS.NO_APPLICABLE;
},
uploadStatus:function(json){
return STATUS.NO_APPLICABLE;
}
});
})(jQuery);
|
}
}
},
|
log-to-file.d.ts
|
declare module 'log-to-file';
|
||
client.go
|
package client
import (
"bytes"
"context"
"fmt"
"io"
"os/exec"
"strings"
"time"
"github.com/mgoltzsche/k8spkg/pkg/resource"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
defaultTimeout = time.Duration(2 * time.Minute)
)
type K8sClient interface {
Apply(ctx context.Context, namespace string, resources resource.K8sResourceList, prune bool, labels []string) (resource.K8sResourceList, error)
Delete(ctx context.Context, namespace string, resources resource.K8sResourceRefList) (err error)
GetResource(ctx context.Context, kind string, namespace string, name string) (*resource.K8sResource, error)
Get(ctx context.Context, kinds []string, namespace string, labels []string) <-chan resource.ResourceEvent
//WatchResource(ctx context.Context, kind, namespace string, name string) <-chan WatchEvent
Watch(ctx context.Context, kind, namespace string, labels []string, watchOnly bool) <-chan resource.ResourceEvent
AwaitDeletion(ctx context.Context, namespace string, resources resource.K8sResourceRefList) (err error)
ResourceTypes(ctx context.Context) (types []*APIResourceType, err error)
ContainerLogs(ctx context.Context, namespace, podName, containerName string, previous, follow bool, writer io.Writer) (err error)
}
type notFoundError struct {
error
}
func IsNotFound(err error) bool {
_, ok := err.(notFoundError)
return ok
}
// APIResourceType represents a Kubernetes API resource type's metadata
type APIResourceType struct {
Name string
ShortNames []string
APIGroup string
Kind string
Namespaced bool
}
// ShortName returns the type's short name if any, or its name otherwise
func (t *APIResourceType) ShortName() (name string) {
name = t.Name
if len(t.ShortNames) > 0 {
name = t.ShortNames[0]
}
return
}
// FullName returns the type's short name with the APIGroup suffix if there is one
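// For example, a type with short name "deploy" and APIGroup "apps" yields "deploy.apps" (illustrative values).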
func (t *APIResourceType) FullName() (name string) {
if t.APIGroup == "" {
return t.ShortName()
}
return t.ShortName() + "." + t.APIGroup
}
type k8sClient struct {
kubeconfigFile string
}
type WatchEvent struct {
Resource *resource.K8sResource
Error error
}
func NewK8sClient(kubeconfigFile string) K8sClient {
return &k8sClient{kubeconfigFile}
}
func (c *k8sClient) Apply(ctx context.Context, namespace string, resources resource.K8sResourceList, prune bool, labelSelector []string) (l resource.K8sResourceList, err error) {
args := []string{"apply", "--wait", "-f", "-", "--record", "--timeout=" + getTimeout(ctx)}
if len(labelSelector) > 0 {
args = append(args, "-l", strings.Join(labelSelector, ","))
}
if prune {
// TODO: delete objects within other namespaces that belong to the package as well
args = append(args, "--prune")
}
if namespace != "" {
args = append(args, "-n", namespace)
}
for evt := range c.kubectlEmit(ctx, resources.YamlReader(), args) {
if evt.Error == nil {
l = append(l, evt.Resource)
} else {
err = evt.Error
}
}
return
}
func (c *k8sClient) Delete(ctx context.Context, namespace string, resources resource.K8sResourceRefList) (err error) {
for _, grp := range resources.GroupByNamespace() {
args := []string{"delete", "--wait", "--cascade", "--ignore-not-found", "--timeout=" + getTimeout(ctx)}
args = append(args, grp.Resources.Names()...)
if grp.Key == "" {
grp.Key = namespace
}
if grp.Key != "" {
args = append(args, "-n", grp.Key)
|
if e := kubectl(ctx, nil, nil, c.kubeconfigFile, args); e != nil && err == nil {
err = e
}
}
return
}
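// AwaitDeletion blocks until the given resources are deleted or the context deadline
// expires, tolerating kubectl "NotFound" errors for resources that are already gone.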
func (c *k8sClient) AwaitDeletion(ctx context.Context, namespace string, resources resource.K8sResourceRefList) error {
for _, grp := range resources.GroupByNamespace() {
args := []string{"wait", "--for", "delete", "--timeout=" + getTimeout(ctx)}
args = append(args, grp.Resources.Names()...)
if grp.Key == "" {
grp.Key = namespace
}
if grp.Key != "" {
args = append(args, "-n", grp.Key)
}
if err := kubectl(ctx, nil, nil, c.kubeconfigFile, args); err != nil {
if kerr, ok := errors.Cause(err).(*kubectlError); ok {
var unexpectedLines []string
for _, line := range kerr.stderr {
if !strings.HasPrefix(line, "Error from server (NotFound): ") {
unexpectedLines = append(unexpectedLines, line)
}
}
if len(unexpectedLines) > 0 {
return &kubectlError{kerr.error, unexpectedLines}
}
} else {
return err
}
}
}
return ctx.Err()
}
func (c *k8sClient) GetResource(ctx context.Context, kind string, namespace string, name string) (r *resource.K8sResource, err error) {
args := []string{"--ignore-not-found", strings.ToLower(kind), name}
for evt := range c.kubectlEmit(ctx, nil, getArgs(namespace, args...)) {
if evt.Error != nil && err == nil {
err = evt.Error
continue
}
r = evt.Resource
}
if r == nil && err == nil {
err = notFoundError{errors.Errorf("resource %s:%s/%s not found", namespace, kind, name)}
}
return
}
func (c *k8sClient) Get(ctx context.Context, kinds []string, namespace string, labels []string) <-chan resource.ResourceEvent {
args := []string{strings.ToLower(strings.Join(kinds, ","))}
if len(labels) > 0 {
args = append(args, "-l", strings.Join(labels, ","))
}
return c.kubectlEmit(ctx, nil, getArgs(namespace, args...))
}
/*func (c *k8sClient) WatchResource(ctx context.Context, kind, namespace string, name string) <-chan resource.ResourceEvent {
return c.kubectlGet(ctx, namespace, []string{"-w", strings.ToLower(kind), name})
}*/
func (c *k8sClient) Watch(ctx context.Context, kind, namespace string, labels []string, watchOnly bool) <-chan resource.ResourceEvent {
args := []string{"-w", strings.ToLower(kind)}
if len(labels) > 0 {
args = append(args, "-l", strings.Join(labels, ","))
}
if watchOnly {
args = append(args, "--watch-only")
}
return c.kubectlEmit(ctx, nil, getArgs(namespace, args...))
}
func (c *k8sClient) ContainerLogs(ctx context.Context, namespace, podName, containerName string, previous, follow bool, writer io.Writer) (err error) {
args := []string{"logs", podName, "-c", containerName}
if previous {
args = append(args, "--previous")
} else if follow {
args = append(args, "--follow")
}
if namespace != "" {
args = append(args, "-n", namespace)
}
return kubectl(ctx, nil, writer, c.kubeconfigFile, args)
}
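// kubectlEmit runs kubectl with "-o json" appended to args, pipes its stdout through a
// JSON stream parser and emits the parsed resources (or a terminal error) on the
// returned channel, which is closed once the command finishes.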
func (c *k8sClient) kubectlEmit(ctx context.Context, stdin io.Reader, args []string) <-chan resource.ResourceEvent {
reader, writer := io.Pipe()
done := make(chan error)
ch := make(chan resource.ResourceEvent)
go func() {
var err error
for evt := range resource.FromJsonStream(reader) {
if evt.Error != nil && err == nil {
err = evt.Error
} else {
ch <- evt
}
}
reader.CloseWithError(err)
done <- err
}()
go func() {
args = append(args, "-o", "json")
err := kubectl(ctx, stdin, writer, c.kubeconfigFile, args)
writer.CloseWithError(err)
if e := <-done; e != nil && err == nil {
err = errors.Wrap(e, "get")
}
if err != nil {
ch <- resource.ResourceEvent{nil, err}
}
close(ch)
}()
return ch
}
func kubectl(ctx context.Context, in io.Reader, out io.Writer, kubeconfigFile string, args []string) (err error) {
if kubeconfigFile != "" {
args = append(args, "--kubeconfig", kubeconfigFile)
}
var buf bytes.Buffer
cmd := exec.CommandContext(ctx, "kubectl", args...)
cmd.Stdin = in
cmd.Stdout = out
cmd.Stderr = &buf
logrus.Debugf("Running %+v", cmd.Args)
err = cmd.Run()
if err != nil && ctx.Err() != nil {
return errors.WithStack(ctx.Err())
}
stderr := buf.String()
if err != nil && len(stderr) > 0 {
stderr = strings.TrimSpace(stderr)
err = &kubectlError{errors.WithStack(err), strings.Split(stderr, "\n")}
}
if e := ctx.Err(); e != nil && err == nil {
err = e
} else {
err = errors.Wrapf(err, "%+v", cmd.Args)
}
return
}
type kubectlError struct {
error
stderr []string
}
func (e *kubectlError) Error() string {
return fmt.Sprintf("%s. stderr: %s", e.error, strings.Join(e.stderr, "\n "))
}
func getArgs(namespace string, args ...string) []string {
args = append([]string{"get"}, args...)
if namespace != "" {
args = append(args, "-n", namespace)
}
return args
}
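// getTimeout derives kubectl's --timeout value from the context deadline, leaving a
// one second margin so kubectl gives up just before the context expires, and falls
// back to the default timeout when no deadline is set.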
func getTimeout(ctx context.Context) string {
t, ok := ctx.Deadline()
if ok {
return t.Sub(time.Now().Add(time.Second)).String()
}
return defaultTimeout.String()
}
|
}
|
auth.rs
|
//
use openssl::sha::sha256;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use crate::acc::AccountInner;
use crate::acc::AcmeKey;
use crate::api::{ApiAuth, ApiChallenge, ApiEmptyObject, ApiEmptyString};
use crate::jwt::*;
use crate::persist::Persist;
use crate::util::{base64url, read_json};
use crate::Result;
/// An authorization ([ownership proof]) for a domain name.
///
/// Each authorization for an order must be progressed to a valid state before the ACME API
/// will issue a certificate.
///
/// Authorizations may or may not be required depending on previous orders against the same
/// ACME account. The ACME API decides if the authorization is needed.
///
/// Currently there are two ways of providing the authorization.
///
/// * In a text file served using [HTTP] from a web server of the domain being authorized.
/// * A `TXT` [DNS] record under the domain being authorized.
///
/// [ownership proof]: ../index.html#domain-ownership
/// [HTTP]: #method.http_challenge
/// [DNS]: #method.dns_challenge
#[derive(Debug)]
pub struct Auth<P: Persist> {
inner: Arc<AccountInner<P>>,
api_auth: ApiAuth,
auth_url: String,
}
impl<P: Persist> Auth<P> {
pub(crate) fn new(inner: &Arc<AccountInner<P>>, api_auth: ApiAuth, auth_url: &str) -> Self {
Auth {
inner: inner.clone(),
api_auth,
auth_url: auth_url.into(),
}
}
/// Domain name for this authorization.
pub fn domain_name(&self) -> &str {
&self.api_auth.identifier.value
}
/// Whether we actually need to do the authorization. This might not be needed if we have
/// proven ownership of the domain recently in a previous order.
pub fn need_challenge(&self) -> bool {
!self.api_auth.is_status_valid()
}
/// Get the http challenge.
///
/// The http challenge must be placed so it is accessible under:
///
/// ```text
/// http://<domain-to-be-proven>/.well-known/acme-challenge/<token>
/// ```
///
/// The challenge will be accessed over HTTP (not HTTPS), for obvious reasons.
///
/// ```no_run
/// use acme_lib::persist::Persist;
/// use acme_lib::order::Auth;
/// use acme_lib::Error;
/// use std::fs::File;
/// use std::io::Write;
///
/// fn web_authorize<P: Persist>(auth: &Auth<P>) -> Result<(), Error> {
/// let challenge = auth.http_challenge();
/// // Assuming our web server's root is under /var/www
/// let path = {
/// let token = challenge.http_token();
/// format!("/var/www/.well-known/acme-challenge/{}", token)
/// };
/// let mut file = File::create(&path)?;
/// file.write_all(challenge.http_proof().as_bytes())?;
/// challenge.validate(5000)?;
/// Ok(())
/// }
/// ```
pub fn http_challenge(&self) -> Challenge<P, Http> {
self.api_auth
.http_challenge()
.map(|c| Challenge::new(&self.inner, c.clone(), &self.auth_url))
.expect("http-challenge")
}
/// Get the dns challenge.
///
    /// The dns challenge is a `TXT` record that must be created under:
///
/// ```text
/// _acme-challenge.<domain-to-be-proven>. TXT <proof>
/// ```
///
    /// The <proof> contains the signed token proving this account updated it.
///
/// ```no_run
/// use acme_lib::persist::Persist;
/// use acme_lib::order::Auth;
/// use acme_lib::Error;
///
/// fn dns_authorize<P: Persist>(auth: &Auth<P>) -> Result<(), Error> {
/// let challenge = auth.dns_challenge();
/// let record = format!("_acme-challenge.{}.", auth.domain_name());
/// // route_53_set_record(&record, "TXT", challenge.dns_proof());
/// challenge.validate(5000)?;
/// Ok(())
/// }
/// ```
///
/// The dns proof is not the same as the http proof.
pub fn dns_challenge(&self) -> Challenge<P, Dns> {
self.api_auth
.dns_challenge()
.map(|c| Challenge::new(&self.inner, c.clone(), &self.auth_url))
.expect("dns-challenge")
}
/// Get the TLS ALPN challenge.
///
/// The TLS ALPN challenge is a certificate that must be served when a
/// request is made for the ALPN protocol "tls-alpn-01". The certificate
/// must contain a single dNSName SAN containing the domain being
/// validated, as well as an ACME extension containing the SHA256 of the
/// key authorization.
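    ///
    /// A minimal usage sketch; the certificate installation step is assumed and is not
    /// part of this crate:
    ///
    /// ```no_run
    /// use acme_lib::persist::Persist;
    /// use acme_lib::order::Auth;
    /// use acme_lib::Error;
    ///
    /// fn tls_alpn_authorize<P: Persist>(auth: &Auth<P>) -> Result<(), Error> {
    ///     let challenge = auth.tls_alpn_challenge();
    ///     // The proof goes into the ACME extension of the validation certificate.
    ///     let _acme_extension = challenge.tls_alpn_proof();
    ///     // install_tls_alpn_cert(auth.domain_name(), _acme_extension);
    ///     challenge.validate(5000)?;
    ///     Ok(())
    /// }
    /// ```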
pub fn tls_alpn_challenge(&self) -> Challenge<P, TlsAlpn> {
self.api_auth
.tls_alpn_challenge()
.map(|c| Challenge::new(&self.inner, c.clone(), &self.auth_url))
.expect("tls-alpn-challenge")
}
/// Access the underlying JSON object for debugging. We don't
/// refresh the authorization when the corresponding challenge is validated,
/// so there will be no changes to see here.
pub fn api_auth(&self) -> &ApiAuth {
&self.api_auth
}
}
/// Marker type for http challenges.
#[doc(hidden)]
pub struct Http;
/// Marker type for dns challenges.
#[doc(hidden)]
pub struct Dns;
/// Marker type for tls alpn challenges.
#[doc(hidden)]
pub struct TlsAlpn;
/// A DNS, HTTP, or TLS-ALPN challenge as obtained from the [`Auth`].
///
/// [`Auth`]: struct.Auth.html
pub struct Challenge<P: Persist, A> {
inner: Arc<AccountInner<P>>,
api_challenge: ApiChallenge,
auth_url: String,
_ph: std::marker::PhantomData<A>,
}
impl<P: Persist> Challenge<P, Http> {
/// The `token` is a unique identifier of the challenge. It is the file name in the
/// http challenge like so:
///
/// ```text
/// http://<domain-to-be-proven>/.well-known/acme-challenge/<token>
/// ```
pub fn http_token(&self) -> &str {
&self.api_challenge.token
}
/// The `proof` is some text content that is placed in the file named by `token`.
pub fn http_proof(&self) -> String {
let acme_key = self.inner.transport.acme_key();
key_authorization(&self.api_challenge.token, acme_key, false)
}
}
impl<P: Persist> Challenge<P, Dns> {
/// The `proof` is the `TXT` record placed under:
///
/// ```text
/// _acme-challenge.<domain-to-be-proven>. TXT <proof>
/// ```
pub fn dns_proof(&self) -> String {
let acme_key = self.inner.transport.acme_key();
key_authorization(&self.api_challenge.token, acme_key, true)
}
}
impl<P: Persist> Challenge<P, TlsAlpn> {
/// The `proof` is the contents of the ACME extension to be placed in the
/// certificate used for validation.
pub fn tls_alpn_proof(&self) -> [u8; 32] {
let acme_key = self.inner.transport.acme_key();
sha256(key_authorization(&self.api_challenge.token, acme_key, false).as_bytes())
}
}
impl<P: Persist, A> Challenge<P, A> {
fn new(inner: &Arc<AccountInner<P>>, api_challenge: ApiChallenge, auth_url: &str) -> Self {
Challenge {
inner: inner.clone(),
api_challenge,
auth_url: auth_url.into(),
_ph: std::marker::PhantomData,
}
}
    /// Check whether this challenge really needs validation. It might already have been
    /// done in a previous order for the same account.
pub fn need_validate(&self) -> bool {
self.api_challenge.is_status_pending()
}
/// Tell the ACME API to attempt validating the proof of this challenge.
///
/// The user must first update the DNS record or HTTP web server depending
    /// on the type of challenge being validated.
pub fn validate(self, delay_millis: u64) -> Result<()> {
let url_chall = &self.api_challenge.url;
let res = self.inner.transport.call(url_chall, &ApiEmptyObject)?;
let _: ApiChallenge = read_json(res)?;
let auth = wait_for_auth_status(&self.inner, &self.auth_url, delay_millis)?;
if !auth.is_status_valid() {
let error = auth
.challenges
.iter()
.filter_map(|c| c.error.as_ref())
.nth(0);
let reason = if let Some(error) = error {
format!(
"Failed: {}",
error.detail.clone().unwrap_or_else(|| error._type.clone())
)
} else {
"Validation failed and no error found".into()
};
return Err(reason.into());
}
Ok(())
}
/// Access the underlying JSON object for debugging.
pub fn api_challenge(&self) -> &ApiChallenge {
&self.api_challenge
}
}
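// Builds the ACME key authorization for a challenge token:
// `<token>.<base64url(sha256(JWK thumbprint))>`. When `extra_sha256` is set (as for
// DNS proofs), the whole string is hashed and base64url-encoded once more.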
fn key_authorization(token: &str, key: &AcmeKey, extra_sha256: bool) -> String {
let jwk: Jwk = key.into();
let jwk_thumb: JwkThumb = (&jwk).into();
let jwk_json = serde_json::to_string(&jwk_thumb).expect("jwk_thumb");
let digest = base64url(&sha256(jwk_json.as_bytes()));
let key_auth = format!("{}.{}", token, digest);
if extra_sha256 {
base64url(&sha256(key_auth.as_bytes()))
} else {
key_auth
}
}
fn wait_for_auth_status<P: Persist>(
inner: &Arc<AccountInner<P>>,
auth_url: &str,
delay_millis: u64,
) -> Result<ApiAuth> {
let auth = loop {
let res = inner.transport.call(auth_url, &ApiEmptyString)?;
let auth: ApiAuth = read_json(res)?;
if !auth.is_status_pending() {
break auth;
}
thread::sleep(Duration::from_millis(delay_millis));
};
Ok(auth)
}
#[cfg(test)]
mod test {
use crate::persist::*;
use crate::*;
#[test]
fn test_get_challenges() -> Result<()>
|
}
|
{
let server = crate::test::with_directory_server();
let url = DirectoryUrl::Other(&server.dir_url);
let persist = MemoryPersist::new();
let dir = Directory::from_url(persist, url)?;
let acc = dir.account("[email protected]")?;
let ord = acc.new_order("acmetest.example.com", &[])?;
let authz = ord.authorizations()?;
assert!(authz.len() == 1);
let auth = &authz[0];
{
let http = auth.http_challenge();
assert!(http.need_validate());
}
{
let dns = auth.dns_challenge();
assert!(dns.need_validate());
}
Ok(())
}
|
config.go
|
package config
import (
"fmt"
"regexp"
"strings"
"time"
)
const (
OutFormatJSON = "json"
OutFormatLineNumber = "line-number"
OutFormatColoredLineNumber = "colored-line-number"
OutFormatTab = "tab"
OutFormatCheckstyle = "checkstyle"
)
var OutFormats = []string{
OutFormatColoredLineNumber,
OutFormatLineNumber,
OutFormatJSON,
OutFormatTab,
OutFormatCheckstyle,
}
type ExcludePattern struct {
Pattern string
Linter string
Why string
}
var DefaultExcludePatterns = []ExcludePattern{
{
Pattern: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close" +
"|.*Flush|os\\.Remove(All)?|.*printf?|os\\.(Un)?Setenv). is not checked",
Linter: "errcheck",
Why: "Almost all programs ignore errors on these functions and in most cases it's ok",
},
{
Pattern: "(comment on exported (method|function|type|const)|" +
"should have( a package)? comment|comment should be of the form)",
Linter: "golint",
Why: "Annoying issue about not having a comment. The rare codebase has such comments",
},
{
Pattern: "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this",
Linter: "golint",
Why: "False positive when tests are defined in package 'test'",
},
{
Pattern: "(possible misuse of unsafe.Pointer|should have signature)",
Linter: "govet",
Why: "Common false positives",
},
{
Pattern: "ineffective break statement. Did you mean to break out of the outer loop",
Linter: "megacheck",
Why: "Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore",
},
{
Pattern: "Use of unsafe calls should be audited",
Linter: "gosec",
Why: "Too many false-positives on 'unsafe' usage",
},
{
Pattern: "Subprocess launch(ed with variable|ing should be audited)",
Linter: "gosec",
Why: "Too many false-positives for parametrized shell calls",
},
{
Pattern: "G104",
Linter: "gosec",
Why: "Duplicated errcheck checks",
},
{
Pattern: "(Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)",
Linter: "gosec",
Why: "Too many issues in popular repos",
},
{
Pattern: "Potential file inclusion via variable",
Linter: "gosec",
Why: "False positive is triggered by 'src, err := ioutil.ReadFile(filename)'",
},
}
func GetDefaultExcludePatternsStrings() []string {
var ret []string
for _, p := range DefaultExcludePatterns {
ret = append(ret, p.Pattern)
}
return ret
}
type Run struct {
IsVerbose bool `mapstructure:"verbose"`
Silent bool
CPUProfilePath string
MemProfilePath string
Concurrency int
PrintResourcesUsage bool `mapstructure:"print-resources-usage"`
Config string
NoConfig bool
Args []string
BuildTags []string `mapstructure:"build-tags"`
ExitCodeIfIssuesFound int `mapstructure:"issues-exit-code"`
AnalyzeTests bool `mapstructure:"tests"`
Deadline time.Duration
PrintVersion bool
SkipFiles []string `mapstructure:"skip-files"`
SkipDirs []string `mapstructure:"skip-dirs"`
}
type LintersSettings struct {
Govet struct {
CheckShadowing bool `mapstructure:"check-shadowing"`
}
Golint struct {
MinConfidence float64 `mapstructure:"min-confidence"`
}
Gofmt struct {
Simplify bool
}
Goimports struct {
LocalPrefixes string `mapstructure:"local-prefixes"`
}
Gocyclo struct {
MinComplexity int `mapstructure:"min-complexity"`
}
Varcheck struct {
CheckExportedFields bool `mapstructure:"exported-fields"`
}
Structcheck struct {
CheckExportedFields bool `mapstructure:"exported-fields"`
}
Maligned struct {
SuggestNewOrder bool `mapstructure:"suggest-new"`
}
Dupl struct {
Threshold int
}
Goconst struct {
MinStringLen int `mapstructure:"min-len"`
MinOccurrencesCount int `mapstructure:"min-occurrences"`
}
Depguard struct {
ListType string `mapstructure:"list-type"`
Packages []string
IncludeGoRoot bool `mapstructure:"include-go-root"`
}
Misspell struct {
Locale string
}
Unused struct {
CheckExported bool `mapstructure:"check-exported"`
}
Lll LllSettings
Unparam UnparamSettings
Nakedret NakedretSettings
Prealloc PreallocSettings
Errcheck ErrcheckSettings
}
type ErrcheckSettings struct {
CheckTypeAssertions bool `mapstructure:"check-type-assertions"`
CheckAssignToBlank bool `mapstructure:"check-blank"`
Ignore IgnoreFlag `mapstructure:"ignore"`
Exclude string `mapstructure:"exclude"`
}
type LllSettings struct {
LineLength int `mapstructure:"line-length"`
TabWidth int `mapstructure:"tab-width"`
}
type UnparamSettings struct {
CheckExported bool `mapstructure:"check-exported"`
Algo string
}
type NakedretSettings struct {
MaxFuncLines int `mapstructure:"max-func-lines"`
}
type PreallocSettings struct {
Simple bool
RangeLoops bool `mapstructure:"range-loops"`
ForLoops bool `mapstructure:"for-loops"`
}
var defaultLintersSettings = LintersSettings{
Lll: LllSettings{
LineLength: 120,
TabWidth: 1,
},
Unparam: UnparamSettings{
Algo: "cha",
},
Nakedret: NakedretSettings{
MaxFuncLines: 30,
},
Prealloc: PreallocSettings{
Simple: true,
RangeLoops: true,
ForLoops: false,
},
Errcheck: ErrcheckSettings{
Ignore: IgnoreFlag{},
},
}
type Linters struct {
Enable []string
Disable []string
EnableAll bool `mapstructure:"enable-all"`
DisableAll bool `mapstructure:"disable-all"`
Fast bool
Presets []string
}
type Issues struct {
ExcludePatterns []string `mapstructure:"exclude"`
UseDefaultExcludes bool `mapstructure:"exclude-use-default"`
MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"`
MaxSameIssues int `mapstructure:"max-same-issues"`
DiffFromRevision string `mapstructure:"new-from-rev"`
DiffPatchFilePath string `mapstructure:"new-from-patch"`
Diff bool `mapstructure:"new"`
}
type Config struct { //nolint:maligned
Run Run
Output struct {
Format string
PrintIssuedLine bool `mapstructure:"print-issued-lines"`
PrintLinterName bool `mapstructure:"print-linter-name"`
PrintWelcomeMessage bool `mapstructure:"print-welcome"`
}
LintersSettings LintersSettings `mapstructure:"linters-settings"`
Linters Linters
Issues Issues
InternalTest bool // Option is used only for testing golangci-lint code, don't use it
}
func NewDefault() *Config
|
// IgnoreFlag was taken from errcheck in order to keep the API identical.
// https://github.com/kisielk/errcheck/blob/1787c4bee836470bf45018cfbc783650db3c6501/main.go#L25-L60
type IgnoreFlag map[string]*regexp.Regexp
func (f IgnoreFlag) String() string {
pairs := make([]string, 0, len(f))
for pkg, re := range f {
prefix := ""
if pkg != "" {
prefix = pkg + ":"
}
pairs = append(pairs, prefix+re.String())
}
return fmt.Sprintf("%q", strings.Join(pairs, ","))
}
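// Set parses a comma separated list of "package:regexp" pairs (the package part may
// be omitted) and compiles each regexp into the map.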
func (f IgnoreFlag) Set(s string) error {
if s == "" {
return nil
}
for _, pair := range strings.Split(s, ",") {
colonIndex := strings.Index(pair, ":")
var pkg, re string
if colonIndex == -1 {
pkg = ""
re = pair
} else {
pkg = pair[:colonIndex]
re = pair[colonIndex+1:]
}
regex, err := regexp.Compile(re)
if err != nil {
return err
}
f[pkg] = regex
}
return nil
}
// Type returns the type of the flag, following the pflag format.
func (IgnoreFlag) Type() string {
return "stringToRegexp"
}
|
{
return &Config{
LintersSettings: defaultLintersSettings,
}
}
|
yaHTML5Sort.js
|
'use strict';
if (typeof yaloadonce_cada367e228644d8b17a7162c125d8e2 !== 'undefined')
throw "Reference yaHTML5Sort.js only once."
var yaloadonce_cada367e228644d8b17a7162c125d8e2 = true;
(function () {
var root = {}, instances = [];
function addClass(node, name) {
if (name && (name = name.trim())) {
var classes = (node.getAttribute('class') || '').replace(/[\n\t]/g, ' ').trim();
if ((' ' + classes + ' ').indexOf(' ' + name + ' ') === -1) {
node.setAttribute('class', classes + ' ' + name);
return true;
}
}
return false;
}
function removeClass(node, name) {
if (name && (name = name.trim())) {
var classes = ' ' + (node.getAttribute('class') || '').replace(/[\n\t]/g, ' ').trim() + ' ';
var newclasses = classes.replace(' ' + name + ' ', ' ');
if (classes.length != newclasses.length)
node.setAttribute('class', newclasses.trim());
}
}
function hasClass(node, name) {
if (name && (name = name.trim())) {
var classes = (node.getAttribute('class') || '').replace(/[\n\t]/g, ' ').trim();
return (' ' + classes + ' ').indexOf(' ' + name + ' ') !== -1;
}
return false;
}
function apply(scope) {
scope.$apply();
if (scope !== root.rootscope)
root.rootscope.$apply();
}
function nextElementSibling(item) {
var i = item;
if (i.nextElementSibling) return i.nextElementSibling;
while (i = i.nextSibling)
if (i.nodeType === 1) return i;
return null;
}
function previousElementSibling(item) {
var i = item;
if (i.previousElementSibling) return i.previousElementSibling;
while (i = i.previousSibling)
if (i.nodeType === 1) return i;
return null;
}
function removePlaceholder() {
if (root.placeholder && root.placeholder.parentNode)
root.placeholder.parentNode.removeChild(root.placeholder);
}
function isPlaceholderNeighbor() {
return root.placeholder === root.sourceNode.previousElementSibling
|| root.placeholder === root.sourceNode.nextElementSibling;
}
function
|
() {
return Array.prototype.indexOf.call(root.placeholder.parentNode.children, root.placeholder);
}
function findRepeat(item, upperhalf) {
var search = item;
if (search.nodeType === 8 && search.data.indexOf('ngRepeat:') > 0)
return search;
//try up first if in upper half
if (upperhalf)
while ((search = search.previousSibling) && (search.nodeType !== 8 || search.data.indexOf('ngRepeat:') === -1));
//then search down
if (!upperhalf || search === null) {
search = item;
while ((search = search.nextSibling) && (search.nodeType !== 8 || search.data.indexOf('ngRepeat:') === -1));
}
//try up again if not already tried
if (!upperhalf && search === null) {
search = item;
while ((search = search.previousSibling) && (search.nodeType !== 8 || search.data.indexOf('ngRepeat:') === -1));
}
return search;
}
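    // init parses the ng-repeat expression (e.g. "item in items | filter as alias track by id")
    // to capture the item and collection names, then wires up the ya-sort options for this instance.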
function init(scope, attrs) {
var instance = {},
options = scope[attrs.yaSort] || {},
match = attrs.ngRepeat.match(/^\s*([\s\S]+?)\s+in\s+([\s\S]+?)(?:\s+\|\s+([\s\S]+?))?(?:\s+as\s+([\s\S]+?))?(?:\s+track\s+by\s+([\s\S]+?))?\s*$/);
instance.entercount = 0;
instance.item = match[1];
instance.items = match[2];
instance.copy = options.oncopy !== undefined;
instance.replace = options.onreplace !== undefined;
instance.candrag = options.candrag || function () { return true; };
instance.onmove = options.onmove || function () { return false; };
instance.oncopy = options.oncopy || function () { return false; };
instance.onreplace = options.onreplace || function () { return false; };
instance.candrop = options.candrop || function () { return true; };
instance.dragHandleClass = options.dragHandleClass || null;
instance.dragSourceItemClass = options.dragSourceItemClass || null;
instance.dropTargetItemClass = options.dropTargetItemClass || null;
instance.dragItemClass = options.dragItemClass || null;
instance.dropPlaceholderClass = options.dropPlaceholderClass || null;
instance.itemArray = scope.$eval(match[2], scope) || scope.$eval(match[2] + '=[]', scope);
instance.disabled = options.disabled || false;
attrs.yaSort = instances.push(instance) - 1;
return instance;
}
    //has to run before ng-repeat (priority 1000) so the ngRepeat directive can be sniffed and yaSort initialized before ngRepeat has a chance to remove this DOM node
    //the ya-sort instance is initialized and drag-drop events are attached to the node containing the yaSort directive (its parent element)
angular.module('yaHTML5Sort', [])
.directive('yaSort', ['$rootScope', function (rootscope) {
return {
priority: 1001,
restrict: 'A',
link: function (scope, element, attrs) {
var container = element[0].parentNode, instance = init(scope, attrs);
container.setAttribute('ya-instance', attrs.yaSort);
if (instance.disabled) return;
container.addEventListener('dragenter', function (e) {
e.preventDefault();
if (!root.sourceItem) return;
instance.entercount++;
}, false);
container.addEventListener('dragleave', function (e) {
if (!root.sourceItem) return;
if (--instance.entercount === 0)
removePlaceholder();
}, false);
container.addEventListener('dragover', function (e) {
e.preventDefault();
e.stopPropagation();
if (!root.sourceItem) return;
var item = e.target,
iscontainer = item === container,
empty = instance.itemArray.length === 0,
containerhasitems = iscontainer && !empty,
notcompatible = !instance.candrop(root.sourceItem, root.sourceArray, instance.itemArray);
var layer = 0;
if (!iscontainer)
while (item.parentElement !== container) {
item = item.parentElement;
layer++;
}
if (notcompatible || containerhasitems || (instance.replace && e.shiftKey && item === root.sourceNode))
e.dataTransfer.dropEffect = 'none';
else
e.dataTransfer.dropEffect = (e.ctrlKey && root.copy) ? 'copy' : 'move';
if ((e.shiftKey && instance.replace) || notcompatible || containerhasitems) {
removePlaceholder();
instance.entercount = layer + 1;
}
else if (item !== root.placeholder) {
var upperhalf = e.offsetY < item.offsetHeight / 2;
if (iscontainer && empty) item = item.firstChild;
var sortitem = (!item.hasAttribute) ? false : item.hasAttribute('ya-sort');
if (!sortitem && (item = findRepeat(item, upperhalf)) != null)
upperhalf = false;
if (item !== null) {
var notprevious = previousElementSibling(item) !== root.placeholder;
var notnext = nextElementSibling(item) !== root.placeholder;
if (sortitem || (notnext && notprevious)) {
if (upperhalf) {
if (notprevious)
container.insertBefore(root.placeholder, item);
}
else if (notnext)
container.insertBefore(root.placeholder, item.nextSibling);
}
}
}
}, false);
container.addEventListener('drop', function (e) {
e.preventDefault();
e.stopPropagation();
if (!root.sourceItem) return;
instance.entercount = 0;
if (!root.placeholder.parentNode) return;
var index = placeholderIndex();
if (e.ctrlKey && root.copy) {
var copy = JSON.parse(JSON.stringify(root.sourceItem));
if (!instance.oncopy(copy, root.sourceArray, index, instance.itemArray))
instance.itemArray.splice(index, 0, copy);
apply(rootscope);
} else if (!isPlaceholderNeighbor()) {
if (!instance.onmove(root.sourceItem, root.sourceArray, index, instance.itemArray)) {
instance.itemArray.splice(index, 0, JSON.parse(JSON.stringify(root.sourceItem)));
root.sourceArray.splice(root.sourceArray.indexOf(root.sourceItem), 1);
}
apply(rootscope);
}
}, false);
}
};
}])
        //runs after ngRepeat for each repeated item; it may not run at all if the ngRepeat array is empty
        //attaches drag-drop events to the repeated item(s); ng-include runs at priority 400, so that needs to run first too
.directive('yaSort', ['$rootScope', '$timeout', function (rootscope, $timeout) {
return {
priority: 399,
restrict: 'A',
link: function (scope, element, attrs) {
var _element = element[0], hovercount = 0,
instance = instances[_element.parentElement.getAttribute('ya-instance')];
if (instance.disabled) return;
_element.setAttribute('draggable', 'true');
_element.addEventListener('mousedown', function (e) {
root.mouseTarget = e.target;
}, false);
_element.addEventListener('dragenter', function (e) {
e.preventDefault();
if (!root.sourceItem) return;
hovercount++;
}, false);
_element.addEventListener('drop', function (e) {
e.preventDefault();
e.stopPropagation();
if (!root.sourceItem) return;
instance.entercount = hovercount = 0;
if (e.shiftKey && instance.replace) {
if (!instance.candrop(root.sourceItem, root.sourceArray, instance.itemArray))
return;
if (root.sourceItem != scope[instance.item]) {
if (e.ctrlKey && root.copy) {
var copy = JSON.parse(JSON.stringify(root.sourceItem));
if (!instance.oncopy(copy, root.sourceArray, scope.$index, instance.itemArray) &&
!instance.onreplace(copy, root.sourceArray, scope.$index, instance.itemArray))
instance.itemArray[scope.$index] = copy;
} else {
if (!instance.onreplace(root.sourceItem, root.sourceArray, scope.$index, instance.itemArray)) {
instance.itemArray[scope.$index] = root.sourceItem;
root.sourceArray.splice(root.sourceArray.indexOf(root.sourceItem), 1);
}
}
apply(rootscope);
}
} else if (root.placeholder.parentNode) {
var index = placeholderIndex();
if (e.ctrlKey && root.copy) {
removePlaceholder();
var copy = JSON.parse(JSON.stringify(root.sourceItem));
if (!instance.oncopy(copy, root.sourceArray, index, instance.itemArray))
instance.itemArray.splice(index, 0, copy);
apply(rootscope);
} else if (root.sourceNode.nextElementSibling === root.placeholder ||
root.sourceNode.previousElementSibling === root.placeholder) {
removePlaceholder();
} else {
removePlaceholder();
if (!instance.onmove(root.sourceItem, root.sourceArray, index, instance.itemArray)) {
instance.itemArray.splice(index, 0, JSON.parse(JSON.stringify(root.sourceItem)));
root.sourceArray.splice(root.sourceArray.indexOf(root.sourceItem), 1);
}
apply(rootscope);
}
}
}, false);
function findHandle(e) {
var handle = false;
while (!(handle = hasClass(root.mouseTarget, instance.dragHandleClass)) && root.mouseTarget != _element)
root.mouseTarget = root.mouseTarget.parentElement;
root.mouseTarget = null;
if (!handle) e.preventDefault();
return handle;
}
_element.addEventListener('dragstart', function (e) {
if (instance.dragHandleClass && !findHandle(e)) return;
if (!instance.candrag(root.sourceItem, _element.parentNode))
e.preventDefault();
else {
e.dataTransfer.effectAllowed = 'all';
e.dataTransfer.setData('Text', 'firefox');
root.sourceNode = _element;
root.copy = instance.copy;
root.sourceItem = scope.$eval(instance.item, scope);
root.sourceArray = instance.itemArray;
root.rootscope = rootscope;
root.placeholder = _element.cloneNode(false);
root.placeholder.removeAttribute('ya-sort');
addClass(root.placeholder, instance.dropPlaceholderClass);
addClass(root.placeholder, instance.dropTargetItemClass);
removeClass(root.placeholder, instance.dragSourceItemClass);
addClass(_element, instance.dragItemClass);
$timeout(function () {
removeClass(_element, instance.dragItemClass);
addClass(_element, instance.dragSourceItemClass);
}, 0);
}
e.stopPropagation();
}, false);
_element.addEventListener('dragover', function (e) {
if (root.sourceItem && instance.dropTargetItemClass && instance.candrop(root.sourceItem, root.sourceArray, instance.itemArray)) {
if (instance.replace && e.shiftKey && _element !== root.sourceNode)
addClass(_element, instance.dropTargetItemClass);
else
removeClass(_element, instance.dropTargetItemClass);
}
}, false);
_element.addEventListener('dragend', function (e) {
e.preventDefault();
removePlaceholder();
instance.entercount = hovercount = 0;
removeClass(_element, instance.dragSourceItemClass);
}, false);
_element.addEventListener('dragleave', function (e) {
if (!root.sourceItem) return;
if (--hovercount === 0)
removeClass(_element, instance.dropTargetItemClass);
}, false);
}
};
}]);
})();
|
placeholderIndex
|
pathsCache.ts
|
import glob from "fast-glob";
import { pathUtil } from "../utils/pathUtil.js";
import IConfig from "../interfaces/IConfig.js";
export default class
|
{
private static instance: PathsCache;
private paths?: string[];
private postPaths?: string[];
private outputPaths?: string[];
private constructor() {}
private static initialize() {
PathsCache.instance = new PathsCache();
}
static addPath(path: string) {
PathsCache.instance.paths?.push(path);
if (pathUtil.isPost(path)) {
PathsCache.instance.postPaths?.push(path);
}
}
static clearPath(path: string) {
if (
PathsCache.instance.paths &&
PathsCache.instance.paths.indexOf(path) > -1
) {
PathsCache.instance.paths?.splice(
PathsCache.instance.paths?.indexOf(path),
1
);
}
if (
PathsCache.instance.postPaths &&
PathsCache.instance.postPaths.indexOf(path) > -1
) {
PathsCache.instance.postPaths?.splice(
PathsCache.instance.postPaths?.indexOf(path),
1
);
}
}
static async getPaths(config: IConfig) {
if (!PathsCache.instance) {
PathsCache.initialize();
}
if (!PathsCache.instance.paths) {
PathsCache.instance.paths = await glob("**/*.{md,ejs}", {
cwd: `${config.folders.content.path}`,
ignore: [config.folders.output.path, "node_modules"],
});
}
return PathsCache.instance.paths;
}
static async getPostPaths(config: IConfig) {
if (!PathsCache.instance) {
PathsCache.initialize();
}
if (!PathsCache.instance.postPaths) {
PathsCache.instance.postPaths = (
await PathsCache.getPaths(config)
).filter((path) => pathUtil.isPost(path));
}
return PathsCache.instance.postPaths;
}
static clearOutputPath(path: string) {
if (!PathsCache.instance.outputPaths) {
throw new Error("Output paths must be cached before using.");
}
if (
PathsCache.instance.outputPaths &&
PathsCache.instance.outputPaths.indexOf(path) > -1
) {
PathsCache.instance.outputPaths?.splice(
PathsCache.instance.outputPaths?.indexOf(path),
1
);
}
}
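  // Caches the generated output paths (HTML pages, API JSON and root-level files),
  // excluding compiled scripts/styles and files copied from the rootFiles folder.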
static async cacheOutputPaths(config: IConfig) {
if (!PathsCache.instance) {
PathsCache.initialize();
}
const filePatterns = [
`${config.folders.output.path}/**/*.html`,
`${config.folders.output.path}/api/**/*.json`,
`${config.folders.output.path}/*.*`,
];
const rootFiles = await glob("*", {
cwd: `${config.folders.rootFiles.path}`,
});
let compiledScripts: string[] = [];
if (config.typescript.enabled) {
compiledScripts = [
`${config.folders.output.path}/**/*.js`,
`${config.folders.output.path}/**/*.js.map`,
];
}
let compiledStyles: string[] = [];
if (config.sass.enabled) {
compiledStyles = [
`${config.folders.output.path}/**/*.css`,
`${config.folders.output.path}/**/*.css.map`,
];
}
PathsCache.instance.outputPaths = await glob(filePatterns, {
cwd: `${config.folders.site.path}`,
ignore: [
...compiledScripts,
...compiledStyles,
...rootFiles.map((item) => `${config.folders.output.path}/${item}`),
],
});
}
static getOutputPaths() {
if (!PathsCache.instance.outputPaths) {
throw new Error(
"Output paths must be initialized and cached before using."
);
}
return PathsCache.instance.outputPaths;
}
}
|
PathsCache
|
cleanup.go
|
package e2e
import (
"context"
"fmt"
"strconv"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/eks-anywhere/internal/pkg/ec2"
"github.com/aws/eks-anywhere/internal/pkg/s3"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/validations"
)
func CleanUpAwsTestResources(storageBucket string, maxAge string, tag string) error {
session, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
}
logger.V(1).Info("Fetching list of EC2 instances")
key := "Integration-Test"
value := tag
maxAgeFloat, err := strconv.ParseFloat(maxAge, 64)
if err != nil {
return fmt.Errorf("error parsing max age: %v", err)
}
results, err := ec2.ListInstances(session, key, value, maxAgeFloat)
if err != nil {
return fmt.Errorf("error listing EC2 instances: %v", err)
}
logger.V(1).Info("Successfully listed EC2 instances for termination")
if len(results) != 0 {
logger.V(1).Info("Terminating EC2 instances")
err = ec2.TerminateEc2Instances(session, results)
if err != nil {
return fmt.Errorf("error terminating EC2 instacnes: %v", err)
}
logger.V(1).Info("Successfully terminated EC2 instances")
} else {
logger.V(1).Info("No EC2 instances available for termination")
}
logger.V(1).Info("Clean up s3 bucket objects")
err = s3.CleanUpS3Bucket(session, storageBucket, maxAgeFloat)
if err != nil {
return fmt.Errorf("error clean up s3 bucket objects: %v", err)
}
logger.V(1).Info("Successfully cleaned up s3 bucket")
return nil
}
func
|
(ctx context.Context, clusterName string) error {
clusterName, err := validations.ValidateClusterNameArg([]string{clusterName})
if err != nil {
return fmt.Errorf("error validating cluster name: %v", err)
}
err = vsphereRmVms(ctx, clusterName)
if err != nil {
return fmt.Errorf("error removing vcenter vms: %v", err)
}
logger.V(1).Info("Vsphere vcenter vms cleanup complete")
return nil
}
|
CleanUpVsphereTestResources
|
config.go
|
package config
import (
"fmt"
"text/template"
"github.com/spf13/viper"
tb "gopkg.in/telebot.v3"
)
type RunType string
var (
version = "dev"
commit = "none"
date = "unknown"
ProjectName string = "flowerss"
BotToken string
Socks5 string
TelegraphToken []string
TelegraphAccountName string
TelegraphAuthorName string = "rss-bot-core"
TelegraphAuthorURL string
	// EnableTelegraph whether to enable Telegraph
EnableTelegraph bool = false
PreviewText int = 0
DisableWebPagePreview bool = false
Mysql MysqlConfig
SQLitePath string
EnableMysql bool = false
	// UpdateInterval RSS fetch interval
UpdateInterval int = 10
	// ErrorThreshold error threshold for fetching an RSS source
ErrorThreshold uint = 100
	// MessageTpl template for RSS update push messages
MessageTpl *template.Template
	// MessageMode telegram message rendering mode
MessageMode tb.ParseMode
	// TelegramEndpoint telegram bot server address, empty by default
TelegramEndpoint string = tb.DefaultApiURL
// UserAgent User-Agent
UserAgent string
	// RunMode run mode, Release / Debug
RunMode RunType = ReleaseMode
	// AllowUsers users allowed to use the bot
AllowUsers []int64
	// DBLogMode whether to print database logs
DBLogMode bool = false
)
const (
logo = `
__ _
/ _| | _____ _____ _ __ ___ ___
| |_| |/ _ \ \ /\ / / _ \ '__/ __/ __|
| _| | (_) \ V V / __/ | \__ \__ \
|_| |_|\___/ \_/\_/ \___|_| |___/___/
`
defaultMessageTplMode = tb.ModeHTML
defaultMessageTpl = `<b>{{.SourceTitle}}</b>{{ if .PreviewText }}
---------- Preview ----------
{{.PreviewText}}
-----------------------------
{{- end}}{{if .EnableTelegraph}}
<a href="{{.TelegraphURL}}">【預覽】</a><a href="{{.RawLink}}">{{.ContentTitle}}</a>
{{- else }}
<a href="{{.RawLink}}">{{.ContentTitle}}</a>
{{- end }}
{{.Tags}}
`
defaultMessageMarkdownTpl = `** {{.SourceTitle}} **{{ if .PreviewText }}
---------- Preview ----------
{{.PreviewText}}
-----------------------------
{{- end}}{{if .EnableTelegraph}}
[【預覽】]({{.TelegraphURL}})[{{.ContentTitle}}]({{.RawLink}})
{{- else }}
[{{.ContentTitle}}]({{.RawLink}})
{{- end }}
{{.Tags}}
`
//defaultMessageListItemTpl = `{{if .EnableTelegraph}}
//[【預覽】]({{.TelegraphURL}})[{{.ContentTitle}}]({{.RawLink}}) {{- else }}
//[{{.ContentTitle}}]({{.RawLink}}){{- end }}`
defaultMessageListItemTpl = `{{if .EnableTelegraph}}
<a href="{{.TelegraphURL}}">【預覽】</a><a href="{{.RawLink}}">{{.ContentTitle}}</a>
{{- else }}
<a href="{{.RawLink}}">{{.ContentTitle}}</a>
{{- end }}`
TestMode RunType = "Test"
ReleaseMode RunType = "Release"
)
// MysqlConfig mysql configuration
type MysqlConfig struct {
Host string
Port int
User string
Password string
DB string
}
type TplData struct {
SourceTitle string
ContentTitle string
RawLink string
PreviewText string
TelegraphURL string
Tags string
EnableTelegraph bool
}
func AppVersionInfo() (s string) {
s = fmt.Sprintf("version %v, commit %v, built at %v", version, commit, date)
return
}
// GetString get string config value by key
func GetString(key string) string {
var value string
if viper.IsSet(key) {
value = viper.GetString(key)
}
return value
}
| ||
command.js
|
"use strict";
const log = require("npmlog");
const versionCommand = require("@lerna/version/command");
/**
* @see https://github.com/yargs/yargs/blob/master/docs/advanced.md#providing-a-command-module
*/
exports.command = "publish [bump]";
exports.describe = "Publish packages in the current project.";
exports.builder = yargs => {
const opts = {
c: {
describe: "Publish packages after every successful merge using the sha as part of the tag.",
alias: "canary",
type: "boolean",
},
// preid is copied from ../version/command because a whitelist for one option isn't worth it
preid: {
describe: "Specify the prerelease identifier when publishing a prerelease",
type: "string",
requiresArg: true,
defaultDescription: "alpha",
},
contents: {
describe: "Subdirectory to publish. Must apply to ALL packages.",
type: "string",
requiresArg: true,
defaultDescription: ".",
},
"dist-tag": {
describe: "Publish packages with the specified npm dist-tag",
type: "string",
requiresArg: true,
},
"git-head": {
describe:
"Explicit SHA to set as gitHead when packing tarballs, only allowed with 'from-package' positional.",
type: "string",
requiresArg: true,
},
otp: {
describe: "Supply a one-time password for publishing with two-factor authentication.",
type: "string",
requiresArg: true,
},
registry: {
describe: "Use the specified registry for all npm client operations.",
type: "string",
requiresArg: true,
},
"registry-scope": {
describe: "Use the specified registry for all npm client operations.",
type: "string",
requiresArg: true,
},
"require-scripts": {
describe: "Execute ./scripts/prepublish.js and ./scripts/postpublish.js, relative to package root.",
type: "boolean",
},
"no-git-reset": {
describe: "Do not reset changes to working tree after publishing is complete.",
type: "boolean",
},
"git-reset": {
// proxy for --no-git-reset
hidden: true,
type: "boolean",
},
"temp-tag": {
describe: "Create a temporary tag while publishing.",
type: "boolean",
},
"no-verify-access": {
describe: "Do not verify package read-write access for current npm user.",
type: "boolean",
},
"verify-access": {
// proxy for --no-verify-access
hidden: true,
type: "boolean",
},
// y: {
// describe: "Skip all confirmation prompts.",
// alias: "yes",
// type: "boolean",
// },
};
composeVersionOptions(yargs);
yargs.options(opts);
// "unhide" duplicate options
const { hiddenOptions } = yargs.getOptions();
const sharedKeys = ["preid", "y"];
for (const sharedKey of sharedKeys) {
hiddenOptions.splice(hiddenOptions.findIndex(k => k === sharedKey), 1);
}
yargs.group(Object.keys(opts).concat(sharedKeys), "Command Options:");
return yargs
.option("npm-tag", {
// TODO: remove in next major release
hidden: true,
conflicts: "dist-tag",
type: "string",
requiresArg: true,
})
.option("verify-registry", {
// TODO: remove in next major release
hidden: true,
type: "boolean",
})
.option("skip-npm", {
// TODO: remove in next major release
// deprecation notice handled in initialize()
hidden: true,
type: "boolean",
})
.check(argv => {
/* eslint-disable no-param-reassign */
if (argv.npmTag) {
argv.distTag = argv.npmTag;
argv["dist-tag"] = argv.npmTag;
delete argv.npmTag;
delete argv["npm-tag"];
log.warn("deprecated", "--npm-tag has been renamed --dist-tag");
}
/* eslint-enable no-param-reassign */
return argv;
});
};
exports.handler = function handler(argv) {
return require(".")(argv);
};
function
|
(yargs) {
versionCommand.addBumpPositional(yargs, ["from-git", "from-package"]);
versionCommand.builder(yargs, "publish");
return yargs;
}
|
composeVersionOptions
|
lb.go
|
package main
|
"net"
"github.com/PacktPublishing/Go-for-DevOps/chapter/8/rollout/lb/server/grpc"
"github.com/PacktPublishing/Go-for-DevOps/chapter/8/rollout/lb/server/http"
)
func main() {
ln, err := net.Listen("tcp", ":8080")
if err != nil {
panic(err)
}
lb, err := http.New()
if err != nil {
panic(err)
}
log.Println("load balancer started(8080)...")
go func() {
if err := lb.Serve(ln); err != nil {
panic(err)
}
}()
serv, err := grpc.New(":8081", lb)
if err != nil {
panic(err)
}
log.Println("grpc server started(8081)...")
if err := serv.Start(); err != nil {
panic(err)
}
}
|
import (
"log"
|
server.go
|
package server
import (
"embed"
"errors"
"fmt"
"io/fs"
"log"
"net/http"
"os"
"path"
"strconv"
"github.com/Oppodelldog/roamer/internal/server/action"
"github.com/Oppodelldog/roamer/internal/server/ws"
)
//go:embed html
var content embed.FS
//go:embed img
var img embed.FS
//go:embed js
var js embed.FS
//go:embed css
var css embed.FS
//go:embed root/favicon.ico
var root embed.FS
const port = 10982
func Start() {
var (
actions = make(chan action.Action)
sequencerActions = make(chan action.Action)
soundActions = make(chan action.Action)
hub = ws.StartHub(newSessionFunc(actions, sequencerActions, soundActions))
)
action.StartConfigWorker(actions, hub.Broadcast())
action.StartSequencerWorker(sequencerActions, hub.Broadcast())
action.StartSoundSettingsWorker(soundActions, hub.Broadcast())
http.Handle("/", restrictMethod(http.HandlerFunc(serveIndexPage), http.MethodGet))
http.Handle("/attributions.html", restrictMethod(addPrefix("/html/", http.FileServer(http.FS(contentFS()))), http.MethodGet))
http.Handle("/img/", restrictMethod(http.FileServer(http.FS(imgFS())), http.MethodGet))
http.Handle("/js/", restrictMethod(http.FileServer(http.FS(jsFS())), http.MethodGet))
http.Handle("/css/", restrictMethod(http.FileServer(http.FS(cssFS())), http.MethodGet))
http.Handle("/favicon.ico", restrictMethod(addPrefix("/root", http.FileServer(http.FS(root))), http.MethodGet))
http.Handle("/manifest.json", restrictMethod(ManifestHandler(Manifest{
Name: "Roamer",
ShortName: "Roamer",
ThemeColor: "#ffffff",
BackgroundColor: "#000000",
Display: "browser",
Scope: "/",
StartUrl: "/",
}), http.MethodGet))
http.Handle("/ws", restrictMethod(websocketHandler(hub), http.MethodGet))
log.Printf("Starting Roamer")
log.Printf("http://127.0.0.1:%v", port)
err := http.ListenAndServe(":"+strconv.Itoa(port), nil)
if err != nil && !errors.Is(err, http.ErrServerClosed) {
fmt.Printf("error running http server: %v", err)
}
}
func newSessionFunc(actions, sequencerActions, soundActions chan action.Action) ws.NewSessionFunc {
return func(client *ws.Client) {
action.ClientSession(client, actions, sequencerActions, soundActions)
}
}
func addPrefix(s string, h http.Handler) http.Handler
|
func restrictMethod(h http.Handler, allowedMethods ...string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, allowedMethod := range allowedMethods {
if r.Method == allowedMethod {
h.ServeHTTP(w, r)
return
}
}
w.WriteHeader(http.StatusMethodNotAllowed)
})
}
func websocketHandler(hub *ws.Hub) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ws.ServeWs(hub, w, r)
}
}
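// filesystem serves assets from the directory named by envVar when it is set (useful
// during development), otherwise it falls back to the given embedded filesystem.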
func filesystem(envVar string, fs fs.FS) fs.FS {
if absolutePath, ok := os.LookupEnv(envVar); ok {
return os.DirFS(absolutePath)
}
return fs
}
func contentFS() fs.FS {
return assetFS(content)
}
func cssFS() fs.FS {
return assetFS(css)
}
func jsFS() fs.FS {
return assetFS(js)
}
func imgFS() fs.FS {
return assetFS(img)
}
func assetFS(efs embed.FS) fs.FS {
return filesystem("ROAMER_ASSETS", efs)
}
|
{
return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
request.URL.Path = path.Join(s, request.URL.Path)
h.ServeHTTP(writer, request)
})
}
|
producer.ts
|
const kafka = require('kafka-node');
const bp = require('body-parser');
let kafkaHost = 'kafka-pod:9092';
let kafkaTopic = 'testingtopic';
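// Minimal kafka-node producer: connect to the broker and publish a single test
// message to the topic once the producer signals it is ready.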
try {
const Producer = kafka.Producer;
const client = new kafka.KafkaClient({kafkaHost: kafkaHost});
const producer = new Producer(client);
let payloads = [
|
topic: kafkaTopic,
messages: 'Hello world!',
partition: 0,
attributes: 0
}
];
producer.on('ready', function() {
producer.send(payloads, (err: any, data: any) => {
if (err) {
console.log('[kafka-producer -> '+kafkaTopic+']: broker update failed');
} else {
console.log('[kafka-producer -> '+kafkaTopic+']: broker update success');
}
});
});
producer.on('error', function(err: any) {
console.log(err);
console.log('[kafka-producer -> '+kafkaTopic+']: connection errored');
throw err;
});
}
catch(e) {
console.log(e);
}
|
{
|
events_endrx.rs
|
#[doc = "Reader of register EVENTS_ENDRX"]
pub type R = crate::R<u32, super::EVENTS_ENDRX>;
#[doc = "Writer for register EVENTS_ENDRX"]
pub type W = crate::W<u32, super::EVENTS_ENDRX>;
#[doc = "Register EVENTS_ENDRX `reset()`'s with value 0"]
impl crate::ResetValue for super::EVENTS_ENDRX {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Receive buffer is filled up\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EVENTS_ENDRX_A {
#[doc = "0: Event not generated"]
NOTGENERATED,
#[doc = "1: Event generated"]
GENERATED,
}
impl From<EVENTS_ENDRX_A> for bool {
#[inline(always)]
fn from(variant: EVENTS_ENDRX_A) -> Self {
match variant {
EVENTS_ENDRX_A::NOTGENERATED => false,
EVENTS_ENDRX_A::GENERATED => true,
}
}
}
#[doc = "Reader of field `EVENTS_ENDRX`"]
pub type EVENTS_ENDRX_R = crate::R<bool, EVENTS_ENDRX_A>;
impl EVENTS_ENDRX_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> EVENTS_ENDRX_A {
match self.bits {
false => EVENTS_ENDRX_A::NOTGENERATED,
true => EVENTS_ENDRX_A::GENERATED,
}
}
#[doc = "Checks if the value of the field is `NOTGENERATED`"]
#[inline(always)]
pub fn is_not_generated(&self) -> bool {
*self == EVENTS_ENDRX_A::NOTGENERATED
}
#[doc = "Checks if the value of the field is `GENERATED`"]
#[inline(always)]
pub fn is_generated(&self) -> bool {
*self == EVENTS_ENDRX_A::GENERATED
}
}
#[doc = "Write proxy for field `EVENTS_ENDRX`"]
pub struct EVENTS_ENDRX_W<'a> {
w: &'a mut W,
}
impl<'a> EVENTS_ENDRX_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: EVENTS_ENDRX_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Event not generated"]
#[inline(always)]
pub fn not_generated(self) -> &'a mut W {
self.variant(EVENTS_ENDRX_A::NOTGENERATED)
}
#[doc = "Event generated"]
#[inline(always)]
pub fn generated(self) -> &'a mut W {
self.variant(EVENTS_ENDRX_A::GENERATED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Receive buffer is filled up"]
#[inline(always)]
pub fn events_endrx(&self) -> EVENTS_ENDRX_R
|
}
impl W {
#[doc = "Bit 0 - Receive buffer is filled up"]
#[inline(always)]
pub fn events_endrx(&mut self) -> EVENTS_ENDRX_W {
EVENTS_ENDRX_W { w: self }
}
}
|
{
EVENTS_ENDRX_R::new((self.bits & 0x01) != 0)
}
|
imgaug-playground.py
|
import imgaug as ia
import imgaug.augmenters as iaa
import numpy as np
from scipy import misc
import imageio
import cv2
from imgaug.augmentables import Keypoint, KeypointsOnImage
ia.seed(1)
image = ia.quokka(size=(256, 256))
kps = KeypointsOnImage([
Keypoint(x=65, y=100),
Keypoint(x=75, y=200),
Keypoint(x=100, y=100),
Keypoint(x=200, y=80)
], shape=image.shape)
seq = iaa.Sequential([
iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect keypoints
iaa.Affine(
rotate=10,
scale=(0.5, 0.7)
) # rotate by exactly 10deg and scale to 50-70%, affects keypoints
])
# Augment keypoints and images.
image_aug, kps_aug = seq(image=image, keypoints=kps)
# print coordinates before/after augmentation (see below)
# use after.x_int and after.y_int to get rounded integer coordinates
for i in range(len(kps.keypoints)):
before = kps.keypoints[i]
after = kps_aug.keypoints[i]
print("Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)" % (
i, before.x, before.y, after.x, after.y)
)
# image with keypoints before/after augmentation (shown below)
image_before = kps.draw_on_image(image, size=7)
image_after = kps_aug.draw_on_image(image_aug, size=7)
def main():
imgs = np.zeros((1, 100, 100, 3), dtype=np.uint8) + 255
bbs = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, x2=50, y1=0, y2=50)
], shape=imgs.shape[1:])
aug = iaa.Sequential([
|
iaa.Crop(px=10),
iaa.Pad(px=10, pad_cval=128),
iaa.Affine(scale=0.5, cval=0)
])
aug_det = aug.to_deterministic()
imgs_aug = aug_det.augment_images(imgs)
bbs_aug = aug_det.augment_bounding_boxes([bbs])
print("bbs:")
for bbs_aug_i in bbs_aug[0].bounding_boxes:
print(bbs_aug_i)
cv2.imshow('orig',imgs)
cv2.imshow('aug',bbs_aug[0].draw_on_image(imgs_aug[0]))
cv2.waitKey()
if __name__ == "__main__":
main()
| |
llvm_types.py
|
from abc import ABCMeta, abstractmethod
from typing import Any, List
class LLVMType(metaclass=ABCMeta):
@abstractmethod
def to_json(self) -> Any: pass
class LLVMIntType(LLVMType):
def __init__(self, width : int) -> None:
self.width = width
def to_json(self) -> Any:
return {'type': 'primitive type', 'primitive': 'integer', 'size': self.width}
class LLVMArrayType(LLVMType):
def __init__(self, elemtype : 'LLVMType', size : int) -> None:
self.size = size
self.elemtype = elemtype
def to_json(self) -> Any:
return { 'type': 'array',
'element type': self.elemtype.to_json(),
'size': self.size }
class LLVMPointerType(LLVMType):
def __init__(self, points_to : 'LLVMType') -> None:
self.points_to = points_to
def to_json(self) -> Any:
return {'type': 'pointer', 'points to': self.points_to.to_json()}
class LLVMAliasType(LLVMType):
def __init__(self, name : str) -> None:
self.name = name
def to_json(self) -> Any:
return {'type': 'type alias',
'alias of': self.name}
class LLVMStructType(LLVMType):
def __init__(self, field_types : List[LLVMType]) -> None:
self.field_types = field_types
def to_json(self) -> Any:
return {'type': 'struct',
'fields': [fld_ty.to_json() for fld_ty in self.field_types]}
class LLVMPackedStructType(LLVMType):
def __init__(self, field_types : List[LLVMType]) -> None:
self.field_types = field_types
def to_json(self) -> Any:
return {'type': 'packed struct',
'fields': [fld_ty.to_json() for fld_ty in self.field_types]}
##################################################
# Convenient helpers with intuitive/short names #
##################################################
i8 = LLVMIntType(8)
i16 = LLVMIntType(16)
i32 = LLVMIntType(32)
i64 = LLVMIntType(64)
def
|
(size : int, ty : 'LLVMType') -> 'LLVMArrayType':
"""``[size x ty]``, i.e. an array of ``size`` elements of type ``ty``."""
return LLVMArrayType(ty, size)
def ptr(ty : 'LLVMType') -> 'LLVMPointerType':
"""``ty*``, i.e. a pointer to a value of type ``ty``."""
return LLVMPointerType(ty)
def alias(name : str) -> 'LLVMAliasType':
"""An LLVM type alias (i.e., name)."""
return LLVMAliasType(name)
def struct_type(*field_types : LLVMType) -> 'LLVMStructType':
"""An LLVM struct type."""
return LLVMStructType(list(field_types))
def packed_struct_type(*field_types : LLVMType) -> 'LLVMPackedStructType':
"""An LLVM packed struct type."""
return LLVMPackedStructType(list(field_types))
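# Illustrative usage sketch (not part of the original module): the helpers compose and
# serialize to the JSON wire format, e.g.
#
#   ty = struct_type(i32, ptr(array(8, i8)))
#   ty.to_json()
#   # -> {'type': 'struct', 'fields': [{'type': 'primitive type', 'primitive': 'integer', 'size': 32},
#   #                                  {'type': 'pointer', 'points to': {'type': 'array', ...}}]}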
|
array
|
least_requested_test.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)
func TestLeastRequested(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
}
labels2 := map[string]string{
"bar": "foo",
"baz": "blah",
}
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
},
},
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("2000"),
},
},
},
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("3000"),
},
},
},
},
}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
test string
}{
{
/*
Node1 scores (remaining resources) on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
Node1 Score: (10 + 10) / 2 = 10
Node2 scores (remaining resources) on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
Node2 Score: (10 + 10) / 2 = 10
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
test: "nothing scheduled, nothing requested",
},
{
/*
Node1 scores on 0-10 scale
CPU Score: ((4000 - 3000) *10) / 4000 = 2.5
Memory Score: ((10000 - 5000) *10) / 10000 = 5
Node1 Score: (2.5 + 5) / 2 = 3
Node2 scores on 0-10 scale
CPU Score: ((6000 - 3000) *10) / 6000 = 5
Memory Score: ((10000 - 5000) *10) / 10000 = 5
Node2 Score: (5 + 5) / 2 = 5
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
/*
Node1 scores on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
Node1 Score: (10 + 10) / 2 = 10
Node2 scores on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
Node2 Score: (10 + 10) / 2 = 10
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
test: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 0) *10) / 20000 = 10
Node1 Score: (4 + 10) / 2 = 7
Node2 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
Node2 Score: (4 + 7.5) / 2 = 5
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
},
},
{
/*
Node1 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
Node1 Score: (4 + 7.5) / 2 = 5
Node2 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 10000) *10) / 20000 = 5
Node2 Score: (4 + 5) / 2 = 4
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
test: "resources requested, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
Node1 Score: (4 + 7.5) / 2 = 5
Node2 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((50000 - 10000) *10) / 50000 = 8
Node2 Score: (4 + 8) / 2 = 6
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
/*
Node1 scores on 0-10 scale
CPU Score: ((4000 - 6000) *10) / 4000 = 0
Memory Score: ((10000 - 0) *10) / 10000 = 10
Node1 Score: (0 + 10) / 2 = 5
Node2 scores on 0-10 scale
CPU Score: ((4000 - 6000) *10) / 4000 = 0
Memory Score: ((10000 - 5000) *10) / 10000 = 5
Node2 Score: (0 + 5) / 2 = 2
*/
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
test: "requested resources exceed node capacity",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "zero node resources, pods scheduled with resources",
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
}
for _, test := range tests {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(test.expectedList, list) {
t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
}
}
}
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
test: "no resources requested, pods scheduled with resources",
pods: []*v1.Pod{
|
request.py
|
import urllib.request,json
from .models import models
# Article = article.Article
Source = models.Source
Article = models.Article
# Getting api key
api_key = None
# Getting the news base url
base_url = None
def configure_request(app):
global api_key,base_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config["NEWS_API_BASE_URL"]
def get_articles(filter):
'''
Function that gets the json response to our url request
'''
# get_articles('category=bbc-news')
get_articles_url = base_url.format('top-headlines',api_key,filter)
with urllib.request.urlopen(get_articles_url) as url:
get_articles_data = url.read()
get_articles_response = json.loads(get_articles_data)
article_results = None
if get_articles_response['articles']:
article_results_list = get_articles_response['articles']
article_results = process_articles(article_results_list)
return article_results
def process_articles(article_list):
article_results = []
for article_item in article_list:
source = article_item.get('source')
author = article_item.get('author')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
content = article_item.get('content')
if content and urlToImage:
article_object = Article(source,author, title, description, url,urlToImage, publishedAt, content)
article_results.append(article_object)
return article_results
def get_sources():
get_sources_url = base_url.format('sources', api_key,'')
with urllib.request.urlopen(get_sources_url) as url:
sources_data = url.read()
sources_response = json.loads(sources_data)
sources_object = None
sources_response_data = sources_response['sources']
if sources_response_data:
sources_object = process_sources(sources_response_data)
return sources_object
def process_sources(source_list):
source_results = []
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
|
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
sources_object = Source(id, name, description, url, category,language, country)
source_results.append(sources_object)
return source_results
| |
crates_blank_slate.rs
|
mod proteins {
enum AminoAcid {
Ala, Arg, Asn, Asp, Cys, Gln, Glu, Gly, His, Ile,
Leu, Lys, Met, Phe, Pro, Ser, Thr, Trp, Tyr, Val
}
|
mod synthesis {
// proteins/synthesis.rs
pub fn synthesize(seq: &[AminoAcid]) // error: can't find type `AminoAcid`
//~^ ERROR: cannot find type `AminoAcid` in this scope
-> Protein
{
Protein
}
pub struct Protein;
}
}
| |
main.go
|
package main
import (
"context"
"fmt"
"time"
"github.com/topherbullock/goroutine-cancellation-patterns/helpers"
)
var rootContext = context.Background()
func
|
() {
ctx, cancel := context.WithCancel(rootContext)
go say(ctx, "hello")
go say(ctx, "gophers")
<-helpers.WaitForKeypress()
cancel()
fmt.Println("context cancelled")
<-helpers.WaitForKeypress()
}
func say(ctx context.Context, message string) {
ticker := time.NewTicker(1 * time.Second)
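// Note: this loop never selects on <-ctx.Done(), so the goroutines keep printing
// after cancel() is called in main (presumably what this pattern demonstrates).
// A cancellation-aware variant would add a "case <-ctx.Done(): return" branch.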
for {
select {
case <-ticker.C:
fmt.Println(message)
}
}
}
|
main
|
invertir.py
|
# 10. Write a recursive method that reverses the numbers in an integer array.
def invertir(numArray):
if (len(numArray) == 0):
return []
else:
return ([numArray[-1]] + invertir(numArray[:-1]))
|
numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
print(invertir(numeros))
| |
oceanapi.go
|
package v7
import "time"
// NewOceanTideClient creates a tide query instance.
|
isGeo: false,
Name: "ocean",
Parameter: map[string]string{"date": date, "location": location},
SubName: "tide",
Timeout: 15 * time.Second,
}
}
// NewOceanCurrentsClient creates an ocean currents query instance.
// https://dev.qweather.com/docs/api/ocean/currents/
func NewOceanCurrentsClient(location, date string) (client HeWeatherAPI) {
return &universeHeWeatherAPI{
isGeo: false,
Name: "ocean",
Parameter: map[string]string{"date": date, "location": location},
SubName: "currents",
Timeout: 15 * time.Second,
}
}
|
// https://dev.qweather.com/docs/api/ocean/tide/
func NewOceanTideClient(location, date string) (client HeWeatherAPI) {
return &universeHeWeatherAPI{
|
main.js
|
import React from 'react';
import {
LinearProgress,
AppBar,
IconButton,
Toolbar,
Typography
} from '@material-ui/core';
import { BrowserRouter as Router, Route } from "react-router-dom";
import { menuList, routes } from './constants';
import MenuIcon from '@material-ui/icons/Menu';
import Sidebar from '../component/sidebar';
export default function Main(props) {
const [state, setState] = React.useState({
sideMenu: false,
});
const toggleDrawer = (open) => event => {
if (event && event.type === 'keydown' && (event.key === 'Tab' || event.key === 'Shift')) {
return;
}
setState({ ...state, sideMenu: open });
};
const createRouter = () => {
return (
<div>
{routes.map((route, index) => (
<Route
key={index}
path={route.path}
exact={route.exact}
component={route.main}
/>
))}
</div>
);
}
const createTitleRouter = () => {
return (
<div>
{routes.map((route, index) => (
<Route
key={index}
path={route.path}
exact={route.exact}
component={route.title}
/>
))}
</div>
);
}
return (
<Router>
<AppBar position='static'>
<Toolbar>
<IconButton
edge='start'
color='inherit'
aria-label='Open drawer'
onClick={toggleDrawer(true)}
>
<MenuIcon />
</IconButton>
<Typography variant='h6' noWrap style={{marginLeft: 10}}>
{ createTitleRouter() }
</Typography>
</Toolbar>
</AppBar>
<Sidebar toggleDrawer={toggleDrawer} menuList={menuList} open={state.sideMenu} />
{
props.loading? <LinearProgress color='secondary' />:
<div style={{marginTop: 20, marginBottom: 40, marginLeft: 20, marginRight: 20}}>
{createRouter()}
</div>
}
</Router>
|
);
}
| |
antiav_srp.py
|
# Copyright (C) 2014 Optiv, Inc. ([email protected]), Updated 2016 for cuckoo 2.0
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
|
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class AntiAVSRP(Signature):
name = "antiav_srp"
description = "Modifies Software Restriction Policies likely to cripple AV"
severity = 3
categories = ["anti-av"]
authors = ["Optiv"]
minimum = "2.0"
ttp = ["T1089"]
regkeys_re = [
".*\\\\Policies\\\\Microsoft\\\\Windows\\\\Safer\\\\\CodeIdentifiers\\\\0\\\\Paths\\\\.*",
]
def on_complete(self):
for indicator in self.regkeys_re:
for regkey in self.check_key(pattern=indicator, regex=True, actions=["regkey_written"], all=True):
self.mark_ioc("registry", regkey)
return self.has_marks()
| |
memtable_test.go
|
package memtable
import (
"fmt"
"testing"
"github.com/merlin82/leveldb/internal"
)
func Test_MemTable(t *testing.T) {
memTable := New()
memTable.Add(1234567, internal.TypeValue, []byte("aadsa34a"), []byte("bb23b3423"))
|
}
|
value, _ := memTable.Get([]byte("aadsa34a"))
fmt.Println(string(value))
fmt.Println(memTable.ApproximateMemoryUsage())
|
connection.py
|
from twisted.internet.protocol import Protocol
from gandyloo import message, parse
class MinesweeperClient(Protocol):
'''Represents a connection to a server using twisted's Protocol framework.
Created with an event sink, where parsed events (subclasses of
gandyloo.message.Response) are fired. Sink should have a method
self.response(resp).
'''
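# Illustrative sketch (hypothetical sink, not part of the original module): any object
# with a `response(resp)` method can act as the event sink, e.g.
#
#   class PrintSink:
#       def response(self, resp):
#           print(resp)
#
#   client = MinesweeperClient(PrintSink())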
def
|
(self, event_sink):
self.buffer = ""
self.hello_received = False
self.size = None
self.event_sink = event_sink
def dataReceived(self, data):
self.buffer += data
if not self.hello_received:
try:
resp, self.buffer = parse.parse_start(self.buffer, first=True)
except parse.NotReadyError:
return # Haven't received enough data yet
self.hello_received = True
self.size = resp.size
self.event_sink.response(resp)
try:
while True:
resp, self.buffer = parse.parse_start(self.buffer, self.size)
self.event_sink.response(resp)
except parse.NotReadyError:
return
def command(self, command):
self.transport.write(command.render())
def clientConnectionLost(self, connection, reason):
self.event_sink.response(message.CloseResp(reason))
|
__init__
|
loopback.go
|
// {{{ Copyright (c) Paul R. Tagliamonte <[email protected]> 2017-2021
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. }}}
package loopback
/*
#include <linux/loop.h>
*/
import "C"
import (
"fmt"
"os"
"syscall"
)
type (
// loopInfo64 is the 64 bit loop info variant. This is used throughout
// this codebase. If you need to use loopInfo due to a 32 bit program,
// please open a bug on this library.
loopInfo64 struct {
loDevice uint64 /* ioctl r/o */
loInode uint64 /* ioctl r/o */
loRdevice uint64 /* ioctl r/o */
loOffset uint64
loSizelimit uint64 /* bytes, 0 == max available */
loNumber uint32 /* ioctl r/o */
loEncryptType uint32
loEncryptKeySize uint32 /* ioctl w/o */
loFlags uint32 /* ioctl r/o */
loFileName [loNameSize]uint8
loCryptName [loNameSize]uint8
loEncryptKey [loKeySize]uint8 /* ioctl w/o */
loInit [2]uint64
}
)
const (
// loopSetFd will associate the loop device with the open file
loopSetFd = C.LOOP_SET_FD
// loopCtlGetFree will allocate or find a free loop device for use.
loopCtlGetFree = C.LOOP_CTL_GET_FREE
// loopGetStatus64 will get the status of the loop device.
loopGetStatus64 = C.LOOP_GET_STATUS64
// loopSetStatus64 will set the status of the loop device.
loopSetStatus64 = C.LOOP_SET_STATUS64
// loopClrFd will disassociate the loop device from any file descriptor.
loopClrFd = C.LOOP_CLR_FD
// loopSetCapacity will resize a live loop device.
loopSetCapacity = C.LOOP_SET_CAPACITY
)
const (
// loFlagsAutoClear will instruct the kernel to autodestruct on last close.
loFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
// loFlagsReadOnly requests the loopback device be read-only.
loFlagsReadOnly = C.LO_FLAGS_READ_ONLY
// loFlagsPartScan will allow automatic partition scanning.
loFlagsPartScan = C.LO_FLAGS_PARTSCAN
// loKeySize is the length of the encryption key
loKeySize = C.LO_KEY_SIZE
// loNameSize is the length of the file name.
loNameSize = C.LO_NAME_SIZE
)
// syscalls will return an errno type (which implements error) for all calls,
// including success (errno 0). We only care about non-zero errnos.
func errnoIsErr(err error) error {
if err.(syscall.Errno) != 0 {
return err
}
return nil
}
// Loop will, when given a handle to a Loopback device (such as /dev/loop0),
// and a handle to the fs image to loop mount (such as a squashfs or ext4fs
// image), perform the required call to loop the image to the provided block
// device.
//
// The first argument (loopbackDevice) can be obtained using
// loopback.NextLoopDevice, if one is not known in advance.
func Loop(loopbackDevice, image *os.File) error {
_, _, err := syscall.Syscall(
syscall.SYS_IOCTL,
loopbackDevice.Fd(),
loopSetFd,
image.Fd(),
)
return errnoIsErr(err)
}
// Unloop will, given a handle to the Loopback device (such as /dev/loop0),
// perform the required call to unloop the image mounted at
// that location.
func Unloop(loopbackDevice *os.File) error
|
// NextLoopDevice will return the next loopback device that isn't used. Under
// the hood this will ask loop-control for the LOOP_CTL_GET_FREE value, and
// interpolate that into the conventional GNU/Linux naming scheme for loopback
// devices, and os.Open that path.
func NextLoopDevice() (*os.File, error) {
loopInt, err := nextUnallocatedLoop()
if err != nil {
return nil, err
}
return os.Open(fmt.Sprintf("/dev/loop%d", loopInt))
}
// nextUnallocatedLoop will return the integer of the next loopback device we
// can use by calling loop-control with the LOOP_CTL_GET_FREE ioctl.
func nextUnallocatedLoop() (int, error) {
fd, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
if err != nil {
return 0, err
}
defer fd.Close()
index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd.Fd(), loopCtlGetFree, 0)
return int(index), errnoIsErr(err)
}
// UnmounterFunc will unmount the filesystem, unloop the file, and close the
// held file descriptor. Be sure this is defer'ed in a sensible location!
type UnmounterFunc func()
// MountImage will get the next loopback device that isn't used, loopback the
// provided image, and mount the loopback device to the target.
func MountImage(
image *os.File,
target string,
fstype string,
flags uintptr,
data string,
) (*os.File, UnmounterFunc, error) {
lo, err := NextLoopDevice()
if err != nil {
return nil, nil, err
}
if err := Loop(lo, image); err != nil {
lo.Close()
return nil, nil, err
}
if err := syscall.Mount(lo.Name(), target, fstype, flags, data); err != nil {
Unloop(lo)
lo.Close()
return nil, nil, err
}
return lo, func() {
syscall.Unmount(target, 0)
Unloop(lo)
lo.Close()
}, nil
}
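// Illustrative call site (hypothetical paths and flags, not part of this package):
//
//	img, _ := os.Open("/tmp/rootfs.squashfs")
//	lo, unmount, err := MountImage(img, "/mnt/rootfs", "squashfs", syscall.MS_RDONLY, "")
//	if err != nil {
//		// handle error
//	}
//	defer unmount() // unmounts the target, unloops the device, and closes lo
//	_ = lo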
// vim: foldmethod=marker
|
{
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopbackDevice.Fd(), loopClrFd, 0)
return errnoIsErr(err)
}
|
index.ts
|
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { BuilderContext, BuilderOutput, createBuilder } from '@angular-devkit/architect';
import { EmittedFiles, WebpackLoggingCallback, runWebpack } from '@angular-devkit/build-webpack';
import { getSystemPath, json, normalize, resolve, tags, virtualFs } from '@angular-devkit/core';
import { NodeJsSyncHost } from '@angular-devkit/core/node';
import * as fs from 'fs';
import * as ora from 'ora';
import * as path from 'path';
import { Observable, from } from 'rxjs';
import { concatMap, map, switchMap } from 'rxjs/operators';
import { ScriptTarget } from 'typescript';
import * as webpack from 'webpack';
import { ExecutionTransformer } from '../transforms';
import {
BuildBrowserFeatures,
NormalizedBrowserBuilderSchema,
deleteOutputDir,
normalizeAssetPatterns,
normalizeOptimization,
normalizeSourceMaps,
urlJoin,
} from '../utils';
import { BundleActionExecutor } from '../utils/action-executor';
import { WebpackConfigOptions } from '../utils/build-options';
import { ThresholdSeverity, checkBudgets } from '../utils/bundle-calculator';
import { findCachePath } from '../utils/cache-path';
import { colors } from '../utils/color';
import { copyAssets } from '../utils/copy-assets';
import { cachingDisabled } from '../utils/environment-options';
import { i18nInlineEmittedFiles } from '../utils/i18n-inlining';
import { I18nOptions } from '../utils/i18n-options';
import { getHtmlTransforms } from '../utils/index-file/transforms';
import {
IndexHtmlTransform,
writeIndexHtml,
} from '../utils/index-file/write-index-html';
import { ensureOutputPaths } from '../utils/output-paths';
import {
InlineOptions,
ProcessBundleFile,
ProcessBundleOptions,
ProcessBundleResult,
} from '../utils/process-bundle';
import { readTsconfig } from '../utils/read-tsconfig';
import { augmentAppWithServiceWorker } from '../utils/service-worker';
import { assertCompatibleAngularVersion } from '../utils/version';
import {
BrowserWebpackConfigOptions,
generateI18nBrowserWebpackConfigFromContext,
getIndexInputFile,
getIndexOutputFile,
} from '../utils/webpack-browser-config';
import { isWebpackFiveOrHigher } from '../utils/webpack-version';
import {
getAotConfig,
getBrowserConfig,
getCommonConfig,
getNonAotConfig,
getStatsConfig,
getStylesConfig,
getWorkerConfig,
normalizeExtraEntryPoints,
} from '../webpack/configs';
import { NgBuildAnalyticsPlugin } from '../webpack/plugins/analytics';
import { markAsyncChunksNonInitial } from '../webpack/utils/async-chunks';
import {
BundleStats,
createWebpackLoggingCallback,
generateBuildStats,
generateBuildStatsTable,
generateBundleStats,
statsErrorsToString,
statsHasErrors,
statsHasWarnings,
statsWarningsToString,
} from '../webpack/utils/stats';
import { Schema as BrowserBuilderSchema } from './schema';
const cacheDownlevelPath = cachingDisabled ? undefined : findCachePath('angular-build-dl');
export type BrowserBuilderOutput = json.JsonObject &
BuilderOutput & {
baseOutputPath: string;
outputPaths: string[];
/**
* @deprecated in version 9. Use 'outputPaths' instead.
*/
outputPath: string;
};
// todo: the below should be cleaned once dev-server support the new i18n
interface ConfigFromContextReturn {
config: webpack.Configuration;
projectRoot: string;
projectSourceRoot?: string;
i18n: I18nOptions;
}
export async function buildBrowserWebpackConfigFromContext(
options: BrowserBuilderSchema,
context: BuilderContext,
host: virtualFs.Host<fs.Stats> = new NodeJsSyncHost(),
extraBuildOptions: Partial<NormalizedBrowserBuilderSchema> = {},
): Promise<ConfigFromContextReturn> {
const webpackPartialGenerator = (wco: BrowserWebpackConfigOptions) => [
getCommonConfig(wco),
getBrowserConfig(wco),
getStylesConfig(wco),
getStatsConfig(wco),
getAnalyticsConfig(wco, context),
getCompilerConfig(wco),
wco.buildOptions.webWorkerTsConfig ? getWorkerConfig(wco) : {},
];
return generateI18nBrowserWebpackConfigFromContext(
options,
context,
webpackPartialGenerator,
host,
extraBuildOptions,
);
}
function getAnalyticsConfig(
wco: WebpackConfigOptions,
context: BuilderContext,
): webpack.Configuration {
if (context.analytics) {
// If there's analytics, add our plugin. Otherwise no need to slow down the build.
let category = 'build';
if (context.builder) {
// We already vetted that this is a "safe" package, otherwise the analytics would be noop.
category =
context.builder.builderName.split(':')[1] || context.builder.builderName || 'build';
}
// The category is the builder name if it's an angular builder.
return {
plugins: [new NgBuildAnalyticsPlugin(
wco.projectRoot,
context.analytics,
category,
!!wco.tsConfig.options.enableIvy,
)],
};
}
return {};
}
function getCompilerConfig(wco: WebpackConfigOptions): webpack.Configuration {
if (wco.buildOptions.main || wco.buildOptions.polyfills) {
return wco.buildOptions.aot ? getAotConfig(wco) : getNonAotConfig(wco);
}
return {};
}
async function initialize(
options: BrowserBuilderSchema,
context: BuilderContext,
host: virtualFs.Host<fs.Stats>,
differentialLoadingMode: boolean,
webpackConfigurationTransform?: ExecutionTransformer<webpack.Configuration>,
): Promise<{
config: webpack.Configuration;
projectRoot: string;
projectSourceRoot?: string;
i18n: I18nOptions;
}> {
const originalOutputPath = options.outputPath;
// Assets are processed directly by the builder except when watching
const adjustedOptions = options.watch ? options : { ...options, assets: [] };
// TODO_WEBPACK_5: Investigate build/serve issues with the `license-webpack-plugin` package
if (adjustedOptions.extractLicenses && isWebpackFiveOrHigher()) {
adjustedOptions.extractLicenses = false;
context.logger.warn(
'Warning: License extraction is currently disabled when using Webpack 5. ' +
'This is temporary and will be corrected in a future update.',
);
}
const {
config,
projectRoot,
projectSourceRoot,
i18n,
} = await buildBrowserWebpackConfigFromContext(adjustedOptions, context, host, { differentialLoadingMode });
// Validate asset option values if processed directly
if (options.assets?.length && !adjustedOptions.assets?.length) {
normalizeAssetPatterns(
options.assets,
new virtualFs.SyncDelegateHost(host),
normalize(context.workspaceRoot),
normalize(projectRoot),
projectSourceRoot === undefined ? undefined : normalize(projectSourceRoot),
).forEach(({ output }) => {
if (output.startsWith('..')) {
throw new Error('An asset cannot be written to a location outside of the output path.');
}
});
}
let transformedConfig;
if (webpackConfigurationTransform) {
transformedConfig = await webpackConfigurationTransform(config);
}
if (options.deleteOutputPath) {
deleteOutputDir(context.workspaceRoot, originalOutputPath);
}
return { config: transformedConfig || config, projectRoot, projectSourceRoot, i18n };
}
// tslint:disable-next-line: no-big-function
export function buildWebpackBrowser(
options: BrowserBuilderSchema,
context: BuilderContext,
transforms: {
webpackConfiguration?: ExecutionTransformer<webpack.Configuration>;
logging?: WebpackLoggingCallback;
indexHtml?: IndexHtmlTransform;
} = {},
): Observable<BrowserBuilderOutput> {
const host = new NodeJsSyncHost();
const root = normalize(context.workspaceRoot);
const projectName = context.target?.project;
if (!projectName) {
throw new Error('The builder requires a target.');
}
const baseOutputPath = path.resolve(context.workspaceRoot, options.outputPath);
let outputPaths: undefined | Map<string, string>;
// Check Angular version.
assertCompatibleAngularVersion(context.workspaceRoot, context.logger);
return from(context.getProjectMetadata(projectName))
.pipe(
switchMap(async projectMetadata => {
const sysProjectRoot = getSystemPath(
resolve(normalize(context.workspaceRoot),
normalize((projectMetadata.root as string) ?? '')),
);
const { options: compilerOptions } = readTsconfig(options.tsConfig, context.workspaceRoot);
const target = compilerOptions.target || ScriptTarget.ES5;
const buildBrowserFeatures = new BuildBrowserFeatures(sysProjectRoot);
const isDifferentialLoadingNeeded = buildBrowserFeatures.isDifferentialLoadingNeeded(target);
const differentialLoadingMode = !options.watch && isDifferentialLoadingNeeded;
if (target > ScriptTarget.ES2015 && isDifferentialLoadingNeeded) {
context.logger.warn(tags.stripIndent`
Warning: Using differential loading with targets ES5 and ES2016 or higher may
cause problems. Browsers with support for ES2015 will load the ES2016+ scripts
referenced with script[type="module"] but they may not support ES2016+ syntax.
`);
}
const hasIE9 = buildBrowserFeatures.supportedBrowsers.includes('ie 9');
const hasIE10 = buildBrowserFeatures.supportedBrowsers.includes('ie 10');
if (hasIE9 || hasIE10) {
const browsers =
(hasIE9 ? 'IE 9' + (hasIE10 ? ' & ' : '') : '') + (hasIE10 ? 'IE 10' : '');
context.logger.warn(
`Warning: Support was requested for ${browsers} in the project's browserslist configuration. ` +
(hasIE9 && hasIE10 ? 'These browsers are' : 'This browser is') +
' no longer officially supported with Angular v11 and higher.' +
'\nFor additional information: https://v10.angular.io/guide/deprecations#ie-9-10-and-mobile',
);
}
return {
...(await initialize(options, context, host, differentialLoadingMode, transforms.webpackConfiguration)),
buildBrowserFeatures,
isDifferentialLoadingNeeded,
target,
};
}),
// tslint:disable-next-line: no-big-function
switchMap(({ config, projectRoot, projectSourceRoot, i18n, buildBrowserFeatures, isDifferentialLoadingNeeded, target }) => {
const useBundleDownleveling = isDifferentialLoadingNeeded && !options.watch;
const startTime = Date.now();
const normalizedOptimization = normalizeOptimization(options.optimization);
const indexTransforms = getHtmlTransforms(
normalizedOptimization,
buildBrowserFeatures,
transforms.indexHtml,
);
return runWebpack(config, context, {
webpackFactory: require('webpack') as typeof webpack,
logging:
transforms.logging ||
(useBundleDownleveling
? () => { }
: createWebpackLoggingCallback(!!options.verbose, context.logger)),
}).pipe(
// tslint:disable-next-line: no-big-function
concatMap(async buildEvent => {
const { webpackStats: webpackRawStats, success, emittedFiles = [] } = buildEvent;
if (!webpackRawStats) {
throw new Error('Webpack stats build result is required.');
}
// Fix incorrectly set `initial` value on chunks.
const extraEntryPoints = normalizeExtraEntryPoints(options.styles || [], 'styles')
.concat(normalizeExtraEntryPoints(options.scripts || [], 'scripts'));
const webpackStats = {
...webpackRawStats,
chunks: markAsyncChunksNonInitial(webpackRawStats, extraEntryPoints),
};
if (!success && useBundleDownleveling) {
// If using bundle downleveling then there is only one build
// If it fails show any diagnostic messages and bail
if (statsHasWarnings(webpackStats)) {
context.logger.warn(statsWarningsToString(webpackStats, { colors: true }));
}
if (statsHasErrors(webpackStats)) {
context.logger.error(statsErrorsToString(webpackStats, { colors: true }));
}
return { success };
} else if (success) {
outputPaths = ensureOutputPaths(baseOutputPath, i18n);
let noModuleFiles: EmittedFiles[] | undefined;
let moduleFiles: EmittedFiles[] | undefined;
let files: EmittedFiles[] | undefined;
const scriptsEntryPointName = normalizeExtraEntryPoints(
options.scripts || [],
'scripts',
).map(x => x.bundleName);
if (isDifferentialLoadingNeeded && options.watch) {
moduleFiles = emittedFiles;
files = moduleFiles.filter(
x => x.extension === '.css' || (x.name && scriptsEntryPointName.includes(x.name)),
);
if (i18n.shouldInline) {
const success = await i18nInlineEmittedFiles(
context,
emittedFiles,
i18n,
baseOutputPath,
Array.from(outputPaths.values()),
scriptsEntryPointName,
// tslint:disable-next-line: no-non-null-assertion
webpackStats.outputPath!,
target <= ScriptTarget.ES5,
options.i18nMissingTranslation,
);
if (!success) {
return { success: false };
}
}
} else if (isDifferentialLoadingNeeded) {
moduleFiles = [];
noModuleFiles = [];
// Common options for all bundle process actions
const sourceMapOptions = normalizeSourceMaps(options.sourceMap || false);
const actionOptions: Partial<ProcessBundleOptions> = {
optimize: normalizedOptimization.scripts,
sourceMaps: sourceMapOptions.scripts,
hiddenSourceMaps: sourceMapOptions.hidden,
vendorSourceMaps: sourceMapOptions.vendor,
integrityAlgorithm: options.subresourceIntegrity ? 'sha384' : undefined,
};
let mainChunkId;
const actions: ProcessBundleOptions[] = [];
let workerReplacements: [string, string][] | undefined;
const seen = new Set<string>();
for (const file of emittedFiles) {
// Assets are not processed nor injected into the index
if (file.asset) {
// WorkerPlugin adds worker files to assets
if (file.file.endsWith('.worker.js')) {
if (!workerReplacements) {
workerReplacements = [];
}
workerReplacements.push([
file.file,
file.file.replace(/\-(es20\d{2}|esnext)/, '-es5'),
]);
} else {
continue;
}
}
// Scripts and non-javascript files are not processed
if (
file.extension !== '.js' ||
(file.name && scriptsEntryPointName.includes(file.name))
) {
if (files === undefined) {
files = [];
}
files.push(file);
continue;
}
// Ignore already processed files; emittedFiles can contain duplicates
if (seen.has(file.file)) {
continue;
}
seen.add(file.file);
if (file.name === 'vendor' || (!mainChunkId && file.name === 'main')) {
// tslint:disable-next-line: no-non-null-assertion
mainChunkId = file.id!.toString();
}
// All files at this point except ES5 polyfills are module scripts
const es5Polyfills = file.file.startsWith('polyfills-es5');
if (!es5Polyfills) {
moduleFiles.push(file);
}
// Retrieve the content/map for the file
// NOTE: Additional future optimizations will read directly from memory
// tslint:disable-next-line: no-non-null-assertion
let filename = path.join(webpackStats.outputPath!, file.file);
const code = fs.readFileSync(filename, 'utf8');
let map;
if (actionOptions.sourceMaps) {
try {
map = fs.readFileSync(filename + '.map', 'utf8');
if (es5Polyfills) {
fs.unlinkSync(filename + '.map');
}
} catch { }
}
if (es5Polyfills) {
fs.unlinkSync(filename);
filename = filename.replace(/\-es20\d{2}/, '');
}
const es2015Polyfills = file.file.startsWith('polyfills-es20');
// Record the bundle processing action
// The runtime chunk gets special processing for lazy loaded files
actions.push({
...actionOptions,
filename,
code,
map,
// id is always present for non-assets
// tslint:disable-next-line: no-non-null-assertion
name: file.id!,
runtime: file.file.startsWith('runtime'),
ignoreOriginal: es5Polyfills,
optimizeOnly: es2015Polyfills,
});
// ES2015 polyfills are only optimized; optimization check was performed above
if (es2015Polyfills) {
continue;
}
// Add the newly created ES5 bundles to the index as nomodule scripts
const newFilename = es5Polyfills
? file.file.replace(/\-es20\d{2}/, '')
: file.file.replace(/\-(es20\d{2}|esnext)/, '-es5');
noModuleFiles.push({ ...file, file: newFilename });
}
const processActions: typeof actions = [];
let processRuntimeAction: ProcessBundleOptions | undefined;
const processResults: ProcessBundleResult[] = [];
for (const action of actions) {
// If SRI is enabled always process the runtime bundle
// Lazy route integrity values are stored in the runtime bundle
if (action.integrityAlgorithm && action.runtime) {
processRuntimeAction = action;
} else {
processActions.push({ replacements: workerReplacements, ...action });
}
}
const executor = new BundleActionExecutor(
{ cachePath: cacheDownlevelPath, i18n },
options.subresourceIntegrity ? 'sha384' : undefined,
);
// Execute the bundle processing actions
try {
const dlSpinner = ora('Generating ES5 bundles for differential loading...').start();
for await (const result of executor.processAll(processActions)) {
processResults.push(result);
}
// Runtime must be processed after all other files
if (processRuntimeAction) {
const runtimeOptions = {
...processRuntimeAction,
runtimeData: processResults,
supportedBrowsers: buildBrowserFeatures.supportedBrowsers,
};
processResults.push(
await import('../utils/process-bundle').then(m => m.process(runtimeOptions)),
);
}
dlSpinner.succeed('ES5 bundle generation complete.');
if (i18n.shouldInline) {
const spinner = ora('Generating localized bundles...').start();
const inlineActions: InlineOptions[] = [];
const processedFiles = new Set<string>();
for (const result of processResults) {
if (result.original) {
inlineActions.push({
filename: path.basename(result.original.filename),
code: fs.readFileSync(result.original.filename, 'utf8'),
map:
result.original.map &&
fs.readFileSync(result.original.map.filename, 'utf8'),
outputPath: baseOutputPath,
es5: false,
missingTranslation: options.i18nMissingTranslation,
setLocale: result.name === mainChunkId,
});
processedFiles.add(result.original.filename);
if (result.original.map) {
processedFiles.add(result.original.map.filename);
}
}
if (result.downlevel) {
inlineActions.push({
filename: path.basename(result.downlevel.filename),
code: fs.readFileSync(result.downlevel.filename, 'utf8'),
map:
result.downlevel.map &&
fs.readFileSync(result.downlevel.map.filename, 'utf8'),
outputPath: baseOutputPath,
es5: true,
missingTranslation: options.i18nMissingTranslation,
setLocale: result.name === mainChunkId,
});
processedFiles.add(result.downlevel.filename);
if (result.downlevel.map) {
processedFiles.add(result.downlevel.map.filename);
}
}
}
let hasErrors = false;
try {
for await (const result of executor.inlineAll(inlineActions)) {
if (options.verbose) {
context.logger.info(
`Localized "${result.file}" [${result.count} translation(s)].`,
);
}
for (const diagnostic of result.diagnostics) {
spinner.stop();
if (diagnostic.type === 'error') {
hasErrors = true;
context.logger.error(diagnostic.message);
} else {
context.logger.warn(diagnostic.message);
}
spinner.start();
}
}
// Copy any non-processed files into the output locations
await copyAssets(
[
{
glob: '**/*',
// tslint:disable-next-line: no-non-null-assertion
input: webpackStats.outputPath!,
output: '',
ignore: [...processedFiles].map(f =>
// tslint:disable-next-line: no-non-null-assertion
path.relative(webpackStats.outputPath!, f),
),
},
],
Array.from(outputPaths.values()),
'',
);
} catch (err) {
spinner.fail(colors.redBright('Localized bundle generation failed.'));
return { success: false, error: mapErrorToMessage(err) };
}
if (hasErrors) {
spinner.fail(colors.redBright('Localized bundle generation failed.'));
} else {
spinner.succeed('Localized bundle generation complete.');
}
if (hasErrors) {
return { success: false };
}
}
} finally {
executor.stop();
}
type ArrayElement<A> = A extends ReadonlyArray<infer T> ? T : never;
function generateBundleInfoStats(
bundle: ProcessBundleFile,
chunk: ArrayElement<webpack.Stats.ToJsonOutput['chunks']> | undefined,
): BundleStats {
return generateBundleStats(
{
size: bundle.size,
files: bundle.map ? [bundle.filename, bundle.map.filename] : [bundle.filename],
names: chunk?.names,
entry: !!chunk?.names.includes('runtime'),
initial: !!chunk?.initial,
rendered: true,
},
true,
);
}
const bundleInfoStats: BundleStats[] = [];
for (const result of processResults) {
const chunk = webpackStats.chunks?.find((chunk) => chunk.id.toString() === result.name);
if (result.original) {
bundleInfoStats.push(generateBundleInfoStats(result.original, chunk));
}
if (result.downlevel) {
bundleInfoStats.push(generateBundleInfoStats(result.downlevel, chunk));
}
}
const unprocessedChunks = webpackStats.chunks?.filter((chunk) => !processResults
.find((result) => chunk.id.toString() === result.name),
) || [];
for (const chunk of unprocessedChunks) {
const asset = webpackStats.assets?.find(a => a.name === chunk.files[0]);
bundleInfoStats.push(generateBundleStats({ ...chunk, size: asset?.size }, true));
}
context.logger.info(
'\n' +
generateBuildStatsTable(bundleInfoStats, colors.enabled) +
'\n\n' +
generateBuildStats(
webpackStats?.hash || '<unknown>',
Date.now() - startTime,
true,
),
);
// Check for budget errors and display them to the user.
const budgets = options.budgets || [];
const budgetFailures = checkBudgets(budgets, webpackStats, processResults);
for (const { severity, message } of budgetFailures) {
switch (severity) {
case ThresholdSeverity.Warning:
webpackStats.warnings.push(message);
break;
case ThresholdSeverity.Error:
webpackStats.errors.push(message);
break;
default:
assertNever(severity);
}
}
if (statsHasWarnings(webpackStats)) {
context.logger.warn(statsWarningsToString(webpackStats, { colors: true }));
}
if (statsHasErrors(webpackStats)) {
context.logger.error(statsErrorsToString(webpackStats, { colors: true }));
return { success: false };
}
} else {
files = emittedFiles.filter(x => x.name !== 'polyfills-es5');
noModuleFiles = emittedFiles.filter(x => x.name === 'polyfills-es5');
if (i18n.shouldInline) {
const success = await i18nInlineEmittedFiles(
context,
emittedFiles,
i18n,
baseOutputPath,
Array.from(outputPaths.values()),
scriptsEntryPointName,
// tslint:disable-next-line: no-non-null-assertion
webpackStats.outputPath!,
target <= ScriptTarget.ES5,
options.i18nMissingTranslation,
);
if (!success) {
return { success: false };
}
}
}
// Copy assets
if (!options.watch && options.assets?.length) {
try {
await copyAssets(
normalizeAssetPatterns(
options.assets,
new virtualFs.SyncDelegateHost(host),
root,
normalize(projectRoot),
projectSourceRoot === undefined ? undefined : normalize(projectSourceRoot),
),
Array.from(outputPaths.values()),
context.workspaceRoot,
);
} catch (err) {
return { success: false, error: 'Unable to copy assets: ' + err.message };
}
}
for (const [locale, outputPath] of outputPaths.entries()) {
let localeBaseHref;
if (i18n.locales[locale] && i18n.locales[locale].baseHref !== '') {
localeBaseHref = urlJoin(
options.baseHref || '',
i18n.locales[locale].baseHref ?? `/${locale}/`,
);
}
try {
if (options.index) {
await writeIndexHtml({
host,
outputPath: path.join(outputPath, getIndexOutputFile(options)),
indexPath: path.join(context.workspaceRoot, getIndexInputFile(options)),
files,
noModuleFiles,
moduleFiles,
baseHref: localeBaseHref || options.baseHref,
deployUrl: options.deployUrl,
sri: options.subresourceIntegrity,
scripts: options.scripts,
styles: options.styles,
postTransforms: indexTransforms,
crossOrigin: options.crossOrigin,
// i18nLocale is used when Ivy is disabled
lang: locale || options.i18nLocale,
});
}
if (options.serviceWorker) {
await augmentAppWithServiceWorker(
host,
root,
normalize(projectRoot),
normalize(outputPath),
localeBaseHref || options.baseHref || '/',
options.ngswConfigPath,
);
}
} catch (err) {
return { success: false, error: mapErrorToMessage(err) };
}
}
}
return { success };
}),
map(
event =>
({
...event,
baseOutputPath,
outputPath: baseOutputPath,
outputPaths: outputPaths && Array.from(outputPaths.values()) || [baseOutputPath],
} as BrowserBuilderOutput),
),
);
}),
);
}
function
|
(error: unknown): string | undefined {
if (error instanceof Error) {
return error.message;
}
if (typeof error === 'string') {
return error;
}
return undefined;
}
function assertNever(input: never): never {
throw new Error(`Unexpected call to assertNever() with input: ${
JSON.stringify(input, null /* replacer */, 4 /* tabSize */)}`);
}
export default createBuilder<json.JsonObject & BrowserBuilderSchema>(buildWebpackBrowser);
|
mapErrorToMessage
|
listing310.py
|
#!/usr/bin/python3
# Copyright © 2012-2015 Graham Sellers
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import time
fullscreen = True
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
from math import cos, sin
# Vertex program
vertex_shader_source = '''
#version 450 core
void main(void)
{
const vec4 vertices[3] = vec4[3](
vec4(0.25, -0.25, 0.5, 1.0),
vec4(-0.25, -0.25, 0.5, 1.0),
vec4(0.25, 0.25, 0.5, 1.0));
// Index into our array using gl_VertexID
gl_Position = vertices[gl_VertexID];
}
'''
# Fragment program
fragment_shader_source = '''
#version 450 core
out vec4 color;
void main(void)
{
color = vec4(sin(gl_FragCoord.x * 0.25) * 0.5 + 0.5,
cos(gl_FragCoord.y * 0.25) * 0.5 + 0.5,
sin(gl_FragCoord.x * 0.15) * cos(gl_FragCoord.y * 0.15),
1.0);
}
'''
def compile_program(vertex_source, fragment_source):
vertex_shader = None
fragment_shader = None
if vertex_source:
vertex_shader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertex_shader, vertex_source)
glCompileShader(vertex_shader)
if not glGetShaderiv(vertex_shader, GL_COMPILE_STATUS):
raise Exception('failed to compile shader "%s":\n%s' %
('vertex_shader', glGetShaderInfoLog(vertex_shader)))
if fragment_source:
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragment_shader, fragment_source)
glCompileShader(fragment_shader)
if not glGetShaderiv(fragment_shader, GL_COMPILE_STATUS):
raise Exception('failed to compile shader "%s":\n%s' %
('fragment_shader', glGetShaderInfoLog(fragment_shader)))
program = glCreateProgram()
glAttachShader(program, vertex_shader)
glAttachShader(program, fragment_shader)
glLinkProgram(program)
if vertex_shader:
glDeleteShader(vertex_shader)
if fragment_shader:
glDeleteShader(fragment_shader)
return program
class Scene:
def __init__(self):
pass
def display(self):
currentTime = time.time()
color = [ 0.0, 0.2, 0.0, 1.0 ];
glClearBufferfv(GL_COLOR, 0, color)
|
glutSwapBuffers()
def reshape(self, width, height):
pass
def keyboard(self, key, x, y ):
global fullscreen
print ('key:' , key)
if key == b'\x1b': # ESC
sys.exit()
elif key == b'f' or key == b'F': #fullscreen toggle
if (fullscreen == True):
glutReshapeWindow(512, 512)
glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
fullscreen = False
else:
glutFullScreen()
fullscreen = True
print('done')
def init(self):
pass
if __name__ == '__main__':
start = time.time()
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
w1 = glutCreateWindow('Listing 3.10')
glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
fullscreen = False
#glutFullScreen()
scene = Scene()
glutReshapeFunc(scene.reshape)
glutDisplayFunc(scene.display)
glutKeyboardFunc(scene.keyboard)
glutIdleFunc(scene.display)
scene.init()
glutMainLoop()
|
glUseProgram(compile_program(vertex_shader_source, fragment_shader_source))
glDrawArrays(GL_TRIANGLES, 0, 3);
|
model_parser_test.go
|
package influxdb
import (
"testing"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/models"
. "github.com/smartystreets/goconvey/convey"
)
func
|
(t *testing.T) {
Convey("Influxdb query parser", t, func() {
parser := &InfluxdbQueryParser{}
dsInfo := &models.DataSource{
JsonData: simplejson.New(),
}
Convey("can parse influxdb json model", func() {
json := `
{
"dsType": "influxdb",
"groupBy": [
{
"params": [
"$interval"
],
"type": "time"
},
{
"params": [
"datacenter"
],
"type": "tag"
},
{
"params": [
"none"
],
"type": "fill"
}
],
"measurement": "logins.count",
"policy": "default",
"refId": "B",
"resultFormat": "time_series",
"select": [
[
{
"type": "field",
"params": [
"value"
]
},
{
"type": "count",
"params": []
}
],
[
{
"type": "field",
"params": [
"value"
]
},
{
"type": "bottom",
"params": [
3
]
}
],
[
{
"type": "field",
"params": [
"value"
]
},
{
"type": "mean",
"params": []
},
{
"type": "math",
"params": [
" / 100"
]
}
]
],
"alias": "serie alias",
"tags": [
{
"key": "datacenter",
"operator": "=",
"value": "America"
},
{
"condition": "OR",
"key": "hostname",
"operator": "=",
"value": "server1"
}
]
}
`
dsInfo.JsonData.Set("timeInterval", ">20s")
modelJson, err := simplejson.NewJson([]byte(json))
So(err, ShouldBeNil)
res, err := parser.Parse(modelJson, dsInfo)
So(err, ShouldBeNil)
So(len(res.GroupBy), ShouldEqual, 3)
So(len(res.Selects), ShouldEqual, 3)
So(len(res.Tags), ShouldEqual, 2)
So(res.Interval, ShouldEqual, ">20s")
So(res.Alias, ShouldEqual, "serie alias")
})
		Convey("can parse raw query json model", func() {
json := `
{
"dsType": "influxdb",
"groupBy": [
{
"params": [
"$interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"interval": ">10s",
"policy": "default",
"query": "RawDummieQuery",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [
],
"type": "mean"
}
]
],
"tags": [
]
}
`
modelJson, err := simplejson.NewJson([]byte(json))
So(err, ShouldBeNil)
res, err := parser.Parse(modelJson, dsInfo)
So(err, ShouldBeNil)
So(res.RawQuery, ShouldEqual, "RawDummieQuery")
So(len(res.GroupBy), ShouldEqual, 2)
So(len(res.Selects), ShouldEqual, 1)
So(len(res.Tags), ShouldEqual, 0)
So(res.Interval, ShouldEqual, ">10s")
})
})
}
|
TestInfluxdbQueryParser
|
test.py
|
# coding:utf-8
# modified from: https://github.com/haqishen/MFNet-pytorch
# By Yuxiang Sun, Aug. 2, 2019
# Email: [email protected]
import os
import argparse
import time
import datetime
import numpy as np
import sys
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from util.MF_dataset import MF_dataset
from model import RTFNet
from sklearn.metrics import confusion_matrix
n_class = 9
data_dir = './dataset/'
model_dir = './weights_backup/'
def main():
conf_total = np.zeros((n_class,n_class))
model = eval(args.model_name)(n_class=n_class)
if args.gpu >= 0: model.cuda(args.gpu)
print('| loading model file %s... ' % model_file)
pretrained_weight = torch.load(model_file, map_location = lambda storage, loc: storage.cuda(args.gpu))
own_state = model.state_dict()
for name, param in pretrained_weight.items():
if name not in own_state:
continue
own_state[name].copy_(param)
print('done!')
test_dataset = MF_dataset(data_dir, args.dataset_name, have_label=True, input_h=args.img_height, input_w=args.img_width)
test_loader = DataLoader(
dataset = test_dataset,
batch_size = batch_size,
shuffle = False,
num_workers = args.num_workers,
pin_memory = True,
drop_last = False
)
test_loader.n_iter = len(test_loader)
ave_time_cost = 0.0
model.eval()
with torch.no_grad():
for it, (images, labels, names) in enumerate(test_loader):
images = Variable(images)
labels = Variable(labels)
if args.gpu >= 0:
images = images.cuda(args.gpu)
labels = labels.cuda(args.gpu)
|
end_time = time.time()
            if it > 10:  # skip the first 11 frames (it = 0..10) when accumulating the time cost
ave_time_cost += (end_time-start_time)
# convert tensor to numpy 1d array
label = labels.cpu().numpy().squeeze().flatten()
prediction = logits.argmax(1).cpu().numpy().squeeze().flatten() # prediction and label are both 1-d array, size: minibatch*640*480
# generate confusion matrix frame-by-frame
            conf = confusion_matrix(label, prediction, labels=[0,1,2,3,4,5,6,7,8]) # conf is an n_class*n_class matrix, vertical axis: groundtruth, horizontal axis: prediction
conf_total += conf
print("| frame %d/%d, time cost: %.2f ms" %(it+1, test_loader.n_iter, (end_time-start_time)*1000))
# calculate recall (Acc) and IoU for each class
recall_per_class = np.zeros(n_class)
iou_per_class = np.zeros(n_class)
for cid in range(0, n_class): # cid: class id
if conf_total[cid, 0:].sum() == 0:
recall_per_class[cid] = np.nan
else:
            recall_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, 0:].sum()) # recall (Acc) = TP / (TP + FN)
if (conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]) == 0:
iou_per_class[cid] = np.nan
else:
            iou_per_class[cid] = float(conf_total[cid, cid]) / float((conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid])) # IoU = TP / (TP + FP + FN)
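    # For class c the confusion matrix gives TP = conf_total[c, c], FN = (row sum - TP)
    # and FP = (column sum - TP), so recall = TP / (TP + FN) and IoU = TP / (TP + FP + FN).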
print('\n###########################################################################')
print('\n| %s: %s test results (with batch size %d) on %s using %s:' %(args.model_name, args.weight_name, batch_size, datetime.date.today(), torch.cuda.get_device_name(args.gpu)))
print('\n| * the tested dataset name: %s' % args.dataset_name)
print('| * the tested image count: %d' % test_loader.n_iter)
print('| * the tested image size: %d*%d' %(args.img_height, args.img_width))
print("| * recall per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \
%(recall_per_class[0], recall_per_class[1], recall_per_class[2], recall_per_class[3], recall_per_class[4], recall_per_class[5], recall_per_class[6], recall_per_class[7], recall_per_class[8]))
print("| * iou per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \
%(iou_per_class[0], iou_per_class[1], iou_per_class[2], iou_per_class[3], iou_per_class[4], iou_per_class[5], iou_per_class[6], iou_per_class[7], iou_per_class[8]))
print("\n| * average values (np.mean(x)): \n recall: %.6f, iou: %.6f" \
%(recall_per_class.mean(), iou_per_class.mean()))
print("| * average values (np.mean(np.nan_to_num(x))): \n recall: %.6f, iou: %.6f" \
%(np.mean(np.nan_to_num(recall_per_class)), np.mean(np.nan_to_num(iou_per_class))))
    print('\n| * the average time cost per frame (with batch size %d): %.2f ms, namely, the inference speed is %.2f fps' %(batch_size, ave_time_cost*1000/(test_loader.n_iter-11), 1.0/(ave_time_cost/(test_loader.n_iter-11)))) # the first 11 frames (it = 0..10) are excluded from the average
#print('\n| * the total confusion matrix: ')
#np.set_printoptions(precision=8, threshold=np.inf, linewidth=np.inf, suppress=True)
#print(conf_total)
print('\n###########################################################################')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test with pytorch')
parser.add_argument('--model_name', '-M', type=str, default='RTFNet')
parser.add_argument('--weight_name', '-W', type=str, default='RTFNet_152') # RTFNet_152, RTFNet_50, please change the number of layers in the network file
parser.add_argument('--dataset_name', '-D', type=str, default='test') # test, test_day, test_night
parser.add_argument('--img_height', '-IH', type=int, default=480)
parser.add_argument('--img_width', '-IW', type=int, default=640)
parser.add_argument('--gpu', '-G', type=int, default=0)
parser.add_argument('--num_workers', '-j', type=int, default=8)
args = parser.parse_args()
batch_size = 1 # do not change this parameter!
torch.cuda.set_device(args.gpu)
print("\n| the gpu count:", torch.cuda.device_count())
print("| the current used gpu:", torch.cuda.current_device(), '\n')
model_dir = os.path.join(model_dir, args.weight_name) # model_dir = './weights_backup/'
if os.path.exists(model_dir) is False:
        print("| the %s does not exist." % (model_dir))
sys.exit()
model_file = os.path.join(model_dir, 'final.pth')
if os.path.exists(model_file) is True:
print('| use the final model file.')
else:
print('| no model file found.')
sys.exit()
print('| testing %s: %s on GPU #%d with pytorch' % (args.model_name, args.weight_name, args.gpu))
main()
|
start_time = time.time()
logits = model(images) # logits.size(): mini_batch*num_class*480*640
|
nav.module.ts
|
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { NavComponent } from './nav.component';
@NgModule({
imports: [
CommonModule
],
declarations: [NavComponent],
exports: [NavComponent]
})
export class
|
{ }
|
NavModule
|
msg_service_router_test.go
|
package middleware_test
import (
"os"
"testing"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
dbm "github.com/tendermint/tm-db"
"github.com/puneetsingh166/tm-load-test/baseapp"
"github.com/puneetsingh166/tm-load-test/client/tx"
"github.com/puneetsingh166/tm-load-test/simapp"
"github.com/puneetsingh166/tm-load-test/testutil/testdata"
"github.com/puneetsingh166/tm-load-test/types/tx/signing"
"github.com/puneetsingh166/tm-load-test/x/auth/middleware"
authsigning "github.com/puneetsingh166/tm-load-test/x/auth/signing"
)
func TestRegisterMsgService(t *testing.T) {
// Create an encoding config that doesn't register testdata Msg services.
encCfg := simapp.MakeTestEncodingConfig()
msr := middleware.NewMsgServiceRouter(encCfg.InterfaceRegistry)
require.Panics(t, func() {
testdata.RegisterMsgServer(
msr,
testdata.MsgServerImpl{},
)
})
// Register testdata Msg services, and rerun `RegisterService`.
testdata.RegisterInterfaces(encCfg.InterfaceRegistry)
require.NotPanics(t, func() {
testdata.RegisterMsgServer(
msr,
testdata.MsgServerImpl{},
)
})
}
func TestRegisterMsgServiceTwice(t *testing.T)
|
func TestMsgService(t *testing.T) {
priv, _, _ := testdata.KeyTestPubAddr()
encCfg := simapp.MakeTestEncodingConfig()
testdata.RegisterInterfaces(encCfg.InterfaceRegistry)
db := dbm.NewMemDB()
app := baseapp.NewBaseApp("test", log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, encCfg.TxConfig.TxDecoder())
app.SetInterfaceRegistry(encCfg.InterfaceRegistry)
msr := middleware.NewMsgServiceRouter(encCfg.InterfaceRegistry)
txHandler, err := middleware.NewDefaultTxHandler(middleware.TxHandlerOptions{
MsgServiceRouter: msr,
})
require.NoError(t, err)
app.SetTxHandler(txHandler)
testdata.RegisterMsgServer(
msr,
testdata.MsgServerImpl{},
)
_ = app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 1}})
msg := testdata.MsgCreateDog{Dog: &testdata.Dog{Name: "Spot"}}
txBuilder := encCfg.TxConfig.NewTxBuilder()
txBuilder.SetFeeAmount(testdata.NewTestFeeAmount())
txBuilder.SetGasLimit(testdata.NewTestGasLimit())
err = txBuilder.SetMsgs(&msg)
require.NoError(t, err)
// First round: we gather all the signer infos. We use the "set empty
// signature" hack to do that.
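	// Setting a nil signature still records each signer's public key, sign mode and
	// sequence in the tx, which is what the sign-bytes computation in the second
	// round needs.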
sigV2 := signing.SignatureV2{
PubKey: priv.PubKey(),
Data: &signing.SingleSignatureData{
SignMode: encCfg.TxConfig.SignModeHandler().DefaultMode(),
Signature: nil,
},
Sequence: 0,
}
err = txBuilder.SetSignatures(sigV2)
require.NoError(t, err)
// Second round: all signer infos are set, so each signer can sign.
signerData := authsigning.SignerData{
ChainID: "test",
AccountNumber: 0,
Sequence: 0,
}
sigV2, err = tx.SignWithPrivKey(
encCfg.TxConfig.SignModeHandler().DefaultMode(), signerData,
txBuilder, priv, encCfg.TxConfig, 0)
require.NoError(t, err)
err = txBuilder.SetSignatures(sigV2)
require.NoError(t, err)
// Send the tx to the app
txBytes, err := encCfg.TxConfig.TxEncoder()(txBuilder.GetTx())
require.NoError(t, err)
res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
require.Equal(t, abci.CodeTypeOK, res.Code, "res=%+v", res)
}
|
{
// Setup baseapp.
encCfg := simapp.MakeTestEncodingConfig()
msr := middleware.NewMsgServiceRouter(encCfg.InterfaceRegistry)
testdata.RegisterInterfaces(encCfg.InterfaceRegistry)
// First time registering service shouldn't panic.
require.NotPanics(t, func() {
testdata.RegisterMsgServer(
msr,
testdata.MsgServerImpl{},
)
})
// Second time should panic.
require.Panics(t, func() {
testdata.RegisterMsgServer(
msr,
testdata.MsgServerImpl{},
)
})
}
|
ze_generated_example_configurations_client_test.go
|
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package armhdinsight_test
import (
"context"
"log"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsight/armhdinsight"
)
// x-ms-original-file: specification/hdinsight/resource-manager/Microsoft.HDInsight/stable/2021-06-01/examples/HDI_Configurations_List.json
func ExampleConfigurationsClient_List() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client := armhdinsight.NewConfigurationsClient("<subscription-id>", cred, nil)
res, err := client.List(ctx,
"<resource-group-name>",
"<cluster-name>",
nil)
if err != nil {
log.Fatal(err)
}
log.Printf("Response result: %#v\n", res.ConfigurationsClientListResult)
}
// x-ms-original-file: specification/hdinsight/resource-manager/Microsoft.HDInsight/stable/2021-06-01/examples/ChangeHttpConnectivityDisable.json
func ExampleConfigurationsClient_BeginUpdate() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client := armhdinsight.NewConfigurationsClient("<subscription-id>", cred, nil)
poller, err := client.BeginUpdate(ctx,
"<resource-group-name>",
"<cluster-name>",
"<configuration-name>",
map[string]*string{
"restAuthCredential.isEnabled": to.StringPtr("false"),
},
nil)
if err != nil {
log.Fatal(err)
}
_, err = poller.PollUntilDone(ctx, 30*time.Second)
if err != nil {
log.Fatal(err)
}
}
// x-ms-original-file: specification/hdinsight/resource-manager/Microsoft.HDInsight/stable/2021-06-01/examples/HDI_Configurations_Get.json
func ExampleConfigurationsClient_Get()
|
{
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client := armhdinsight.NewConfigurationsClient("<subscription-id>", cred, nil)
res, err := client.Get(ctx,
"<resource-group-name>",
"<cluster-name>",
"<configuration-name>",
nil)
if err != nil {
log.Fatal(err)
}
log.Printf("Response result: %#v\n", res.ConfigurationsClientGetResult)
}
|
|
networks_stylegan2_unweighted.py
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Network architectures used in the StyleGAN2 paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d, upsample_conv_2d, conv_downsample_2d
from dnnlib.tflib.ops.fused_bias_act import fused_bias_act
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolution or fully-connected layer.
def get_weight(shape, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
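    # In both branches the effective weight std is init_std * runtime_coef = he_std;
    # splitting it between the stored variable and a runtime multiplier is the
    # equalized learning rate trick, with lrmul scaling how large optimizer updates
    # are relative to the weight magnitude.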
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable(weight_var, shape=shape, initializer=init) * runtime_coef
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense_layer(x, fmaps, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolution layer with optional upsampling or downsampling.
def conv2d_layer(x, fmaps, kernel, up=False, down=False, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
return x
#----------------------------------------------------------------------------
# Apply bias and activation func.
def apply_bias_act(x, act='linear', alpha=None, gain=None, lrmul=1, bias_var='bias'):
b = tf.get_variable(bias_var, shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
return fused_bias_act(x, b=tf.cast(b, x.dtype), act=act, alpha=alpha, gain=gain)
#----------------------------------------------------------------------------
# Naive upsampling (nearest neighbor) and downsampling (average pooling).
def naive_upsample_2d(x, factor=2):
with tf.variable_scope('NaiveUpsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H, 1, W, 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
return tf.reshape(x, [-1, C, H * factor, W * factor])
def naive_downsample_2d(x, factor=2):
with tf.variable_scope('NaiveDownsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H // factor, factor, W // factor, factor])
return tf.reduce_mean(x, axis=[3,5])
#----------------------------------------------------------------------------
# Modulated convolution layer.
def modulated_conv2d_layer(x, y, fmaps, kernel, up=False, down=False, demodulate=True, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, fused_modconv=True, weight_var='weight', mod_weight_var='mod_weight', mod_bias_var='mod_bias'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
# Get weight.
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
ww = w[np.newaxis] # [BkkIO] Introduce minibatch dimension.
# Modulate.
s = dense_layer(y, fmaps=x.shape[1].value, weight_var=mod_weight_var) # [BI] Transform incoming W to style.
s = apply_bias_act(s, bias_var=mod_bias_var) + 1 # [BI] Add bias (initially 1).
ww *= tf.cast(s[:, np.newaxis, np.newaxis, :, np.newaxis], w.dtype) # [BkkIO] Scale input feature maps.
# Demodulate.
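    # Demodulation rescales each output feature map so that, for unit-variance inputs,
    # the modulated convolution produces approximately unit-variance outputs, taking
    # the place of explicit normalization in StyleGAN2.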
if demodulate:
d = tf.rsqrt(tf.reduce_sum(tf.square(ww), axis=[1,2,3]) + 1e-8) # [BO] Scaling factor.
ww *= d[:, np.newaxis, np.newaxis, np.newaxis, :] # [BkkIO] Scale output feature maps.
# Reshape/scale input.
if fused_modconv:
x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]]) # Fused => reshape minibatch to convolution groups.
w = tf.reshape(tf.transpose(ww, [1, 2, 3, 0, 4]), [ww.shape[1], ww.shape[2], ww.shape[3], -1])
else:
x *= tf.cast(s[:, :, np.newaxis, np.newaxis], x.dtype) # [BIhw] Not fused => scale input activations.
# Convolution with optional up/downsampling.
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
# Reshape/scale output.
if fused_modconv:
x = tf.reshape(x, [-1, fmaps, x.shape[2], x.shape[3]]) # Fused => reshape convolution groups back to minibatch.
elif demodulate:
x *= tf.cast(d[:, :, np.newaxis, np.newaxis], x.dtype) # [BOhw] Not fused => scale output activations.
return x
#----------------------------------------------------------------------------
# Minibatch standard deviation layer.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
#----------------------------------------------------------------------------
# Main generator network.
# Composed of two sub-networks (mapping and synthesis) that are defined below.
# Used in configs B-F (Table 1).
def G_main(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
truncation_psi = 0.5, # Style strength multiplier for the truncation trick. None = disable.
truncation_cutoff = None, # Number of layers for which to apply the truncation trick. None = disable.
truncation_psi_val = None, # Value for truncation_psi to use during validation.
truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation.
dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable.
style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable.
is_training = False, # Network is under training? Enables and disables specific features.
is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi.
return_dlatents = False, # Return dlatents in addition to the images?
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
mapping_func = 'G_mapping', # Build func name for the mapping network.
synthesis_func = 'G_synthesis_stylegan2', # Build func name for the synthesis network.
**kwargs): # Arguments for sub-networks (mapping and synthesis).
# Validate arguments.
assert not is_training or not is_validation
assert isinstance(components, dnnlib.EasyDict)
if is_validation:
truncation_psi = truncation_psi_val
truncation_cutoff = truncation_cutoff_val
if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
truncation_psi = None
if is_training:
truncation_cutoff = None
if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
dlatent_avg_beta = None
if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
style_mixing_prob = None
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=globals()[synthesis_func], **kwargs)
num_layers = components.synthesis.input_shape[1]
dlatent_size = components.synthesis.input_shape[2]
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=globals()[mapping_func], dlatent_broadcast=num_layers, **kwargs)
# Setup variables.
lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, is_training=is_training, **kwargs)
dlatents = tf.cast(dlatents, tf.float32)
# Update moving average of W.
if dlatent_avg_beta is not None:
with tf.variable_scope('DlatentAvg'):
batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
with tf.control_dependencies([update_op]):
dlatents = tf.identity(dlatents)
# Perform style mixing regularization.
if style_mixing_prob is not None:
with tf.variable_scope('StyleMix'):
latents2 = tf.random_normal(tf.shape(latents_in))
dlatents2 = components.mapping.get_output_for(latents2, labels_in, is_training=is_training, **kwargs)
dlatents2 = tf.cast(dlatents2, tf.float32)
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
mixing_cutoff = tf.cond(
tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
lambda: cur_layers)
dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
# Apply truncation trick.
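    # The truncation trick interpolates each dlatent toward the tracked average W
    # (dlatent_avg) with per-layer strength psi; layers at or beyond truncation_cutoff
    # keep psi = 1 and are left untouched.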
if truncation_psi is not None:
with tf.variable_scope('Truncation'):
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
layer_psi = np.ones(layer_idx.shape, dtype=np.float32)
if truncation_cutoff is None:
layer_psi *= truncation_psi
else:
layer_psi = tf.where(layer_idx < truncation_cutoff, layer_psi * truncation_psi, layer_psi)
dlatents = tflib.lerp(dlatent_avg, dlatents, layer_psi)
# Evaluate synthesis network.
deps = []
if 'lod' in components.synthesis.vars:
deps.append(tf.assign(components.synthesis.vars['lod'], lod_in))
with tf.control_dependencies(deps):
images_out = components.synthesis.get_output_for(dlatents, is_training=is_training, force_clean_graph=is_template_graph, **kwargs)
# Return requested outputs.
images_out = tf.identity(images_out, name='images_out')
if return_dlatents:
return images_out, dlatents
return images_out
#----------------------------------------------------------------------------
# Mapping network.
# Transforms the input latent code (z) to the disentangled latent code (w).
# Used in configs B-F (Table 1).
def G_mapping(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
latent_size = 512, # Latent vector (Z) dimensionality.
label_size = 0, # Label dimensionality, 0 if no labels.
dlatent_size = 512, # Disentangled latent (W) dimensionality.
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
mapping_layers = 8, # Number of mapping layers.
mapping_fmaps = 512, # Number of activations in the mapping layers.
mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers.
mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers?
dtype = 'float32', # Data type to use for activations and outputs.
**_kwargs): # Ignore unrecognized keyword args.
act = mapping_nonlinearity
# Inputs.
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(latents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
x = latents_in
# Embed labels and concatenate them with latents.
if label_size:
with tf.variable_scope('LabelConcat'):
w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
y = tf.matmul(labels_in, tf.cast(w, dtype))
x = tf.concat([x, y], axis=1)
# Normalize latents.
if normalize_latents:
with tf.variable_scope('Normalize'):
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + 1e-8)
# Mapping layers.
for layer_idx in range(mapping_layers):
with tf.variable_scope('Dense%d' % layer_idx):
fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
x = apply_bias_act(dense_layer(x, fmaps=fmaps, lrmul=mapping_lrmul), act=act, lrmul=mapping_lrmul)
# Broadcast.
if dlatent_broadcast is not None:
with tf.variable_scope('Broadcast'):
x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
# Output.
assert x.dtype == tf.as_dtype(dtype)
return tf.identity(x, name='dlatents_out')
#----------------------------------------------------------------------------
# StyleGAN synthesis network with revised architecture (Figure 2d).
# Implements progressive growing, but no skip connections or residual nets (Figure 7).
# Used in configs B-D (Table 1).
def G_synthesis_stylegan_revised(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
fused_modconv = True, # Implement modulated_conv2d_layer() as a single fused op?
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
if is_template_graph: force_clean_graph = True
if force_clean_graph: randomize_noise = False
if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
act = nonlinearity
num_layers = resolution_log2 * 2 - 2
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_layers, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
# Noise inputs.
noise_inputs = []
for layer_idx in range(num_layers - 1):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Single convolution layer with all the bells and whistles.
def layer(x, layer_idx, fmaps, kernel, up=False):
x = modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
if randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_inputs[layer_idx], x.dtype)
noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
x += noise * tf.cast(noise_strength, x.dtype)
return apply_bias_act(x, act=act)
# Early layers.
with tf.variable_scope('4x4'):
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
with tf.variable_scope('Conv'):
x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)
# Building blocks for remaining layers.
def block(res, x): # res = 3..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0_up'):
x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
with tf.variable_scope('Conv1'):
x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
return x
def torgb(res, x): # res = 2..resolution_log2
with tf.variable_scope('ToRGB_lod%d' % (resolution_log2 - res)):
return apply_bias_act(modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
for res in range(3, resolution_log2 + 1):
x = block(res, x)
images_out = torgb(resolution_log2, x)
# Linear structure: simple but inefficient.
if structure == 'linear':
images_out = torgb(2, x)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(res, x)
img = torgb(res, x)
with tf.variable_scope('Upsample_lod%d' % lod):
images_out = upsample_2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
re
|
def grow(x, res, lod):
y = block(res, x)
img = lambda: naive_upsample_2d(torgb(res, y), factor=2**lod)
img = cset(img, (lod_in > lod), lambda: naive_upsample_2d(tflib.lerp(torgb(res, y), upsample_2d(torgb(res - 1, x)), lod_in - lod), factor=2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(x, 3, resolution_log2 - 3)
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
#----------------------------------------------------------------------------
# StyleGAN2 synthesis network (Figure 7).
# Implements skip connections and residual nets (Figure 7), but no progressive growing.
# Used in configs E-F (Table 1).
def G_synthesis_stylegan2(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
fused_modconv = True, # Implement modulated_conv2d_layer() as a single fused op?
clip_style = 'ffhq',
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
assert architecture in ['orig', 'skip', 'resnet']
act = nonlinearity
num_layers = resolution_log2 * 2 - 2
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_layers, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
# Noise inputs.
noise_inputs = []
for layer_idx in range(num_layers - 1):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Single convolution layer with all the bells and whistles.
def layer(x, layer_idx, fmaps, kernel, up=False):
x = modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
if randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_inputs[layer_idx], x.dtype)
noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
noise = noise * tf.cast(noise_strength, x.dtype)
with tf.variable_scope('resampling'):
alpha = tf.get_variable('alpha', shape=[], initializer=tf.initializers.constant(0.5))
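            # Blend the learned spatial attention map with an all-ones mask via alpha,
            # then rescale it to unit RMS over H and W so that applying the mask does
            # not change the overall activation magnitude.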
sp_att_mask = alpha + (1-alpha) * spatial_att(x, clip_style)
sp_att_mask *= tf.rsqrt(tf.reduce_mean(tf.square(sp_att_mask), axis=[2, 3], keepdims=True) + 1e-8)
x += noise
x = x * sp_att_mask
return apply_bias_act(x, act=act)
# Building blocks for main layers.
def block(x, res): # res = 3..resolution_log2
t = x
with tf.variable_scope('Conv0_up'):
x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
with tf.variable_scope('Conv1'):
x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
if architecture == 'resnet':
with tf.variable_scope('Skip'):
t = conv2d_layer(t, fmaps=nf(res-1), kernel=1, up=True, resample_kernel=resample_kernel)
x = (x + t) * (1 / np.sqrt(2))
return x
def upsample(y):
with tf.variable_scope('Upsample'):
return upsample_2d(y, k=resample_kernel)
def torgb(x, y, res): # res = 2..resolution_log2
with tf.variable_scope('ToRGB'):
t = apply_bias_act(modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))
return t if y is None else y + t
# Early layers.
y = None
with tf.variable_scope('4x4'):
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
with tf.variable_scope('Conv'):
x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)
if architecture == 'skip':
y = torgb(x, y, 2)
# Main layers.
for res in range(3, resolution_log2 + 1):
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
x = block(x, res)
if architecture == 'skip':
y = upsample(y)
if architecture == 'skip' or res == resolution_log2:
y = torgb(x, y, res)
images_out = y
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
#----------------------------------------------------------------------------
# Original StyleGAN discriminator.
# Used in configs B-D (Table 1).
def D_stylegan(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 3, # Number of input color channels. Overridden based on dataset.
resolution = 1024, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
act = nonlinearity
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
# Building blocks for spatial layers.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=1), act=act)
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=3), act=act)
with tf.variable_scope('Conv1_down'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-2), kernel=3, down=True, resample_kernel=resample_kernel), act=act)
return x
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
x = fromrgb(images_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
x = block(x, res)
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
with tf.variable_scope('Downsample_lod%d' % lod):
img = downsample_2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = tflib.lerp_clip(x, y, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(res, lod):
x = lambda: fromrgb(naive_downsample_2d(images_in, factor=2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(naive_downsample_2d(images_in, factor=2**(lod+1)), res - 1), lod_in - lod))
return y()
x = grow(3, resolution_log2 - 3)
# Final layers at 4x4 resolution.
with tf.variable_scope('4x4'):
if mbstd_group_size > 1:
with tf.variable_scope('MinibatchStddev'):
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
with tf.variable_scope('Dense0'):
x = apply_bias_act(dense_layer(x, fmaps=nf(0)), act=act)
# Output layer with label conditioning from "Which Training Methods for GANs do actually Converge?"
with tf.variable_scope('Output'):
x = apply_bias_act(dense_layer(x, fmaps=max(labels_in.shape[1], 1)))
if labels_in.shape[1] > 0:
x = tf.reduce_sum(x * labels_in, axis=1, keepdims=True)
scores_out = x
# Output.
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
#----------------------------------------------------------------------------
# StyleGAN2 discriminator (Figure 7).
# Implements skip connections and residual nets (Figure 7), but no progressive growing.
# Used in configs E-F (Table 1).
def D_stylegan2(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 3, # Number of input color channels. Overridden based on dataset.
resolution = 1024, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
assert architecture in ['orig', 'skip', 'resnet']
act = nonlinearity
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
# Building blocks for main layers.
def fromrgb(x, y, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB'):
t = apply_bias_act(conv2d_layer(y, fmaps=nf(res-1), kernel=1), act=act)
return t if x is None else x + t
def block(x, res): # res = 2..resolution_log2
t = x
with tf.variable_scope('Conv0'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=3), act=act)
with tf.variable_scope('Conv1_down'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-2), kernel=3, down=True, resample_kernel=resample_kernel), act=act)
if architecture == 'resnet':
with tf.variable_scope('Skip'):
t = conv2d_layer(t, fmaps=nf(res-2), kernel=1, down=True, resample_kernel=resample_kernel)
x = (x + t) * (1 / np.sqrt(2))
return x
def downsample(y):
with tf.variable_scope('Downsample'):
return downsample_2d(y, k=resample_kernel)
# Main layers.
x = None
y = images_in
for res in range(resolution_log2, 2, -1):
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if architecture == 'skip' or res == resolution_log2:
x = fromrgb(x, y, res)
x = block(x, res)
if architecture == 'skip':
y = downsample(y)
# Final layers.
with tf.variable_scope('4x4'):
if architecture == 'skip':
x = fromrgb(x, y, 2)
if mbstd_group_size > 1:
with tf.variable_scope('MinibatchStddev'):
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
with tf.variable_scope('Dense0'):
x = apply_bias_act(dense_layer(x, fmaps=nf(0)), act=act)
# Output layer with label conditioning from "Which Training Methods for GANs do actually Converge?"
with tf.variable_scope('Output'):
x = apply_bias_act(dense_layer(x, fmaps=max(labels_in.shape[1], 1)))
if labels_in.shape[1] > 0:
x = tf.reduce_sum(x * labels_in, axis=1, keepdims=True)
scores_out = x
# Output.
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
#----------------------------------------------------------------------------
def instance_norm(x, epsilon=1e-8):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('InstanceNorm'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2,3], keepdims=True)
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon)
x = tf.cast(x, orig_dtype)
return x
def adjust_range(x):
assert len(x.shape) == 4
with tf.variable_scope('Adjust_range'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2, 3], keepdims=True)
x_max = tf.reduce_max(x, axis=(2, 3), keepdims=True)
x = x / (x_max + 1e-8)
x = tf.cast(x, orig_dtype)
return x
def spatial_att(x, clip_style):
    """
    Spatial attention mask.
    :param x: input activations [NCHW]
    :param clip_style: one of 'ffhq', 'cat' or 'church'; selects how the channels
        are collapsed into a single attention map
    :return: non-negative mask tensor [NCHW]
    """
fmaps = x.shape[1].value
if clip_style == 'ffhq':
x = tf.reduce_sum(tf.nn.relu(-x), axis=1, keepdims=True)
elif clip_style == 'cat':
x = tf.reduce_sum(tf.abs(x), axis=1, keepdims=True)
elif clip_style == 'church':
x = tf.reduce_max(-x, axis=1, keepdims=True)
else:
raise ValueError('Unsupported clip style %s' % clip_style)
x = (adjust_range(x) + 1.0) / 2.0
b = get_weight(shape=[x.shape[2].value, x.shape[3].value], weight_var='bias')
att = x + b
return tf.tile(att, [1, fmaps, 1, 1])
|
turn lambda: tf.cond(new_cond, new_lambda, cur_lambda)
|
getRouteFilterRule.ts
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import { input as inputs, output as outputs, enums } from "../../types";
import * as utilities from "../../utilities";
/**
* Route Filter Rule Resource.
*/
export function getRouteFilterRule(args: GetRouteFilterRuleArgs, opts?: pulumi.InvokeOptions): Promise<GetRouteFilterRuleResult> {
if (!opts) {
opts = {}
}
if (!opts.version) {
opts.version = utilities.getVersion();
}
return pulumi.runtime.invoke("azure-native:network/v20190701:getRouteFilterRule", {
"resourceGroupName": args.resourceGroupName,
"routeFilterName": args.routeFilterName,
"ruleName": args.ruleName,
}, opts);
}
export interface GetRouteFilterRuleArgs {
|
* The name of the resource group.
*/
readonly resourceGroupName: string;
/**
* The name of the route filter.
*/
readonly routeFilterName: string;
/**
* The name of the rule.
*/
readonly ruleName: string;
}
/**
* Route Filter Rule Resource.
*/
export interface GetRouteFilterRuleResult {
/**
* The access type of the rule.
*/
readonly access: string;
/**
* The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020'].
*/
readonly communities: string[];
/**
* A unique read-only string that changes whenever the resource is updated.
*/
readonly etag: string;
/**
* Resource ID.
*/
readonly id?: string;
/**
* Resource location.
*/
readonly location?: string;
/**
* The name of the resource that is unique within a resource group. This name can be used to access the resource.
*/
readonly name?: string;
/**
* The provisioning state of the route filter rule resource.
*/
readonly provisioningState: string;
/**
* The rule type of the rule.
*/
readonly routeFilterRuleType: string;
}
|
/**
|
load.ts
|
import { defineMacro, defineMacroPlugin } from 'vite-plugin-macro'
import { join } from 'path'
import { run } from './common'
import glob from 'fast-glob'
export function pluginLoad() {
return defineMacroPlugin({
name: 'macro-load',
typesPath: join(__dirname, 'load-macros.d.ts'),
exports: {
'@load': {
macros: [tryMacro, loadMacro],
},
},
})
}
const tryMacro = defineMacro('tryLoad')
.withSignature('(glob: string): void')
.withHandler(({ path, args, filepath }, { template }) => {
const glob = run(() => {
if (args.length === 0)
throw new Error(`glob should not be undefined in tryLoad()`)
const arg = args[0]
if (!arg.isStringLiteral())
throw new Error(`glob should be string literal in tryLoad()`)
return arg.node.value
})
path.replaceWith(
template.statement.ast(
`console.log('load glob "${glob}" from "${filepath}"')`
)
)
})
const loadMacro = defineMacro('load')
.withSignature(
|
)
.withHandler(({ path, args }, _, { appendImports, normalizePathPattern }) => {
const pattern = run(() => {
if (args.length === 0)
throw new Error(`glob should not be undefined in load()`)
const arg = args[0]
if (!arg.isStringLiteral())
throw new Error(`glob should be string literal in load()`)
return arg.node.value
})
const { normalized, base, resolveImportPath } =
normalizePathPattern(pattern)
const imports = searchByGlob(normalized, base).map(resolveImportPath)
appendImports(imports.map((imp) => ({ moduleName: imp })))
path.remove()
})
function searchByGlob(pattern: string, baseDir: string) {
return glob.sync(pattern, {
cwd: baseDir,
ignore: ['**/node_modules/**'],
})
}
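// Hypothetical usage: writing `load('./assets/*.css')` in a consumer module is replaced
// at build time by an import for every file the glob matches (the matched paths are
// appended via appendImports and the macro call itself is removed), while
// `tryLoad('./assets/*.css')` only logs the resolved pattern and the calling file.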
|
'(glob: string): void',
'provide a glob pattern to load assets'
|
base.py
|
"""
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
from django.db import utils
from django.db.backends import *
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
try:
import pytz
except ImportError:
pytz = None
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
    """ The Python sqlite3 interface always returns byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
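# For example, decoder(parse_date) turns the raw bytes b"2012-09-04" coming out of
# SQLite into datetime.date(2012, 9, 4) before handing the value back to Django.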
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(util.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2, 4, 1):
# Starting in 2.4.1, the str type is not accepted anymore, therefore,
# we convert all str objects to Unicode
    # As registering an adapter for a primitive type causes a small
# slow-down, this adapter is only registered for sqlite3 versions
# needing it (Python 2.6 and up).
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
autocommits_when_autocommit_is_off = True
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just a single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT, which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
def check_aggregate_support(self, aggregate):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg,
aggregates.Variance, aggregates.StdDev)
if (isinstance(aggregate.source, bad_fields) and
isinstance(aggregate, bad_aggregates)):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev and Variance aggregations '
'on date/time fields in sqlite3 '
'since date/time is saved as text.')
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return util.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
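# SQLite gained multi-row VALUES clauses only in 3.7.11, so bulk inserts are
# emitted as a compound "SELECT ... UNION ALL SELECT ..." statement instead,
# e.g. INSERT INTO t (a, b) SELECT %s AS a, %s AS b UNION ALL SELECT %s, %s.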
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside atomic
# blocks. To work around that bug, on SQLite, atomic starts a
# transaction explicitly rather than simply disable autocommit.
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
|
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = util.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
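# Backs the REGEXP operator used by the 'regex'/'iregex' lookups; SQLite ships no
# built-in REGEXP function, so one has to be supplied via connection.create_function().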
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, re_string))
|
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
|
code-display.js
|
/*!
* Ext JS Library 3.0.0
* Copyright(c) 2006-2009 Ext JS, LLC
* [email protected]
* http://www.extjs.com/license
*/
// Create and append to the body, a Panel containing a block of code from the passed URL
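// Hypothetical usage: createCodePanel('examples/code-display.js', 'View Source');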
function
|
(url, title) {
var panel = new Ext.Panel({
hideMode: 'visibility',
title: title,
width: 750,
style: {
'margin-top': '10px'
},
hideCollapseTool: true,
titleCollapse: true,
collapsible: true,
collapsed: true,
autoScroll: true,
renderTo: Ext.getBody(),
listeners: {
render: function(p) {
p.getUpdater().setRenderer({
render: Ext.isIE ? function(el, response, scripts, callback) {
el.update('');
var np = el.createChild({
tag: 'pre',
cls: 'code',
cn: {
tag: 'code'
}
});
var t = response.responseText.split("\n");
var c = np.child('code', true);
for (var i = 0, l = t.length; i < l; i++) {
var pre = document.createElement('pre');
if (t[i].length) {
pre.appendChild(document.createTextNode(t[i]));
c.appendChild(pre);
} else if (i < (l - 1)) {
c.appendChild(document.createElement("br"));
}
}
} : function(el, response, scripts, callback) {
el.update('');
el.createChild({
tag: 'pre',
cls: 'code',
cn: {
tag: 'code',
html: response.responseText
}
});
}
});
},
beforeexpand: function(p) {
p.load(url);
},
single: true
}
});
}
// Patch to allow XHR to local files. From hendricd: http://extjs.com/forum/member.php?u=8730
Ext.apply( Ext.lib.Ajax ,
{ forceActiveX:false,
createXhrObject:function(transactionId)
{
var obj={ status:{isError:false}
, tId:transactionId}, http;
try
{
if(Ext.isIE7 && !!this.forceActiveX){throw("IE7forceActiveX");}
obj.conn= new XMLHttpRequest();
}
catch(e)
{
for (var i = 0; i < this.activeX.length; ++i) {
try
{
obj.conn= new ActiveXObject(this.activeX[i]);
break;
}
catch(e) {
}
}
}
finally
{
obj.status.isError = typeof obj.conn === 'undefined';
}
return obj;
},
getHttpStatus: function(reqObj){
var statObj = { status:0
,statusText:''
,isError:false
,isLocal:false
,isOK:false
,error:null};
try {
if(!reqObj)throw('noobj');
statObj.status = reqObj.status;
statObj.isLocal = !reqObj.status && location.protocol == "file:" ||
Ext.isSafari && reqObj.status === undefined;
statObj.isOK = (statObj.isLocal || (statObj.status > 199 && statObj.status < 300));
statObj.statusText = reqObj.statusText || '';
} catch(e){ //status may not avail/valid yet (or called too early).
}
return statObj;
},
handleTransactionResponse:function(o, callback, isAbort)
{
callback = callback || {};
var responseObject=null;
if(!o.status.isError){
o.status = this.getHttpStatus(o.conn);
/* create and enhance the response with proper status and XMLDOM if necessary */
responseObject = this.createResponseObject(o, callback.argument);
}
if(o.status.isError){ /* checked again in case exception was raised - ActiveX was disabled during XML-DOM creation? */
// And mixin everything the XHR object had to offer as well
responseObject = Ext.applyIf(responseObject||{},this.createExceptionObject(o.tId, callback.argument, (isAbort ? isAbort : false)));
}
responseObject.options = o.options;
responseObject.stat = o.status;
if (o.status.isOK && !o.status.isError) {
if (callback.success) {
if (!callback.scope) {
callback.success(responseObject);
}
else {
callback.success.apply(callback.scope, [responseObject]);
}
}
} else {
if (callback.failure) {
if (!callback.scope) {
callback.failure(responseObject);
}
else {
callback.failure.apply(callback.scope, [responseObject]);
}
}
}
if(o.options.async){
this.releaseObject(o);
responseObject = null;
}else{
this.releaseObject(o);
return responseObject;
}
},
createResponseObject:function(o, callbackArg)
{
var obj = {};
var headerObj = {},headerStr='';
try{ //to catch bad encoding problems here
obj.responseText = o.conn.responseText;
}catch(e){obj.responseText ='';}
obj.responseXML = o.conn.responseXML;
try{
headerStr = o.conn.getAllResponseHeaders()||'';
} catch(e){}
if(o.status.isLocal){
o.status.isOK = ((o.status.status = (!!obj.responseText.length)?200:404) == 200);
if(o.status.isOK && (!obj.responseXML || obj.responseXML.childNodes.length == 0)){
var xdoc=null;
try{ //ActiveX may be disabled
if(typeof(DOMParser) == 'undefined'){
xdoc=new ActiveXObject("Microsoft.XMLDOM");
xdoc.async="false";
xdoc.loadXML(obj.responseText);
}else{
try{ //Opera 9 will fail parsing non-XML content, so trap here.
var domParser = new DOMParser();
xdoc = domParser.parseFromString(obj.responseText, 'application\/xml');
}catch(ex){}
finally{domParser = null;}
}
} catch(ex){
o.status.isError = true;
o.status.error = ex;
}
obj.responseXML = xdoc;
}
if(obj.responseXML){
var parseBad = (obj.responseXML.parseError || 0) != 0 || obj.responseXML.childNodes.length == 0;
if(!parseBad){
headerStr = 'Content-Type: ' + (obj.responseXML.contentType || 'text\/xml') + '\n' + headerStr ;
}
}
}
var header = headerStr.split('\n');
for (var i = 0; i < header.length; i++) {
var delimitPos = header[i].indexOf(':');
if (delimitPos != -1) {
headerObj[header[i].substring(0, delimitPos)] = header[i].substring(delimitPos + 2);
}
}
obj.tId = o.tId;
obj.status = o.status.status;
obj.statusText = o.status.statusText;
obj.getResponseHeader = headerObj;
obj.getAllResponseHeaders = headerStr;
obj.stat = o.status;
if (typeof callbackArg !== 'undefined') {
obj.argument = callbackArg;
}
return obj;
},
request : function(method, uri, cb, data, options) {
options = Ext.apply({async:true,
headers:false,
userId:null,
password:null,
xmlData:null }, options||{});
var hs = options.headers;
if(hs){
for(var h in hs){
if(hs.hasOwnProperty(h)){
this.initHeader(h, hs[h], false);
}
}
}
if(options.xmlData){
this.initHeader('Content-Type', 'text/xml', false);
method = 'POST';
data = options.xmlData;
}
return this.makeRequest(method, uri, cb, data, options);
},
asyncRequest:function(method, uri, callback, postData)
{
var o = this.getConnectionObject();
if (!o || o.status.isError) {
return null;
}
else {
o.options = options;
try{
o.conn.open(method, uri, true);
} catch(ex){
o.status.isError = true;
o.status.error = ex;
return Ext.apply(o,this.handleTransactionResponse(o, callback));
}
if (this.useDefaultXhrHeader) {
if (!this.defaultHeaders['X-Requested-With']) {
this.initHeader('X-Requested-With', this.defaultXhrHeader, true);
}
}
if(postData && this.useDefaultHeader){
this.initHeader('Content-Type', this.defaultPostHeader);
}
if (this.hasDefaultHeaders || this.hasHeaders) {
this.setHeader(o);
}
this.handleReadyState(o, callback);
try{ o.conn.send(postData || null);
} catch(ex){
o.status.isError=true;
o.status.error = ex;
return Ext.apply(o,this.handleTransactionResponse(o, callback));
}
return o;
}
},
makeRequest:function(method, uri, callback, postData, options)
{
var o = this.getConnectionObject();
if (!o || o.status.isError) {
return null;
}
else {
o.options = options;
try{
o.conn.open(method, uri, options.async, options.userId, options.password);
} catch(ex){
o.status.isError = true;
o.status.error = ex;
var r=this.handleTransactionResponse(o, callback);
return Ext.apply(o,r);
}
if (this.useDefaultXhrHeader) {
if (!this.defaultHeaders['X-Requested-With']) {
this.initHeader('X-Requested-With', this.defaultXhrHeader, true);
}
}
if(postData && this.useDefaultHeader){
this.initHeader('Content-Type', this.defaultPostHeader);
}
if (this.hasDefaultHeaders || this.hasHeaders) {
this.setHeader(o);
}
if(o.options.async){ //Timers won't work here as it's a blocking call
this.handleReadyState(o, callback);
}
try{ o.conn.send(postData || null);
} catch(ex){
//Ext.apply(o,this.handleTransactionResponse(o, callback));
}
return options.async?o:Ext.apply(o,this.handleTransactionResponse(o, callback));
}
}});
Ext.lib.Ajax.forceActiveX = (document.location.protocol == 'file:');/* or other true/false mechanism */
|
createCodePanel
|
service_test.go
|
package rclgo_test
import (
"context"
"math/rand"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
example_interfaces_srv "github.com/tiiuae/rclgo/internal/msgs/example_interfaces/srv"
"github.com/tiiuae/rclgo/pkg/rclgo"
"github.com/tiiuae/rclgo/pkg/rclgo/types"
)
func
|
(t *testing.T) {
type testSendResult struct {
req types.Message
resp types.Message
info *rclgo.RmwServiceInfo
err error
sum int64
}
var (
serviceCtx, clientCtx *rclgo.Context
client *rclgo.Client
err error
spinCtx, cancelSpin = context.WithCancel(context.Background())
spinErrs = make(chan error, 2)
requestReceivedChan = make(
chan *example_interfaces_srv.AddTwoInts_Request,
1,
)
responseSentErrChan = make(chan error, 1)
randGen = rand.NewSource(42)
qosProfile = rclgo.NewRmwQosProfileServicesDefault()
)
qosProfile.History = rclgo.RmwQosHistoryPolicyKeepAll
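// KeepAll history is presumably used so that queued requests and responses are not
// dropped when many calls are in flight at once (see the burst test below).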
sendReq := func(a, b int64) *testSendResult {
req := example_interfaces_srv.NewAddTwoInts_Request()
req.A = a
req.B = b
result := testSendResult{req: req, sum: a + b}
result.resp, result.info, result.err = client.Send(spinCtx, req)
return &result
}
defer func() {
cancelSpin()
if serviceCtx != nil {
serviceCtx.Close()
}
if clientCtx != nil {
clientCtx.Close()
}
}()
Convey("Scenario: Client calls a service", t, func() {
Convey("Create a service", func() {
serviceCtx, err = newDefaultRCLContext()
So(err, ShouldBeNil)
node, err := serviceCtx.NewNode("service_node", "/test")
So(err, ShouldBeNil)
_, err = node.NewService(
"add",
example_interfaces_srv.AddTwoIntsTypeSupport,
&rclgo.ServiceOptions{Qos: qosProfile},
func(rsi *rclgo.RmwServiceInfo, rm types.Message, srs rclgo.ServiceResponseSender) {
req := rm.(*example_interfaces_srv.AddTwoInts_Request)
requestReceivedChan <- req
resp := example_interfaces_srv.NewAddTwoInts_Response()
resp.Sum = req.A + req.B
responseSentErrChan <- srs.SendResponse(resp)
},
)
So(err, ShouldBeNil)
go func() { spinErrs <- serviceCtx.Spin(spinCtx) }()
})
Convey("Create a client", func() {
clientCtx, err = newDefaultRCLContext()
So(err, ShouldBeNil)
node, err := clientCtx.NewNode("client_node", "/test")
So(err, ShouldBeNil)
client, err = node.NewClient(
"add",
example_interfaces_srv.AddTwoIntsTypeSupport,
&rclgo.ClientOptions{Qos: qosProfile},
)
So(err, ShouldBeNil)
go func() { spinErrs <- clientCtx.Spin(spinCtx) }()
})
Convey("The client sends a request", func() {
time.Sleep(200 * time.Millisecond)
var result *testSendResult
timeOut(2000, func() { result = sendReq(3, -7) }, "Sending request")
So(result.err, ShouldBeNil)
So(result.info, ShouldNotBeNil)
So(
result.resp.(*example_interfaces_srv.AddTwoInts_Response).Sum,
ShouldEqual,
-4,
)
So(<-requestReceivedChan, ShouldResemble, result.req)
So(<-responseSentErrChan, ShouldBeNil)
})
Convey("The client sends many requests in quick succession", func() {
const reqCount = 100
testResults := make(chan *testSendResult, reqCount)
requestReceivedChan = make(
chan *example_interfaces_srv.AddTwoInts_Request,
reqCount,
)
responseSentErrChan = make(chan error, reqCount)
for i := 0; i < reqCount; i++ {
a, b := randGen.Int63(), randGen.Int63()
go func() { testResults <- sendReq(a, b) }()
}
for i := 0; i < reqCount; i++ {
res := <-testResults
So(res, ShouldNotBeNil)
So(res.err, ShouldBeNil)
So(res.info, ShouldNotBeNil)
So(
res.resp.(*example_interfaces_srv.AddTwoInts_Response).Sum,
ShouldEqual,
res.sum,
)
}
})
Convey("The service and client are stopped", func() {
cancelSpin()
So(<-spinErrs, shouldContainError, context.Canceled)
So(<-spinErrs, shouldContainError, context.Canceled)
})
Convey("The service context is closed without errors", func() {
timeOut(2000, func() {
err = serviceCtx.Close()
}, "Service context is closing")
So(err, ShouldBeNil)
})
Convey("The client context is closed without errors", func() {
timeOut(2000, func() {
err = clientCtx.Close()
}, "Client context is closing")
So(err, ShouldBeNil)
})
})
}
|
TestServiceAndClient
|
lib.rs
|
//! Generic byte buffer
//!
//! Contains interfaces for byte buffers.
//!
//! ## Static buffer
//!
//! This crate includes an implementation of a simple static buffer where the user can specify
//! the storage at compile time.
//! Due to the lack of const generics, this is achieved by specifying the type of the underlying
//! storage.
//! Any type can be represented as a byte array, but normally it should be a `Copy` type.
//!
//! ```rust
//! use core::mem;
//! use baffa::{StaticBuffer, WriteBuf, WriteBufExt, ReadBufExt, ContBuf};
//!
//! let mut buffer = StaticBuffer::<u64>::new();
//!
//! assert_eq!(buffer.write_value(&u32::max_value()), 4); //WriteBufExt
//! assert_eq!(buffer.as_read_slice().len(), 4); //ContBuf::as_read_slice() returns written bytes
//! assert_eq!(buffer.as_write_slice().len(), 4); //ContBuf::as_write_slice() returns bytes that are yet to be written
//!
//! assert_eq!(buffer.write_slice(&[255u8, 255u8, 255u8, 255u8]), 4); //WriteBuf
//! assert_eq!(buffer.as_read_slice().len(), 8); //ContBuf::as_read_slice() returns written bytes
//! assert_eq!(buffer.as_write_slice().len(), 0); //ContBuf::as_write_slice() returns bytes that are yet to be written
//!
//! assert_eq!(buffer.write_value(&u32::max_value()), 0); //Not enough space :(
//!
//! let mut num = mem::MaybeUninit::<u64>::new(1);
//! assert_eq!(buffer.read_value(&mut num), 8); //ReadBufExt
//! let num = unsafe {
//! num.assume_init()
//! };
//! assert_eq!(num, u64::max_value());
//! ```
#![no_std]
#![warn(missing_docs)]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::style))]
#[cfg(feature = "std")]
extern crate std;
use core::{mem, cmp, ops};
pub mod stack;
pub mod iter;
#[cfg(feature = "alloc")]
mod alloc;
///Alias to static buffer.
pub type StaticBuffer<T> = stack::Buffer<T>;
///Alias to circular buffer.
pub type RingBuffer<T> = stack::Ring<T>;
///Common buffer.
pub trait Buf: ops::IndexMut<usize, Output=u8> + Sized {
///Returns size of the underlying memory in the buffer.
fn capacity(&self) -> usize;
///Returns number of elements inside the buffer.
fn len(&self) -> usize;
#[inline]
///Returns iterator over elements inside the buffer.
fn iter(&self) -> iter::Iter<'_, Self> {
iter::Iter::new(self, 0, self.len())
}
//TODO: separate unsafe trait? Technically need to beware of IndexMut::index_mut returning the
//same address in case stacked borrows become a thing
#[inline]
///Returns mutable iterator over elements inside the buffer.
fn iter_mut(&mut self) -> iter::IterMut<'_, Self> {
iter::IterMut::new(self, 0, self.len())
}
}
///Describes buffer that allows to change its capacity
pub trait DynBuf {
///Reserves additional space, enough to at least fit `size`.
///
///Generally should be noop if there is enough capacity.
fn reserve(&mut self, size: usize);
///Removes `size` number of bytes from underlying memory.
///
///If `size` is bigger than `capacity`, this should behave as if `size` were equal to
///`capacity` (i.e. clear the whole memory).
fn shrink(&mut self, size: usize);
}
///Describes buffer that uses single contiguous memory block
///
///Meaning buffer can be accessed by single slice.
pub trait ContBuf {
///Returns slice of bytes that can be read.
fn as_read_slice(&self) -> &[u8];
///Returns mutable slice of bytes that can be read.
fn as_read_slice_mut(&mut self) -> &mut [u8];
///Returns slice of bytes that can be written (i.e. not written yet).
fn as_write_slice(&mut self) -> &mut [mem::MaybeUninit<u8>];
}
///Describes read-able buffer
pub trait ReadBuf: Buf {
#[inline(always)]
///Returns number of bytes left
///
///Returns buffer's `length` by default
fn available(&self) -> usize
|
///Moves cursor, considering bytes as consumed.
unsafe fn consume(&mut self, step: usize);
///Low level read function, that consumes available bytes up to `size`.
///
///This function is always used in safe manner by other default implementations:
///
///- `size` is always `min(buffer_size, available)`
///- `ptr` is always non-null.
unsafe fn read(&mut self, ptr: *mut u8, size: usize);
#[inline]
///Reads available bytes into slice
fn read_slice(&mut self, bytes: &mut [u8]) -> usize {
let read_len = cmp::min(bytes.len(), self.available());
if read_len > 0 {
unsafe {
// Only consume the bytes we can actually provide, per the `read` contract.
self.read(bytes.as_mut_ptr(), read_len)
}
}
read_len
}
}
///Extension trait to provide extra functionality
pub trait ReadBufExt: ReadBuf {
#[inline]
///Reads value into storage.
///
///If not enough bytes, does nothing, returning 0
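///
///The destination is a `MaybeUninit` so the caller does not have to zero-initialise the
///value before the raw byte copy; call `assume_init` only after a non-zero return.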
fn read_value<T: Copy + Sized>(&mut self, val: &mut mem::MaybeUninit<T>) -> usize {
let size = mem::size_of::<T>();
if size != 0 && self.available() >= size {
unsafe {
self.read(val.as_mut_ptr() as *mut u8, size);
}
size
} else {
0
}
}
}
impl<T: ReadBuf> ReadBufExt for T {}
///Describes write-able buffer
pub trait WriteBuf: Buf {
#[inline(always)]
///Returns number of bytes left
///
///Default implementation returns `capacity - len`
fn remaining(&self) -> usize {
self.capacity() - self.len()
}
///Moves cursor, considering bytes written.
unsafe fn advance(&mut self, step: usize);
///Low level write method, which copies data from pointer up to `size`.
///
///This function is always used in safe manner by other default implementations:
///
///- `size` is always `min(buffer_size, remaining)`
///- `ptr` is always non-null.
unsafe fn write(&mut self, ptr: *const u8, size: usize);
#[inline]
///Writes supplied slice into the buffer, returning number of written bytes.
///
///Allows partial writes.
fn write_slice(&mut self, bytes: &[u8]) -> usize {
let write_len = cmp::min(bytes.len(), self.remaining());
if write_len > 0 {
unsafe {
self.write(bytes.as_ptr(), write_len);
}
}
write_len
}
}
///Extension trait to provide extra functionality
pub trait WriteBufExt: WriteBuf {
#[inline]
///Writes supplied value by performing bit copy, advancing length and returning number of bytes written.
///
///If value cannot fit, does nothing
fn write_value<T: Copy + Sized>(&mut self, val: &T) -> usize {
let size = mem::size_of::<T>();
if size != 0 && self.remaining() >= size {
unsafe {
self.write(val as *const _ as *const u8, size);
}
size
} else {
0
}
}
}
impl<T: WriteBuf> WriteBufExt for T {}
|
{
Buf::len(self)
}
|
pod_preset.rs
|
// Generated from definition io.k8s.api.settings.v1alpha1.PodPreset
/// PodPreset is a policy resource that defines additional runtime requirements for a Pod.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PodPreset {
pub metadata: Option<crate::v1_11::apimachinery::pkg::apis::meta::v1::ObjectMeta>,
pub spec: Option<crate::v1_11::api::settings::v1alpha1::PodPresetSpec>,
}
// Begin settings.k8s.io/v1alpha1/PodPreset
// Generated from operation createSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// create a PodPreset
///
/// Use the returned [`crate::ResponseBody`]`<`[`CreateNamespacedPodPresetResponse`]`>` constructor, or [`CreateNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create_namespaced_pod_preset(
namespace: &str,
body: &crate::v1_11::api::settings::v1alpha1::PodPreset,
optional: CreateNamespacedPodPresetOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<CreateNamespacedPodPresetResponse>), crate::RequestError> {
let CreateNamespacedPodPresetOptional {
pretty,
} = optional;
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::post(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
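// Hypothetical usage sketch (not part of the generated API): the constructor above only
// builds an `http::Request`; callers execute it with an HTTP client of their choice and
// feed the raw response bytes to the returned `crate::ResponseBody` constructor, e.g.
//
//     let (request, response_body) =
//         PodPreset::create_namespaced_pod_preset("default", &preset, Default::default())?;
//     // send `request` with any HTTP client, then parse the collected body via
//     // `response_body(status_code)`.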
/// Optional parameters of [`PodPreset::create_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct CreateNamespacedPodPresetOptional<'a> {
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<CreateNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::create_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum CreateNamespacedPodPresetResponse {
Ok(crate::v1_11::api::settings::v1alpha1::PodPreset),
Created(crate::v1_11::api::settings::v1alpha1::PodPreset),
Accepted(crate::v1_11::api::settings::v1alpha1::PodPreset),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for CreateNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedPodPresetResponse::Ok(result), buf.len()))
},
http::StatusCode::CREATED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedPodPresetResponse::Created(result), buf.len()))
},
http::StatusCode::ACCEPTED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((CreateNamespacedPodPresetResponse::Accepted(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((CreateNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation deleteSettingsV1alpha1CollectionNamespacedPodPreset
impl PodPreset {
/// delete collection of PodPreset
///
/// Use the returned [`crate::ResponseBody`]`<`[`DeleteCollectionNamespacedPodPresetResponse`]`>` constructor, or [`DeleteCollectionNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection_namespaced_pod_preset(
namespace: &str,
delete_optional: crate::v1_11::DeleteOptional<'_>,
list_optional: crate::v1_11::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteCollectionNamespacedPodPresetResponse>), crate::RequestError> {
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<DeleteCollectionNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::delete_collection_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteCollectionNamespacedPodPresetResponse {
OkStatus(crate::v1_11::apimachinery::pkg::apis::meta::v1::Status),
OkValue(crate::v1_11::api::settings::v1alpha1::PodPresetList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteCollectionNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
let is_status = match result.get("kind") {
Some(serde_json::Value::String(s)) if s == "Status" => true,
_ => false,
};
if is_status {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteCollectionNamespacedPodPresetResponse::OkStatus(result), buf.len()))
}
else {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteCollectionNamespacedPodPresetResponse::OkValue(result), buf.len()))
}
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((DeleteCollectionNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation deleteSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// delete a PodPreset
///
/// Use the returned [`crate::ResponseBody`]`<`[`DeleteNamespacedPodPresetResponse`]`>` constructor, or [`DeleteNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodPreset
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_namespaced_pod_preset(
name: &str,
namespace: &str,
optional: crate::v1_11::DeleteOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<DeleteNamespacedPodPresetResponse>), crate::RequestError> {
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<DeleteNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::delete_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum DeleteNamespacedPodPresetResponse {
OkStatus(crate::v1_11::apimachinery::pkg::apis::meta::v1::Status),
OkValue(crate::v1_11::api::settings::v1alpha1::PodPreset),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for DeleteNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result: serde_json::Map<String, serde_json::Value> = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
let is_status = match result.get("kind") {
Some(serde_json::Value::String(s)) if s == "Status" => true,
_ => false,
};
if is_status {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteNamespacedPodPresetResponse::OkStatus(result), buf.len()))
}
else {
let result = serde::Deserialize::deserialize(serde_json::Value::Object(result));
let result = result.map_err(crate::ResponseError::Json)?;
Ok((DeleteNamespacedPodPresetResponse::OkValue(result), buf.len()))
}
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((DeleteNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation listSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// list or watch objects of kind PodPreset
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`ListNamespacedPodPresetResponse`]`>` constructor, or [`ListNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_namespaced_pod_preset(
namespace: &str,
optional: crate::v1_11::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListNamespacedPodPresetResponse>), crate::RequestError> {
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ListNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::list_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListNamespacedPodPresetResponse {
Ok(crate::v1_11::api::settings::v1alpha1::PodPresetList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ListNamespacedPodPresetResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ListNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation listSettingsV1alpha1PodPresetForAllNamespaces
impl PodPreset {
/// list or watch objects of kind PodPreset
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`ListPodPresetForAllNamespacesResponse`]`>` constructor, or [`ListPodPresetForAllNamespacesResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_pod_preset_for_all_namespaces(
optional: crate::v1_11::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ListPodPresetForAllNamespacesResponse>), crate::RequestError> {
let __url = "/apis/settings.k8s.io/v1alpha1/podpresets?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ListPodPresetForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::list_pod_preset_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ListPodPresetForAllNamespacesResponse {
Ok(crate::v1_11::api::settings::v1alpha1::PodPresetList),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ListPodPresetForAllNamespacesResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ListPodPresetForAllNamespacesResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ListPodPresetForAllNamespacesResponse::Other(result), read))
},
}
}
}
// Generated from operation patchSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// partially update the specified PodPreset
///
/// Use the returned [`crate::ResponseBody`]`<`[`PatchNamespacedPodPresetResponse`]`>` constructor, or [`PatchNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodPreset
///
|
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_pod_preset(
name: &str,
namespace: &str,
body: &crate::v1_11::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::v1_11::PatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<PatchNamespacedPodPresetResponse>), crate::RequestError> {
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::patch(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
crate::v1_11::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::v1_11::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::v1_11::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<PatchNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::patch_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum PatchNamespacedPodPresetResponse {
Ok(crate::v1_11::api::settings::v1alpha1::PodPreset),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for PatchNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((PatchNamespacedPodPresetResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((PatchNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation readSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// read the specified PodPreset
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedPodPresetResponse`]`>` constructor, or [`ReadNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodPreset
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_pod_preset(
name: &str,
namespace: &str,
optional: ReadNamespacedPodPresetOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedPodPresetResponse>), crate::RequestError> {
let ReadNamespacedPodPresetOptional {
exact,
export,
pretty,
} = optional;
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(exact) = exact {
__query_pairs.append_pair("exact", &exact.to_string());
}
if let Some(export) = export {
__query_pairs.append_pair("export", &export.to_string());
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`PodPreset::read_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedPodPresetOptional<'a> {
/// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
pub exact: Option<bool>,
/// Should this value be exported. Export strips fields that a user can not specify.
pub export: Option<bool>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::read_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedPodPresetResponse {
Ok(crate::v1_11::api::settings::v1alpha1::PodPreset),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadNamespacedPodPresetResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation replaceSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// replace the specified PodPreset
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReplaceNamespacedPodPresetResponse`]`>` constructor, or [`ReplaceNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodPreset
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_namespaced_pod_preset(
name: &str,
namespace: &str,
body: &crate::v1_11::api::settings::v1alpha1::PodPreset,
optional: ReplaceNamespacedPodPresetOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReplaceNamespacedPodPresetResponse>), crate::RequestError> {
let ReplaceNamespacedPodPresetOptional {
pretty,
} = optional;
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets/{name}?",
name = crate::url::percent_encoding::percent_encode(name.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let mut __request = http::Request::put(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
__request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`PodPreset::replace_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReplaceNamespacedPodPresetOptional<'a> {
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReplaceNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::replace_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReplaceNamespacedPodPresetResponse {
Ok(crate::v1_11::api::settings::v1alpha1::PodPreset),
Created(crate::v1_11::api::settings::v1alpha1::PodPreset),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReplaceNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReplaceNamespacedPodPresetResponse::Ok(result), buf.len()))
},
http::StatusCode::CREATED => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReplaceNamespacedPodPresetResponse::Created(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReplaceNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation watchSettingsV1alpha1NamespacedPodPreset
impl PodPreset {
/// list or watch objects of kind PodPreset
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`WatchNamespacedPodPresetResponse`]`>` constructor, or [`WatchNamespacedPodPresetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_namespaced_pod_preset(
namespace: &str,
optional: crate::v1_11::WatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchNamespacedPodPresetResponse>), crate::RequestError> {
let __url = format!("/apis/settings.k8s.io/v1alpha1/namespaces/{namespace}/podpresets?",
namespace = crate::url::percent_encoding::percent_encode(namespace.as_bytes(), crate::url::percent_encoding::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<WatchNamespacedPodPresetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::watch_namespaced_pod_preset`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchNamespacedPodPresetResponse {
Ok(crate::v1_11::apimachinery::pkg::apis::meta::v1::WatchEvent<PodPreset>),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchNamespacedPodPresetResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
let (result, byte_offset) = match deserializer.next() {
Some(Ok(value)) => (value, deserializer.byte_offset()),
Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
None => return Err(crate::ResponseError::NeedMoreData),
};
Ok((WatchNamespacedPodPresetResponse::Ok(result), byte_offset))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((WatchNamespacedPodPresetResponse::Other(result), read))
},
}
}
}
// Generated from operation watchSettingsV1alpha1PodPresetForAllNamespaces
impl PodPreset {
/// list or watch objects of kind PodPreset
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`WatchPodPresetForAllNamespacesResponse`]`>` constructor, or [`WatchPodPresetForAllNamespacesResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_pod_preset_for_all_namespaces(
optional: crate::v1_11::WatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<WatchPodPresetForAllNamespacesResponse>), crate::RequestError> {
let __url = "/apis/settings.k8s.io/v1alpha1/podpresets?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let mut __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<WatchPodPresetForAllNamespacesResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodPreset::watch_pod_preset_for_all_namespaces`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum WatchPodPresetForAllNamespacesResponse {
Ok(crate::v1_11::apimachinery::pkg::apis::meta::v1::WatchEvent<PodPreset>),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for WatchPodPresetForAllNamespacesResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let mut deserializer = serde_json::Deserializer::from_slice(buf).into_iter();
let (result, byte_offset) = match deserializer.next() {
Some(Ok(value)) => (value, deserializer.byte_offset()),
Some(Err(ref err)) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Some(Err(err)) => return Err(crate::ResponseError::Json(err)),
None => return Err(crate::ResponseError::NeedMoreData),
};
Ok((WatchPodPresetForAllNamespacesResponse::Ok(result), byte_offset))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((WatchPodPresetForAllNamespacesResponse::Other(result), read))
},
}
}
}
// End settings.k8s.io/v1alpha1/PodPreset
impl crate::Resource for PodPreset {
fn api_version() -> &'static str {
"settings.k8s.io/v1alpha1"
}
fn group() -> &'static str {
"settings.k8s.io"
}
fn kind() -> &'static str {
"PodPreset"
}
fn version() -> &'static str {
"v1alpha1"
}
}
impl crate::Metadata for PodPreset {
type Ty = crate::v1_11::apimachinery::pkg::apis::meta::v1::ObjectMeta;
fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> {
self.metadata.as_ref()
}
}
impl<'de> serde::Deserialize<'de> for PodPreset {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_metadata,
Key_spec,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"metadata" => Field::Key_metadata,
"spec" => Field::Key_spec,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = PodPreset;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "struct PodPreset")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_metadata: Option<crate::v1_11::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
let mut value_spec: Option<crate::v1_11::api::settings::v1alpha1::PodPresetSpec> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::api_version() {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::api_version()));
}
},
Field::Key_kind => {
let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::kind() {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::kind()));
}
},
Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(PodPreset {
metadata: value_metadata,
spec: value_spec,
})
}
}
deserializer.deserialize_struct(
"PodPreset",
&[
"apiVersion",
"kind",
"metadata",
"spec",
],
Visitor,
)
}
}
impl serde::Serialize for PodPreset {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"PodPreset",
2 +
self.metadata.as_ref().map_or(0, |_| 1) +
self.spec.as_ref().map_or(0, |_| 1),
)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::api_version())?;
serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::kind())?;
if let Some(value) = &self.metadata {
serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?;
}
if let Some(value) = &self.spec {
serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
}
serde::ser::SerializeStruct::end(state)
}
}
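// A minimal caller-side sketch (not part of the generated module; the HTTP client
// is assumed): execute the returned `http::Request` with any client, then hand the
// status code and raw body bytes to the matching response type.
//
//     let (request, _response_body) = PodPreset::read_namespaced_pod_preset(
//         "example-preset", "default", Default::default())?;
//     // `send` is a hypothetical helper that performs the request and returns
//     // the status code plus the raw body bytes.
//     let (status_code, body) = send(request)?;
//     let (response, _bytes_read) =
//         <ReadNamespacedPodPresetResponse as crate::Response>::try_from_parts(status_code, &body)?;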
|
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
|
resources.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module implements a checker for verifying that a non-resource struct does not
//! have resource fields inside it.
use omv_primitives::vm_status::StatusCode;
use omv_core::{
access::ModuleAccess,
errors::{verification_error, Location, PartialVMResult, VMResult},
file_format::{CompiledModule, Kind, SignatureToken, StructFieldInformation, TableIndex},
IndexKind,
};
pub struct
|
<'a> {
module: &'a CompiledModule,
}
impl<'a> ResourceTransitiveChecker<'a> {
pub fn verify_module(module: &'a CompiledModule) -> VMResult<()> {
Self::verify_module_impl(module).map_err(|e| e.finish(Location::Module(module.self_id())))
}
fn verify_module_impl(module: &'a CompiledModule) -> PartialVMResult<()> {
let checker = Self { module };
for (idx, struct_def) in checker.module.struct_defs().iter().enumerate() {
let sh = checker.module.struct_handle_at(struct_def.struct_handle);
if sh.is_nominal_resource {
continue;
}
let fields = match &struct_def.field_information {
StructFieldInformation::Native => continue,
StructFieldInformation::Declared(fields) => fields,
};
for field in fields {
if checker.contains_nominal_resource(&field.signature.0, &sh.type_parameters) {
return Err(verification_error(
StatusCode::INVALID_RESOURCE_FIELD,
IndexKind::StructDefinition,
idx as TableIndex,
));
}
}
}
Ok(())
}
    /// Determines if the given signature token contains a nominal resource.
    /// More specifically, a signature token contains a nominal resource if:
    /// 1) it is a type variable explicitly marked as resource kind;
    /// 2) it is a signer, which is always a resource type;
    /// 3) it is a struct that
    ///    a) is marked as resource, or
    ///    b) has a type actual which is a nominal resource.
fn contains_nominal_resource(&self, token: &SignatureToken, type_parameters: &[Kind]) -> bool {
match token {
SignatureToken::Signer => true,
SignatureToken::Struct(sh_idx) => {
let sh = self.module.struct_handle_at(*sh_idx);
sh.is_nominal_resource
}
SignatureToken::StructInstantiation(sh_idx, type_arguments) => {
let sh = self.module.struct_handle_at(*sh_idx);
if sh.is_nominal_resource {
return true;
}
for token in type_arguments {
if self.contains_nominal_resource(token, type_parameters) {
return true;
}
}
false
}
SignatureToken::Vector(ty) => self.contains_nominal_resource(ty, type_parameters),
SignatureToken::Reference(_)
| SignatureToken::MutableReference(_)
| SignatureToken::Bool
| SignatureToken::U8
| SignatureToken::U64
| SignatureToken::U128
| SignatureToken::Address
| SignatureToken::TypeParameter(_) => false,
}
}
}
|
ResourceTransitiveChecker
|
promote_as_admin.py
|
#!/usr/bin/env python
|
import sys
sys.path.append("../")
from harborclient_light import harborclient
host = "127.0.0.1"
user = "admin"
password = "Harbor12345"
client = harborclient.HarborClient(host, user, password)
# Promote as admin
user_id = 2
client.promote_as_admin(user_id)
| |
aws.go
|
package aws
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/opsworks"
sh "github.com/codeskyblue/go-sh"
"github.com/libopenstorage/cloudops"
"github.com/libopenstorage/cloudops/backoff"
"github.com/libopenstorage/cloudops/pkg/exec"
"github.com/libopenstorage/cloudops/unsupported"
awscredentials "github.com/libopenstorage/secrets/aws/credentials"
"github.com/portworx/sched-ops/task"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
awsDevicePrefix = "/dev/sd"
awsDevicePrefixWithX = "/dev/xvd"
awsDevicePrefixWithH = "/dev/hd"
awsDevicePrefixNvme = "/dev/nvme"
)
type awsOps struct {
cloudops.Compute
instanceType string
instance string
zone string
region string
outpostARN string
ec2 *ec2.EC2
autoscaling *autoscaling.AutoScaling
mutex sync.Mutex
}
var (
// ErrAWSEnvNotAvailable is the error type when aws credentials are not set
ErrAWSEnvNotAvailable = fmt.Errorf("AWS credentials are not set in environment")
nvmeCmd = exec.Which("nvme")
)
// NewClient creates a new cloud operations client for AWS
func NewClient() (cloudops.Ops, error) {
zone, instanceID, instanceType, outpostARN, err := getInfoFromMetadata()
if err != nil {
// try to get it from env
zone, instanceID, instanceType, err = getInfoFromEnv()
if err != nil {
return nil, err
}
}
region := zone[:len(zone)-1]
awsCreds, err := awscredentials.NewAWSCredentials("", "", "")
if err != nil {
return nil, err
}
creds, err := awsCreds.Get()
if err != nil {
return nil, err
}
ec2 := ec2.New(
session.New(
&aws.Config{
Region: ®ion,
Credentials: creds,
},
),
)
autoscaling := autoscaling.New(
session.New(
&aws.Config{
Region: ®ion,
Credentials: creds,
},
),
)
return backoff.NewExponentialBackoffOps(
&awsOps{
Compute: unsupported.NewUnsupportedCompute(),
instance: instanceID,
instanceType: instanceType,
ec2: ec2,
zone: zone,
region: region,
autoscaling: autoscaling,
outpostARN: outpostARN,
},
isExponentialError,
backoff.DefaultExponentialBackoff,
), nil
}
func (s *awsOps) filters(
labels map[string]string,
keys []string,
) []*ec2.Filter {
if len(labels) == 0 {
return nil
}
f := make([]*ec2.Filter, len(labels)+len(keys))
i := 0
for k, v := range labels {
		s := "tag:" + k
value := v
f[i] = &ec2.Filter{Name: &s, Values: []*string{&value}}
i++
}
for _, k := range keys {
		s := "tag-key:" + k
f[i] = &ec2.Filter{Name: &s}
i++
}
return f
}
func (s *awsOps) tags(labels map[string]string) []*ec2.Tag {
if len(labels) == 0 {
return nil
}
t := make([]*ec2.Tag, len(labels))
i := 0
for k, v := range labels {
key := k
value := v
t[i] = &ec2.Tag{Key: &key, Value: &value}
i++
}
return t
}
func (s *awsOps) waitStatus(id string, desired string) error {
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}}
actual := ""
_, err := task.DoRetryWithTimeout(
func() (interface{}, bool, error) {
awsVols, err := s.ec2.DescribeVolumes(request)
if err != nil {
return nil, true, err
}
if len(awsVols.Volumes) != 1 {
return nil, true, fmt.Errorf("expected one volume %v got %v",
id, len(awsVols.Volumes))
}
if awsVols.Volumes[0].State == nil {
return nil, true, fmt.Errorf("Nil volume state for %v", id)
}
actual = *awsVols.Volumes[0].State
if actual == desired {
return nil, false, nil
}
return nil, true, fmt.Errorf(
"Volume %v did not transition to %v current state %v",
id, desired, actual)
},
cloudops.ProviderOpsTimeout,
cloudops.ProviderOpsRetryInterval)
return err
}
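// waitAttachmentStatus polls the volume until its attachment state reaches the
// desired value or the timeout expires, and returns the refreshed volume.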
func (s *awsOps) waitAttachmentStatus(
volumeID string,
desired string,
timeout time.Duration,
) (*ec2.Volume, error) {
id := volumeID
request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}}
interval := 2 * time.Second
logrus.Infof("Waiting for state transition to %q", desired)
f := func() (interface{}, bool, error) {
awsVols, err := s.ec2.DescribeVolumes(request)
if err != nil {
return nil, false, err
}
if len(awsVols.Volumes) != 1 {
return nil, false, fmt.Errorf("expected one volume %v got %v",
volumeID, len(awsVols.Volumes))
}
var actual string
vol := awsVols.Volumes[0]
awsAttachment := vol.Attachments
if awsAttachment == nil || len(awsAttachment) == 0 {
// We have encountered scenarios where AWS returns a nil attachment state
// for a volume transitioning from detaching -> attaching.
actual = ec2.VolumeAttachmentStateDetached
} else {
actual = *awsAttachment[0].State
}
if actual == desired {
return vol, false, nil
}
return nil, true, fmt.Errorf("Volume %v failed to transition to %v current state %v",
volumeID, desired, actual)
}
outVol, err := task.DoRetryWithTimeout(f, timeout, interval)
if err != nil {
return nil, err
}
if vol, ok := outVol.(*ec2.Volume); ok {
return vol, nil
}
return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
fmt.Sprintf("Invalid volume object for volume %s", volumeID), "")
}
func (s *awsOps) Name() string { return string(cloudops.AWS) }
func (s *awsOps) InstanceID() string { return s.instance }
func (s *awsOps) InspectInstance(instanceID string) (*cloudops.InstanceInfo, error) {
inst, err := DescribeInstanceByID(s.ec2, instanceID)
if err != nil {
return nil, err
}
name := instanceID
labels := labelsFromTags(inst.Tags)
if nameFromTags, present := labels["Name"]; present && len(nameFromTags) > 0 {
name = nameFromTags
}
instInfo := &cloudops.InstanceInfo{
CloudResourceInfo: cloudops.CloudResourceInfo{
Name: name,
ID: *inst.InstanceId,
Zone: s.zone,
Region: s.region,
Labels: labels,
},
}
return instInfo, nil
}
func (s *awsOps) InspectInstanceGroupForInstance(instanceID string) (*cloudops.InstanceGroupInfo, error) {
selfInfo, err := s.InspectInstance(instanceID)
if err != nil {
return nil, err
}
for tag, value := range selfInfo.Labels {
// https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html#tag-lifecycle
if tag == "aws:autoscaling:groupName" {
input := &autoscaling.DescribeAutoScalingGroupsInput{
AutoScalingGroupNames: []*string{
aws.String(value),
},
}
result, err := s.autoscaling.DescribeAutoScalingGroups(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
return nil, aerr
}
return nil, err
}
if len(result.AutoScalingGroups) != 1 {
return nil, fmt.Errorf("DescribeAutoScalingGroups (%v) returned %v groups, expect 1",
value, len(result.AutoScalingGroups))
}
group := result.AutoScalingGroups[0]
zones := make([]string, 0)
for _, z := range group.AvailabilityZones {
zones = append(zones, *z)
}
retval := &cloudops.InstanceGroupInfo{
CloudResourceInfo: cloudops.CloudResourceInfo{
Name: *group.AutoScalingGroupName,
Zone: s.zone,
Region: s.region,
Labels: labelsFromTags(group.Tags),
},
Zones: zones,
AutoscalingEnabled: true,
Min: group.MinSize,
Max: group.MaxSize,
}
return retval, nil
}
}
return nil, &cloudops.ErrNoInstanceGroup{}
}
func (s *awsOps) ApplyTags(volumeID string, labels map[string]string) error {
req := &ec2.CreateTagsInput{
Resources: []*string{&volumeID},
Tags: s.tags(labels),
}
_, err := s.ec2.CreateTags(req)
return err
}
func (s *awsOps) RemoveTags(volumeID string, labels map[string]string) error {
req := &ec2.DeleteTagsInput{
Resources: []*string{&volumeID},
Tags: s.tags(labels),
}
_, err := s.ec2.DeleteTags(req)
return err
}
func (s *awsOps) matchTag(tag *ec2.Tag, match string) bool {
return tag.Key != nil &&
tag.Value != nil &&
len(*tag.Key) != 0 &&
len(*tag.Value) != 0 &&
*tag.Key == match
}
func (s *awsOps) DeviceMappings() (map[string]string, error) {
instance, err := s.describe()
if err != nil {
return nil, err
}
m := make(map[string]string)
for _, d := range instance.BlockDeviceMappings {
if d.DeviceName != nil && d.Ebs != nil && d.Ebs.VolumeId != nil {
devName := *d.DeviceName
// Skip the root device
if devName == *instance.RootDeviceName {
continue
}
devicePath, err := s.getActualDevicePath(devName, *d.Ebs.VolumeId)
if err != nil {
return nil, cloudops.NewStorageError(
cloudops.ErrInvalidDevicePath,
fmt.Sprintf("unable to get actual device path for %s. %v", devName, err),
s.instance)
}
m[devicePath] = *d.Ebs.VolumeId
}
}
return m, nil
}
// Describe current instance.
func (s *awsOps) Describe() (interface{}, error) {
return s.describe()
}
func (s *awsOps) describe() (*ec2.Instance, error) {
request := &ec2.DescribeInstancesInput{
InstanceIds: []*string{&s.instance},
}
out, err := s.ec2.DescribeInstances(request)
if err != nil {
return nil, err
}
if len(out.Reservations) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v reservations, expect 1",
s.instance, len(out.Reservations))
}
if len(out.Reservations[0].Instances) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v Reservations, expect 1",
s.instance, len(out.Reservations[0].Instances))
}
return out.Reservations[0].Instances[0], nil
}
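// getPrefixFromRootDeviceName returns the device-name prefix (/dev/sd, /dev/xvd
// or /dev/hd) used by the instance's root device.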
func (s *awsOps) getPrefixFromRootDeviceName(rootDeviceName string) (string, error) {
devPrefix := awsDevicePrefix
if !strings.HasPrefix(rootDeviceName, devPrefix) {
devPrefix = awsDevicePrefixWithX
if !strings.HasPrefix(rootDeviceName, devPrefix) {
devPrefix = awsDevicePrefixWithH
if !strings.HasPrefix(rootDeviceName, devPrefix) {
return "", fmt.Errorf("unknown prefix type on root device: %s",
rootDeviceName)
}
}
}
return devPrefix, nil
}
// getParentDevice returns the parent device of the given device path
// by following the symbolic link. It is expected that the input device
// path exists
func (s *awsOps) getParentDevice(ipDevPath string) (string, error) {
// Check if the path is a symbolic link
var parentDevPath string
fi, err := os.Lstat(ipDevPath)
if err != nil {
return "", err
}
if fi.Mode()&os.ModeSymlink != 0 {
// input device path is a symbolic link
// get the parent device
output, err := filepath.EvalSymlinks(ipDevPath)
if err != nil {
return "", fmt.Errorf("failed to read symlink due to: %v", err)
}
parentDevPath = strings.TrimSpace(string(output))
} else {
parentDevPath = ipDevPath
}
return parentDevPath, nil
}
// getActualDevicePath does an os.Stat on the provided devicePath.
// If not found it will try all the different devicePrefixes provided by AWS
// such as /dev/sd and /dev/xvd and return the devicePath which is found
// or return an error
func (s *awsOps) getActualDevicePath(ipDevicePath, volumeID string) (string, error) {
var err error
letter := ipDevicePath[len(ipDevicePath)-1:]
devicePath := awsDevicePrefix + letter
if _, err = os.Stat(devicePath); err == nil {
return s.getParentDevice(devicePath)
}
devicePath = awsDevicePrefixWithX + letter
if _, err = os.Stat(devicePath); err == nil {
return s.getParentDevice(devicePath)
}
devicePath = awsDevicePrefixWithH + letter
if _, err = os.Stat(devicePath); err == nil {
return s.getParentDevice(devicePath)
}
if devicePath, err = s.getNvmeDeviceFromVolumeID(volumeID); err == nil {
if _, err = os.Stat(devicePath); err == nil {
return devicePath, nil
}
}
return "", fmt.Errorf("unable to map volume %v with block device mapping %v to an"+
" actual device path on the host", volumeID, ipDevicePath)
}
func (s *awsOps) getNvmeDeviceFromVolumeID(volumeID string) (string, error) {
// We will use nvme list command to find nvme device mappings
// A typical output of nvme list looks like this
// # nvme list
// Node SN Model Namespace Usage Format FW Rev
// ---------------- -------------------- ---------------------------------------- --------- -------------------------- ---------------- --------
// /dev/nvme0n1 vol00fd6f8c30dc619f4 Amazon Elastic Block Store 1 0.00 B / 137.44 GB 512 B + 0 B 1.0
// /dev/nvme1n1 vol044e12c8c0af45b3d Amazon Elastic Block Store 1 0.00 B / 107.37 GB 512 B + 0 B 1.0
trimmedVolumeID := strings.Replace(volumeID, "-", "", 1)
out, err := sh.Command(nvmeCmd, "list").Command("grep", trimmedVolumeID).Command("awk", "{print $1}").Output()
if err != nil {
return "", fmt.Errorf("unable to map %v volume to an nvme device: %v", volumeID, err)
}
return strings.TrimSpace(string(out)), nil
}
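// FreeDevices returns device names (for example /dev/sdf through /dev/sdp) that are
// not present in the given block device mappings or the instance metadata, using the
// same prefix as the root device.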
func (s *awsOps) FreeDevices(
blockDeviceMappings []interface{},
rootDeviceName string,
) ([]string, error) {
freeLetterTracker := []byte("fghijklmnop")
devNamesInUse := make(map[string]string) // used as a set, values not used
// We also need to fetch ephemeral device mappings as they are not populated
// in blockDeviceMappings
// See bottom of this page:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html?icmpid=docs_ec2_console#instance-block-device-mapping
c := ec2metadata.New(session.New())
mappingsFromMetadata, err := c.GetMetadata("block-device-mapping")
if err != nil {
return nil, err
}
devices := strings.Split(mappingsFromMetadata, "\n")
for _, device := range devices {
if device == "root" || device == "ami" {
continue
}
devName, err := c.GetMetadata("block-device-mapping/" + device)
if err != nil {
return nil, err
}
if !strings.HasPrefix(devName, "/dev/") {
devName = "/dev/" + devName
}
devNamesInUse[devName] = ""
}
devPrefix := awsDevicePrefix
for _, d := range blockDeviceMappings {
dev := d.(*ec2.InstanceBlockDeviceMapping)
if dev.DeviceName == nil {
return nil, fmt.Errorf("Nil device name")
}
devName := *dev.DeviceName
if devName == rootDeviceName {
continue
}
devNamesInUse[devName] = ""
}
for devName := range devNamesInUse {
// Extract the letter from the devName (e.g extract 'f' from '/dev/sdf')
if !strings.HasPrefix(devName, devPrefix) {
devPrefix = awsDevicePrefixWithX
if !strings.HasPrefix(devName, devPrefix) {
devPrefix = awsDevicePrefixWithH
if !strings.HasPrefix(devName, devPrefix) {
return nil, fmt.Errorf("bad device name %q", devName)
}
}
}
letter := devName[len(devPrefix):]
// Reset devPrefix for next devices
devPrefix = awsDevicePrefix
// AWS instances can have the following device names
// /dev/xvd[b-c][a-z]
if len(letter) == 1 {
index := letter[0] - 'f'
if index > ('p' - 'f') {
continue
}
freeLetterTracker[index] = '0' // mark as used
} else if len(letter) == 2 {
// We do not attach EBS volumes with "/dev/xvdc[a-z]" formats
continue
} else {
return nil, fmt.Errorf("cannot parse device name %q", devName)
}
}
// Set the prefix to the same one used as the root drive
// The reason we do this is based on the virtualization type AWS might attach
// the device "sda" at /dev/sda OR /dev/xvda. So we look at how the root device
// is attached and use that prefix
devPrefix, err = s.getPrefixFromRootDeviceName(rootDeviceName)
if err != nil {
return nil, err
}
free := make([]string, len(freeLetterTracker))
count := 0
for _, b := range freeLetterTracker {
if b != '0' {
free[count] = devPrefix + string(b)
count++
}
}
if count == 0 {
return nil, fmt.Errorf("No more free devices")
}
return reverse(free[:count]), nil
}
func (s *awsOps) rollbackCreate(id string, createErr error) error {
logrus.Warnf("Rollback create volume %v, Error %v", id, createErr)
err := s.Delete(id)
if err != nil {
logrus.Warnf("Rollback failed volume %v, Error %v", id, err)
}
return createErr
}
func (s *awsOps) refreshVol(id *string) (*ec2.Volume, error) {
vols, err := s.Inspect([]*string{id})
if err != nil {
return nil, err
}
if len(vols) != 1 {
return nil, fmt.Errorf("failed to get vol: %s."+
"Found: %d volumes on inspecting", *id, len(vols))
}
resp, ok := vols[0].(*ec2.Volume)
if !ok {
return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
fmt.Sprintf("Invalid volume returned by inspect API for vol: %s", *id),
"")
}
return resp, nil
}
func (s *awsOps) deleted(v *ec2.Volume) bool {
return *v.State == ec2.VolumeStateDeleting ||
*v.State == ec2.VolumeStateDeleted
}
func (s *awsOps) available(v *ec2.Volume) bool {
return *v.State == ec2.VolumeStateAvailable
}
func (s *awsOps) GetDeviceID(vol interface{}) (string, error) {
if d, ok := vol.(*ec2.Volume); ok {
return *d.VolumeId, nil
} else if d, ok := vol.(*ec2.Snapshot); ok {
return *d.SnapshotId, nil
} else {
return "", fmt.Errorf("invalid type: %v given to GetDeviceID", vol)
}
}
func (s *awsOps) Inspect(volumeIds []*string) ([]interface{}, error) {
req := &ec2.DescribeVolumesInput{VolumeIds: volumeIds}
resp, err := s.ec2.DescribeVolumes(req)
if err != nil {
return nil, err
}
var awsVols = make([]interface{}, len(resp.Volumes))
for i, v := range resp.Volumes {
awsVols[i] = v
}
return awsVols, nil
}
func (s *awsOps) Tags(volumeID string) (map[string]string, error) {
vol, err := s.refreshVol(&volumeID)
if err != nil {
return nil, err
}
labels := make(map[string]string)
for _, tag := range vol.Tags {
labels[*tag.Key] = *tag.Value
}
return labels, nil
}
func (s *awsOps) Enumerate(
volumeIds []*string,
labels map[string]string,
setIdentifier string,
) (map[string][]interface{}, error) {
sets := make(map[string][]interface{})
// Enumerate all volumes that have same labels.
f := s.filters(labels, nil)
req := &ec2.DescribeVolumesInput{Filters: f, VolumeIds: volumeIds}
awsVols, err := s.ec2.DescribeVolumes(req)
if err != nil {
return nil, err
}
// Volume sets are identified by volumes with the same setIdentifer.
for _, vol := range awsVols.Volumes {
if s.deleted(vol) {
continue
}
if len(setIdentifier) == 0 {
cloudops.AddElementToMap(sets, vol, cloudops.SetIdentifierNone)
} else {
found := false
for _, tag := range vol.Tags {
if s.matchTag(tag, setIdentifier) {
cloudops.AddElementToMap(sets, vol, *tag.Value)
found = true
break
}
}
if !found {
cloudops.AddElementToMap(sets, vol, cloudops.SetIdentifierNone)
}
}
}
return sets, nil
}
func (s *awsOps) Create(
v interface{},
labels map[string]string,
) (interface{}, error) {
vol, ok := v.(*ec2.Volume)
if !ok {
return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
"Invalid volume template given", "")
}
req := &ec2.CreateVolumeInput{
AvailabilityZone: vol.AvailabilityZone,
Encrypted: vol.Encrypted,
KmsKeyId: vol.KmsKeyId,
Size: vol.Size,
VolumeType: vol.VolumeType,
SnapshotId: vol.SnapshotId,
}
if len(s.outpostARN) > 0 {
outpostARN := s.outpostARN
req.OutpostArn = &outpostARN
}
if len(vol.Tags) > 0 || len(labels) > 0 {
// Need to tag volumes on creation
tagSpec := &ec2.TagSpecification{}
tagSpec.SetResourceType(ec2.ResourceTypeVolume)
volTags := []*ec2.Tag{}
for _, tag := range vol.Tags {
// Make a copy of the keys and values
key := *tag.Key
value := *tag.Value
volTags = append(volTags, &ec2.Tag{Key: &key, Value: &value})
}
for k, v := range labels {
// Make a copy of the keys and values
key := k
value := v
volTags = append(volTags, &ec2.Tag{Key: &key, Value: &value})
}
tagSpec.Tags = volTags
req.TagSpecifications = []*ec2.TagSpecification{tagSpec}
}
	// note: as of 2021-05-04, `opsworks` does not define a `VolumeTypeGp3` constant, so the raw "gp3" string is used below
if *vol.VolumeType == opsworks.VolumeTypeIo1 || *vol.VolumeType == "gp3" {
req.Iops = vol.Iops
}
resp, err := s.ec2.CreateVolume(req)
if err != nil {
return nil, err
}
if err = s.waitStatus(
*resp.VolumeId,
ec2.VolumeStateAvailable,
); err != nil {
return nil, s.rollbackCreate(*resp.VolumeId, err)
}
return s.refreshVol(resp.VolumeId)
}
func (s *awsOps) DeleteFrom(id, _ string) error {
return s.Delete(id)
}
func (s *awsOps) Delete(id string) error {
req := &ec2.DeleteVolumeInput{VolumeId: &id}
_, err := s.ec2.DeleteVolume(req)
return err
}
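// Attach attaches the given EBS volume to this instance, trying each free device
// name in turn, and returns the host device path of the attached volume.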
func (s *awsOps) Attach(volumeID string, options map[string]string) (string, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
self, err := s.describe()
if err != nil {
return "", err
}
var blockDeviceMappings = make([]interface{}, len(self.BlockDeviceMappings))
for i, b := range self.BlockDeviceMappings {
blockDeviceMappings[i] = b
}
devices, err := s.FreeDevices(blockDeviceMappings, *self.RootDeviceName)
if err != nil {
return "", err
}
for _, device := range devices {
req := &ec2.AttachVolumeInput{
Device: &device,
InstanceId: &s.instance,
VolumeId: &volumeID,
}
if _, err := s.ec2.AttachVolume(req); err != nil {
if strings.Contains(err.Error(), "is already in use") {
logrus.Infof("Skipping device: %s as it's in use. Will try next free device", device)
continue
}
return "", err
}
vol, err := s.waitAttachmentStatus(
volumeID,
ec2.VolumeAttachmentStateAttached,
time.Minute,
)
if err != nil {
return "", err
}
return s.DevicePath(*vol.VolumeId)
}
return "", fmt.Errorf("failed to attach any of the free devices. Attempted: %v", devices)
}
func (s *awsOps) Detach(volumeID string) error {
return s.detachInternal(volumeID, s.instance)
}
func (s *awsOps) DetachFrom(volumeID, instanceName string) error {
return s.detachInternal(volumeID, instanceName)
}
func (s *awsOps) detachInternal(volumeID, instanceName string) error {
force := false
req := &ec2.DetachVolumeInput{
InstanceId: &instanceName,
VolumeId: &volumeID,
Force: &force,
}
if _, err := s.ec2.DetachVolume(req); err != nil {
return err
}
_, err := s.waitAttachmentStatus(volumeID,
ec2.VolumeAttachmentStateDetached,
time.Minute,
)
return err
}
func (s *awsOps) Expand(
volumeID string,
newSizeInGiB uint64,
) (uint64, error) {
vol, err := s.refreshVol(&volumeID)
if err != nil {
return 0, err
}
currentSizeInGiB := uint64(*vol.Size)
if currentSizeInGiB >= newSizeInGiB {
return currentSizeInGiB, cloudops.NewStorageError(cloudops.ErrDiskGreaterOrEqualToExpandSize,
fmt.Sprintf("disk is already has a size: %d greater than or equal "+
"requested size: %d", currentSizeInGiB, newSizeInGiB), "")
}
newSizeInGiBInt64 := int64(newSizeInGiB)
request := &ec2.ModifyVolumeInput{
VolumeId: vol.VolumeId,
Size: &newSizeInGiBInt64,
}
output, err := s.ec2.ModifyVolume(request)
if err != nil {
return currentSizeInGiB, fmt.Errorf("failed to modify AWS volume for %v: %v", volumeID, err)
}
	if *output.VolumeModification.ModificationState == ec2.VolumeModificationStateCompleted {
return uint64(*output.VolumeModification.TargetSize), nil
}
// Taken from k8s.io/legacy-cloud-providers/aws
backoff := wait.Backoff{
Duration: 1 * time.Second,
Factor: 2,
Steps: 10,
}
checkForResize := func() (bool, error) {
request := &ec2.DescribeVolumesModificationsInput{
VolumeIds: []*string{&volumeID},
}
describeOutput, err := s.ec2.DescribeVolumesModifications(request)
if err != nil {
return false, fmt.Errorf("error while checking status for AWS EBS volume resize: %v", err)
}
volumeModifications := describeOutput.VolumesModifications
if len(volumeModifications) == 0 {
return false, fmt.Errorf("no volume modifications found for AWS EBS volume %v", volumeID)
}
volumeModification := volumeModifications[len(volumeModifications)-1]
// According to https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring_mods.html
// Size changes usually take a few seconds to complete and take effect after a volume is in the Optimizing state.
if *volumeModification.ModificationState == ec2.VolumeModificationStateOptimizing {
return true, nil
}
return false, nil
}
waitWithErr := wait.ExponentialBackoff(backoff, checkForResize)
return newSizeInGiB, waitWithErr
}
func (s *awsOps) Snapshot(
volumeID string,
readonly bool,
) (interface{}, error) {
request := &ec2.CreateSnapshotInput{
VolumeId: &volumeID,
}
return s.ec2.CreateSnapshot(request)
}
func (s *awsOps) SnapshotDelete(snapID string) error {
request := &ec2.DeleteSnapshotInput{
SnapshotId: &snapID,
}
_, err := s.ec2.DeleteSnapshot(request)
return err
}
func (s *awsOps) DevicePath(volumeID string) (string, error) {
vol, err := s.refreshVol(&volumeID)
if err != nil {
return "", err
}
if vol.Attachments == nil || len(vol.Attachments) == 0 {
return "", cloudops.NewStorageError(cloudops.ErrVolDetached,
"Volume is detached", *vol.VolumeId)
}
if vol.Attachments[0].InstanceId == nil {
return "", cloudops.NewStorageError(cloudops.ErrVolInval,
"Unable to determine volume instance attachment", "")
}
if s.instance != *vol.Attachments[0].InstanceId {
return "", cloudops.NewStorageError(cloudops.ErrVolAttachedOnRemoteNode,
fmt.Sprintf("Volume attached on %q current instance %q",
*vol.Attachments[0].InstanceId, s.instance),
*vol.Attachments[0].InstanceId)
}
if vol.Attachments[0].State == nil {
return "", cloudops.NewStorageError(cloudops.ErrVolInval,
"Unable to determine volume attachment state", "")
}
if *vol.Attachments[0].State != ec2.VolumeAttachmentStateAttached {
return "", cloudops.NewStorageError(cloudops.ErrVolInval,
fmt.Sprintf("Invalid state %q, volume is not attached",
*vol.Attachments[0].State), "")
}
if vol.Attachments[0].Device == nil {
return "", cloudops.NewStorageError(cloudops.ErrVolInval,
"Unable to determine volume attachment path", "")
}
devicePath, err := s.getActualDevicePath(*vol.Attachments[0].Device, volumeID)
if err != nil
|
return devicePath, nil
}
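// getInfoFromMetadata reads the zone, instance ID, instance type and (optional)
// outpost ARN from the EC2 instance metadata service.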
func getInfoFromMetadata() (string, string, string, string, error) {
c := ec2metadata.New(session.New())
zone, err := c.GetMetadata("placement/availability-zone")
if err != nil {
return "", "", "", "", err
}
instanceID, err := c.GetMetadata("instance-id")
if err != nil {
return "", "", "", "", err
}
instanceType, err := c.GetMetadata("instance-type")
if err != nil {
return "", "", "", "", err
}
outpostARN, err := c.GetMetadata("outpost-arn")
if err != nil {
// this metadata endpoint isn't guaranteed to be present
if !strings.Contains(err.Error(), "Code 404") && !strings.Contains(err.Error(), "status code: 404") {
return "", "", "", "", err
}
}
return zone, instanceID, instanceType, outpostARN, nil
}
func getInfoFromEnv() (string, string, string, error) {
zone, err := cloudops.GetEnvValueStrict("AWS_ZONE")
if err != nil {
return "", "", "", err
}
instance, err := cloudops.GetEnvValueStrict("AWS_INSTANCE_NAME")
if err != nil {
return "", "", "", err
}
instanceType, err := cloudops.GetEnvValueStrict("AWS_INSTANCE_TYPE")
if err != nil {
return "", "", "", err
}
if _, err := credentials.NewEnvCredentials().Get(); err != nil {
return "", "", "", ErrAWSEnvNotAvailable
}
return zone, instance, instanceType, nil
}
// DescribeInstanceByID describes the given instance by instance ID
func DescribeInstanceByID(service *ec2.EC2, id string) (*ec2.Instance, error) {
request := &ec2.DescribeInstancesInput{
InstanceIds: []*string{&id},
}
out, err := service.DescribeInstances(request)
if err != nil {
return nil, err
}
if len(out.Reservations) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v reservations, expect 1",
id, len(out.Reservations))
}
if len(out.Reservations[0].Instances) != 1 {
return nil, fmt.Errorf("DescribeInstances(%v) returned %v Reservations, expect 1",
id, len(out.Reservations[0].Instances))
}
return out.Reservations[0].Instances[0], nil
}
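// labelsFromTags converts a slice of EC2 or autoscaling tags into a plain
// string map, skipping nil tags and nil keys or values.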
func labelsFromTags(input interface{}) map[string]string {
labels := make(map[string]string)
ec2Tags, ok := input.([]*ec2.Tag)
if ok {
for _, tag := range ec2Tags {
if tag == nil {
continue
}
if tag.Key == nil || tag.Value == nil {
continue
}
labels[*tag.Key] = *tag.Value
}
return labels
}
autoscalingTags, ok := input.([]*autoscaling.TagDescription)
if ok {
for _, tag := range autoscalingTags {
if tag == nil {
continue
}
if tag.Key == nil || tag.Value == nil {
continue
}
labels[*tag.Key] = *tag.Value
}
return labels
}
return labels
}
func isExponentialError(err error) bool {
// Got the list of error codes from here
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
awsCodes := map[string]struct{}{
"VolumeLimitExceeded": {},
"AttachmentLimitExceeded": {},
"MaxIOPSLimitExceeded": {},
"ResourceLimitExceeded": {},
"RequestLimitExceeded": {},
"SnapshotLimitExceeded": {},
"TagLimitExceeded": {},
}
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if _, exist := awsCodes[awsErr.Code()]; exist {
return true
}
}
}
return false
}
func reverse(a []string) []string {
reversed := make([]string, len(a))
for i, item := range a {
reversed[len(a)-i-1] = item
}
return reversed
}
|
{
return "", cloudops.NewStorageError(cloudops.ErrVolInval,
err.Error(), "")
}
|
format_type.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
__author__ = 'alex jiang'
XML = 'XML'
JSON = 'JSON'
RAW = 'RAW'
APPLICATION_FORM = 'application/x-www-form-urlencoded'
APPLICATION_XML = 'application/xml'
APPLICATION_JSON = 'application/json'
APPLICATION_OCTET_STREAM = 'application/octet-stream'
TEXT_XML = 'text/xml'
def map_format_to_accept(format):
|
def map_accept_to_format(accept):
if accept.lower() == APPLICATION_XML or accept.lower() == TEXT_XML:
return XML
if accept.lower() == APPLICATION_JSON:
return JSON
return RAW
if __name__ == "__main__":
    print(map_format_to_accept(XML))
    print(map_format_to_accept(JSON))
    print(map_format_to_accept(RAW))
    print(map_accept_to_format("application/xml"))
    print(map_accept_to_format("text/xml"))
    print(map_accept_to_format("application/json"))
|
if format == XML:
return APPLICATION_XML
if format == JSON:
return APPLICATION_JSON
return APPLICATION_OCTET_STREAM
|
main.go
|
package main
import (
"github.com/gin-gonic/gin"
"log"
"net/http"
)
type Body struct {
Tag string `json:"tag"`
}
func main() {
router := gin.Default()
router.GET("/ping", func(c *gin.Context) {
log.Println("ok ping")
})
router.POST("/monitor", func(c *gin.Context) {
//log.Println(c.Query("tag"))
//buf := make([]byte, 1024)
//n, _ := c.Request.Body.Read(buf)
//log.Println("yuli: ", string(buf[0:n]))
		// ---> bind the request data
var data Body
if err := c.ShouldBindJSON(&data); err != nil {
c.AbortWithStatusJSON(
http.StatusInternalServerError,
|
		// --> send the response
c.JSON(http.StatusOK, gin.H{"msg": "ok"})
log.Printf("%+v", data)
log.Println("ok monitor")
})
router.Run(":8080")
}
|
gin.H{"error": err.Error()})
return
}
|
suggestModel.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { onUnexpectedError } from 'vs/base/common/errors';
import { isFalsyOrEmpty } from 'vs/base/common/arrays';
import { TimeoutTimer } from 'vs/base/common/async';
import Event, { Emitter } from 'vs/base/common/event';
import { IDisposable, dispose } from 'vs/base/common/lifecycle';
import { TPromise } from 'vs/base/common/winjs.base';
import { ICommonCodeEditor, IModel, IWordAtPosition } from 'vs/editor/common/editorCommon';
import { ISuggestSupport, SuggestRegistry, StandardTokenType } from 'vs/editor/common/modes';
import { Position } from 'vs/editor/common/core/position';
import { provideSuggestionItems, getSuggestionComparator, ISuggestionItem } from './suggest';
import { CompletionModel } from './completionModel';
import { CursorChangeReason, ICursorSelectionChangedEvent } from 'vs/editor/common/controller/cursorEvents';
export interface ICancelEvent {
retrigger: boolean;
}
export interface ITriggerEvent {
auto: boolean;
}
export interface ISuggestEvent {
completionModel: CompletionModel;
isFrozen: boolean;
|
export class LineContext {
static shouldAutoTrigger(editor: ICommonCodeEditor): boolean {
const model = editor.getModel();
if (!model) {
return false;
}
const pos = editor.getPosition();
const word = model.getWordAtPosition(pos);
if (!word) {
return false;
}
if (word.endColumn !== pos.column) {
return false;
}
if (!isNaN(Number(word.word))) {
return false;
}
return true;
}
static isInEditableRange(editor: ICommonCodeEditor): boolean {
const model = editor.getModel();
const position = editor.getPosition();
if (model.hasEditableRange()) {
const editableRange = model.getEditableRange();
if (!editableRange.containsPosition(position)) {
return false;
}
}
return true;
}
readonly lineNumber: number;
readonly column: number;
readonly leadingLineContent: string;
readonly leadingWord: IWordAtPosition;
readonly auto: boolean;
constructor(model: IModel, position: Position, auto: boolean) {
this.leadingLineContent = model.getLineContent(position.lineNumber).substr(0, position.column - 1);
this.leadingWord = model.getWordUntilPosition(position);
this.lineNumber = position.lineNumber;
this.column = position.column;
this.auto = auto;
}
}
export const enum State {
Idle = 0,
Manual = 1,
Auto = 2
}
export class SuggestModel implements IDisposable {
private toDispose: IDisposable[] = [];
private quickSuggestDelay: number;
private triggerCharacterListener: IDisposable;
private triggerAutoSuggestPromise: TPromise<void>;
private triggerRefilter = new TimeoutTimer();
private _state: State;
private requestPromise: TPromise<void>;
private context: LineContext;
private currentPosition: Position;
private completionModel: CompletionModel;
private _onDidCancel: Emitter<ICancelEvent> = new Emitter<ICancelEvent>();
get onDidCancel(): Event<ICancelEvent> { return this._onDidCancel.event; }
private _onDidTrigger: Emitter<ITriggerEvent> = new Emitter<ITriggerEvent>();
get onDidTrigger(): Event<ITriggerEvent> { return this._onDidTrigger.event; }
private _onDidSuggest: Emitter<ISuggestEvent> = new Emitter<ISuggestEvent>();
get onDidSuggest(): Event<ISuggestEvent> { return this._onDidSuggest.event; }
constructor(private editor: ICommonCodeEditor) {
this._state = State.Idle;
this.triggerAutoSuggestPromise = null;
this.requestPromise = null;
this.completionModel = null;
this.context = null;
this.currentPosition = editor.getPosition() || new Position(1, 1);
// wire up various listeners
this.toDispose.push(this.editor.onDidChangeModel(() => {
this.updateTriggerCharacters();
this.cancel();
}));
this.toDispose.push(editor.onDidChangeModelLanguage(() => {
this.updateTriggerCharacters();
this.cancel();
}));
this.toDispose.push(this.editor.onDidChangeConfiguration(() => {
this.updateTriggerCharacters();
this.updateQuickSuggest();
}));
this.toDispose.push(SuggestRegistry.onDidChange(() => {
this.updateTriggerCharacters();
this.updateActiveSuggestSession();
}));
this.toDispose.push(this.editor.onDidChangeCursorSelection(e => {
this.onCursorChange(e);
}));
this.updateTriggerCharacters();
this.updateQuickSuggest();
}
dispose(): void {
dispose([this._onDidCancel, this._onDidSuggest, this._onDidTrigger, this.triggerCharacterListener, this.triggerRefilter]);
this.toDispose = dispose(this.toDispose);
this.cancel();
}
// --- handle configuration & precondition changes
private updateQuickSuggest(): void {
this.quickSuggestDelay = this.editor.getConfiguration().contribInfo.quickSuggestionsDelay;
if (isNaN(this.quickSuggestDelay) || (!this.quickSuggestDelay && this.quickSuggestDelay !== 0) || this.quickSuggestDelay < 0) {
this.quickSuggestDelay = 10;
}
}
private updateTriggerCharacters(): void {
dispose(this.triggerCharacterListener);
if (this.editor.getConfiguration().readOnly
|| !this.editor.getModel()
|| !this.editor.getConfiguration().contribInfo.suggestOnTriggerCharacters) {
return;
}
const supportsByTriggerCharacter: { [ch: string]: ISuggestSupport[] } = Object.create(null);
for (const support of SuggestRegistry.all(this.editor.getModel())) {
if (isFalsyOrEmpty(support.triggerCharacters)) {
continue;
}
for (const ch of support.triggerCharacters) {
const array = supportsByTriggerCharacter[ch];
if (!array) {
supportsByTriggerCharacter[ch] = [support];
} else {
array.push(support);
}
}
}
this.triggerCharacterListener = this.editor.onDidType(text => {
const lastChar = text.charAt(text.length - 1);
const supports = supportsByTriggerCharacter[lastChar];
if (supports) {
				// keep existing items that were not computed by the
// supports/providers that want to trigger now
const items: ISuggestionItem[] = [];
if (this.completionModel) {
for (const item of this.completionModel.items) {
if (supports.indexOf(item.support) < 0) {
items.push(item);
}
}
}
this.trigger(true, Boolean(this.completionModel), supports, items);
}
});
}
// --- trigger/retrigger/cancel suggest
get state(): State {
return this._state;
}
cancel(retrigger: boolean = false): void {
if (this.triggerAutoSuggestPromise) {
this.triggerAutoSuggestPromise.cancel();
this.triggerAutoSuggestPromise = null;
}
if (this.requestPromise) {
this.requestPromise.cancel();
this.requestPromise = null;
}
this._state = State.Idle;
this.completionModel = null;
this.context = null;
this._onDidCancel.fire({ retrigger });
}
private updateActiveSuggestSession(): void {
if (this._state !== State.Idle) {
if (!SuggestRegistry.has(this.editor.getModel())) {
this.cancel();
} else {
this.trigger(this._state === State.Auto, true);
}
}
}
private onCursorChange(e: ICursorSelectionChangedEvent): void {
const prevPosition = this.currentPosition;
this.currentPosition = this.editor.getPosition();
if (!e.selection.isEmpty()
|| e.source !== 'keyboard'
|| e.reason !== CursorChangeReason.NotSet) {
if (this._state === State.Idle) {
// Early exit if nothing needs to be done!
// Leave some form of early exit check here if you wish to continue being a cursor position change listener ;)
return;
}
this.cancel();
return;
}
if (!SuggestRegistry.has(this.editor.getModel())) {
return;
}
const model = this.editor.getModel();
if (!model) {
return;
}
if (this._state === State.Idle) {
// trigger 24x7 IntelliSense when idle, enabled, when cursor
// moved RIGHT, and when at a good position
if (this.editor.getConfiguration().contribInfo.quickSuggestions !== false
&& prevPosition.isBefore(this.currentPosition)
) {
this.cancel();
if (LineContext.shouldAutoTrigger(this.editor)) {
this.triggerAutoSuggestPromise = TPromise.timeout(this.quickSuggestDelay);
this.triggerAutoSuggestPromise.then(() => {
const model = this.editor.getModel();
const pos = this.editor.getPosition();
if (!model) {
return;
}
// validate enabled now
const { quickSuggestions } = this.editor.getConfiguration().contribInfo;
if (quickSuggestions === false) {
return;
} else if (quickSuggestions === true) {
// all good
} else {
model.forceTokenization(pos.lineNumber);
const { tokenType } = model
.getLineTokens(pos.lineNumber)
.findTokenAtOffset(pos.column - 1);
const inValidScope = quickSuggestions.other && tokenType === StandardTokenType.Other
|| quickSuggestions.comments && tokenType === StandardTokenType.Comment
|| quickSuggestions.strings && tokenType === StandardTokenType.String;
if (!inValidScope) {
return;
}
}
this.triggerAutoSuggestPromise = null;
this.trigger(true);
});
}
}
} else {
// refine active suggestion
this.triggerRefilter.cancelAndSet(() => {
const position = this.editor.getPosition();
const ctx = new LineContext(model, position, this._state === State.Auto);
this.onNewContext(ctx);
}, 25);
}
}
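	// Starts (or restarts) a suggest session: cancels any pending request, captures
	// the current line context and asks the providers for suggestion items.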
public trigger(auto: boolean, retrigger: boolean = false, onlyFrom?: ISuggestSupport[], existingItems?: ISuggestionItem[]): void {
const model = this.editor.getModel();
if (!model) {
return;
}
const ctx = new LineContext(model, this.editor.getPosition(), auto);
if (!LineContext.isInEditableRange(this.editor)) {
return;
}
// Cancel previous requests, change state & update UI
this.cancel(retrigger);
this._state = auto ? State.Auto : State.Manual;
this._onDidTrigger.fire({ auto });
// Capture context when request was sent
this.context = ctx;
this.requestPromise = provideSuggestionItems(model, this.editor.getPosition(),
this.editor.getConfiguration().contribInfo.snippetSuggestions,
onlyFrom
).then(items => {
this.requestPromise = null;
if (this._state === State.Idle) {
return;
}
const model = this.editor.getModel();
if (!model) {
return;
}
if (!isFalsyOrEmpty(existingItems)) {
const cmpFn = getSuggestionComparator(this.editor.getConfiguration().contribInfo.snippetSuggestions);
items = items.concat(existingItems).sort(cmpFn);
}
const ctx = new LineContext(model, this.editor.getPosition(), auto);
this.completionModel = new CompletionModel(items, this.context.column, {
leadingLineContent: ctx.leadingLineContent,
characterCountDelta: this.context ? ctx.column - this.context.column : 0
}, this.editor.getConfiguration().contribInfo.snippetSuggestions);
this.onNewContext(ctx);
}).then(null, onUnexpectedError);
}
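	// Re-evaluates the active session against a fresh line context: cancels,
	// retriggers or refilters the completion model depending on how the cursor moved.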
private onNewContext(ctx: LineContext): void {
if (!this.context) {
// happens when 24x7 IntelliSense is enabled and still in its delay
return;
}
if (ctx.lineNumber !== this.context.lineNumber) {
// e.g. happens when pressing Enter while IntelliSense is computed
this.cancel();
return;
}
if (ctx.column < this.context.column) {
// typed -> moved cursor LEFT -> retrigger if still on a word
if (ctx.leadingWord.word) {
this.trigger(this.context.auto, true);
} else {
this.cancel();
}
return;
}
if (!this.completionModel) {
// happens when IntelliSense is not yet computed
return;
}
if (ctx.column > this.context.column && this.completionModel.incomplete && ctx.leadingWord.word.length !== 0) {
			// typed -> moved cursor RIGHT & incomplete model & still on a word -> retrigger
const { complete, incomplete } = this.completionModel.resolveIncompleteInfo();
this.trigger(this._state === State.Auto, true, incomplete, complete);
} else {
// typed -> moved cursor RIGHT -> update UI
let oldLineContext = this.completionModel.lineContext;
let isFrozen = false;
this.completionModel.lineContext = {
leadingLineContent: ctx.leadingLineContent,
characterCountDelta: ctx.column - this.context.column
};
if (this.completionModel.items.length === 0) {
if (LineContext.shouldAutoTrigger(this.editor) && this.context.leadingWord.endColumn < ctx.leadingWord.startColumn) {
// retrigger when heading into a new word
this.trigger(this.context.auto, true);
return;
}
if (!this.context.auto) {
// freeze when IntelliSense was manually requested
this.completionModel.lineContext = oldLineContext;
isFrozen = this.completionModel.items.length > 0;
if (isFrozen && ctx.leadingWord.word.length === 0) {
// there were results before but now there aren't
// and also we are not on a word anymore -> cancel
this.cancel();
return;
}
} else {
// nothing left
this.cancel();
return;
}
}
this._onDidSuggest.fire({
completionModel: this.completionModel,
auto: this.context.auto,
isFrozen,
});
}
}
}
|
auto: boolean;
}
|
multiplex.go
|
package logging
import (
"net"
"time"
)
type tracerMultiplexer struct {
tracers []Tracer
}
var _ Tracer = &tracerMultiplexer{}
// NewMultiplexedTracer creates a new tracer that multiplexes all events to multiple tracers.
func NewMultiplexedTracer(tracers ...Tracer) Tracer {
if len(tracers) == 0 {
return nil
}
if len(tracers) == 1 {
return tracers[0]
}
return &tracerMultiplexer{tracers}
}
func (m *tracerMultiplexer) TracerForConnection(p Perspective, odcid ConnectionID) ConnectionTracer {
var connTracers []ConnectionTracer
for _, t := range m.tracers {
if ct := t.TracerForConnection(p, odcid); ct != nil {
connTracers = append(connTracers, ct)
}
}
return newConnectionMultiplexer(connTracers...)
}
func (m *tracerMultiplexer) SentPacket(remote net.Addr, hdr *Header, size ByteCount, frames []Frame) {
for _, t := range m.tracers {
t.SentPacket(remote, hdr, size, frames)
}
}
func (m *tracerMultiplexer) DroppedPacket(remote net.Addr, typ PacketType, size ByteCount, reason PacketDropReason) {
for _, t := range m.tracers {
t.DroppedPacket(remote, typ, size, reason)
}
}
type connTracerMultiplexer struct {
tracers []ConnectionTracer
}
var _ ConnectionTracer = &connTracerMultiplexer{}
func
|
(tracers ...ConnectionTracer) ConnectionTracer {
if len(tracers) == 0 {
return nil
}
if len(tracers) == 1 {
return tracers[0]
}
return &connTracerMultiplexer{tracers: tracers}
}
func (m *connTracerMultiplexer) StartedConnection(local, remote net.Addr, version VersionNumber, srcConnID, destConnID ConnectionID) {
for _, t := range m.tracers {
t.StartedConnection(local, remote, version, srcConnID, destConnID)
}
}
func (m *connTracerMultiplexer) ClosedConnection(reason CloseReason) {
for _, t := range m.tracers {
t.ClosedConnection(reason)
}
}
func (m *connTracerMultiplexer) SentTransportParameters(tp *TransportParameters) {
for _, t := range m.tracers {
t.SentTransportParameters(tp)
}
}
func (m *connTracerMultiplexer) ReceivedTransportParameters(tp *TransportParameters) {
for _, t := range m.tracers {
t.ReceivedTransportParameters(tp)
}
}
func (m *connTracerMultiplexer) SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) {
for _, t := range m.tracers {
t.SentPacket(hdr, size, ack, frames)
}
}
func (m *connTracerMultiplexer) ReceivedVersionNegotiationPacket(hdr *Header, versions []VersionNumber) {
for _, t := range m.tracers {
t.ReceivedVersionNegotiationPacket(hdr, versions)
}
}
func (m *connTracerMultiplexer) ReceivedRetry(hdr *Header) {
for _, t := range m.tracers {
t.ReceivedRetry(hdr)
}
}
func (m *connTracerMultiplexer) ReceivedPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) {
for _, t := range m.tracers {
t.ReceivedPacket(hdr, size, frames)
}
}
func (m *connTracerMultiplexer) BufferedPacket(typ PacketType) {
for _, t := range m.tracers {
t.BufferedPacket(typ)
}
}
func (m *connTracerMultiplexer) DroppedPacket(typ PacketType, size ByteCount, reason PacketDropReason) {
for _, t := range m.tracers {
t.DroppedPacket(typ, size, reason)
}
}
func (m *connTracerMultiplexer) UpdatedCongestionState(state CongestionState) {
for _, t := range m.tracers {
t.UpdatedCongestionState(state)
}
}
func (m *connTracerMultiplexer) UpdatedMetrics(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) {
	for _, t := range m.tracers {
		t.UpdatedMetrics(rttStats, cwnd, bytesInFlight, packetsInFlight)
}
}
func (m *connTracerMultiplexer) LostPacket(encLevel EncryptionLevel, pn PacketNumber, reason PacketLossReason) {
for _, t := range m.tracers {
t.LostPacket(encLevel, pn, reason)
}
}
func (m *connTracerMultiplexer) UpdatedPTOCount(value uint32) {
for _, t := range m.tracers {
t.UpdatedPTOCount(value)
}
}
func (m *connTracerMultiplexer) UpdatedKeyFromTLS(encLevel EncryptionLevel, perspective Perspective) {
for _, t := range m.tracers {
t.UpdatedKeyFromTLS(encLevel, perspective)
}
}
func (m *connTracerMultiplexer) UpdatedKey(generation KeyPhase, remote bool) {
for _, t := range m.tracers {
t.UpdatedKey(generation, remote)
}
}
func (m *connTracerMultiplexer) DroppedEncryptionLevel(encLevel EncryptionLevel) {
for _, t := range m.tracers {
t.DroppedEncryptionLevel(encLevel)
}
}
func (m *connTracerMultiplexer) DroppedKey(generation KeyPhase) {
for _, t := range m.tracers {
t.DroppedKey(generation)
}
}
func (m *connTracerMultiplexer) SetLossTimer(typ TimerType, encLevel EncryptionLevel, exp time.Time) {
for _, t := range m.tracers {
t.SetLossTimer(typ, encLevel, exp)
}
}
func (m *connTracerMultiplexer) LossTimerExpired(typ TimerType, encLevel EncryptionLevel) {
for _, t := range m.tracers {
t.LossTimerExpired(typ, encLevel)
}
}
func (m *connTracerMultiplexer) LossTimerCanceled() {
for _, t := range m.tracers {
t.LossTimerCanceled()
}
}
func (m *connTracerMultiplexer) Debug(name, msg string) {
for _, t := range m.tracers {
t.Debug(name, msg)
}
}
func (m *connTracerMultiplexer) Close() {
for _, t := range m.tracers {
t.Close()
}
}
|
newConnectionMultiplexer
|
CalendarScreen.js
|
import React, { useState, useEffect } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { Calendar, momentLocalizer } from 'react-big-calendar';
import moment from 'moment';
import 'moment/locale/es';
import 'react-big-calendar/lib/css/react-big-calendar.css';
import { messages } from 'helpers/calendar-messages';
import { uiOpenModal } from 'actions/ui';
import {
setActiveEvent,
clearActiveEvent,
eventStartLoading,
} from 'actions/events';
import Navbar from '../ui/Navbar';
import CalendarEvent from './CalendarEvent';
import CalendarModal from './CalendarModal';
import AddNewFab from 'components/ui/AddNewFab';
moment.locale('es');
const localizer = momentLocalizer(moment);
export default function
|
() {
const dispatch = useDispatch();
const { events } = useSelector((state) => state.calendar);
const [lastView, setLastView] = useState(
localStorage.getItem('lastView') || 'month'
);
const onSelectEvent = (event) => {
dispatch(setActiveEvent(event));
dispatch(uiOpenModal());
};
const onViewChange = (event) => {
setLastView(event);
localStorage.setItem('lastView', event);
};
const eventStyleGetter = (event, start, end, isSelected) => {
const style = {
backgroundColor: '#367CF7',
borderRadius: '0px',
opacity: 0.8,
display: 'block',
color: 'white',
};
return {
style,
};
};
const onSelectSlot = () => {
dispatch(clearActiveEvent());
};
useEffect(() => {
dispatch(eventStartLoading());
}, [dispatch]);
return (
<div className='calendar-screen'>
<Navbar />
<div className='calendar-main'>
<Calendar
localizer={localizer}
events={events}
startAccessor='start'
endAccessor='end'
messages={messages}
eventPropGetter={eventStyleGetter}
onSelectEvent={onSelectEvent}
onSelectSlot={onSelectSlot}
selectable={true}
onView={onViewChange}
view={lastView}
components={{ event: CalendarEvent }}
/>
<AddNewFab />
</div>
<CalendarModal />
</div>
);
}
|
CalendarScreen
|
mouse_dblclick.py
|
import pytest
from tests.actions.support.mouse import assert_move_to_coordinates, get_center
from tests.actions.support.refine import get_events, filter_dict
_DBLCLICK_INTERVAL = 640
# Using local fixtures because we want to start a new session between
# each test, otherwise the clicks in each test interfere with each other.
@pytest.fixture(autouse=True)
def release_actions(dblclick_session, request):
# release all actions after each test
# equivalent to a teardown_function, but with access to session fixture
request.addfinalizer(dblclick_session.actions.release)
@pytest.fixture
def dblclick_session(new_session, url, add_browser_capabilites):
_, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({})}})
session.url = url("/webdriver/tests/actions/support/test_actions_wdspec.html")
return session
@pytest.fixture
def mouse_chain(dblclick_session):
return dblclick_session.actions.sequence(
"pointer",
"pointer_id",
{"pointerType": "mouse"})
@pytest.mark.parametrize("click_pause", [0, 200])
def test_dblclick_at_coordinates(dblclick_session, mouse_chain, click_pause):
div_point = {
"x": 82,
"y": 187,
}
mouse_chain \
.pointer_move(div_point["x"], div_point["y"]) \
.click() \
.pause(click_pause) \
.click() \
.perform()
events = get_events(dblclick_session)
assert_move_to_coordinates(div_point, "outer", events)
expected = [
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "dblclick", "button": 0},
]
assert len(events) == 8
filtered_events = [filter_dict(e, expected[0]) for e in events]
assert expected == filtered_events[1:]
def test_dblclick_with_pause_after_second_pointerdown(dblclick_session, mouse_chain):
outer = dblclick_session.find.css("#outer", all=False)
center = get_center(outer.rect)
mouse_chain \
.pointer_move(int(center["x"]), int(center["y"])) \
.click() \
.pointer_down() \
.pause(_DBLCLICK_INTERVAL + 10) \
.pointer_up() \
.perform()
events = get_events(dblclick_session)
expected = [
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "dblclick", "button": 0},
]
assert len(events) == 8
filtered_events = [filter_dict(e, expected[0]) for e in events]
assert expected == filtered_events[1:]
def test_no_dblclick(dblclick_session, mouse_chain):
|
outer = dblclick_session.find.css("#outer", all=False)
center = get_center(outer.rect)
mouse_chain \
.pointer_move(int(center["x"]), int(center["y"])) \
.click() \
.pause(_DBLCLICK_INTERVAL + 10) \
.click() \
.perform()
events = get_events(dblclick_session)
expected = [
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
{"type": "mousedown", "button": 0},
{"type": "mouseup", "button": 0},
{"type": "click", "button": 0},
]
assert len(events) == 7
filtered_events = [filter_dict(e, expected[0]) for e in events]
assert expected == filtered_events[1:]
|
|
DialogCloseButton.tsx
|
import React, { useRef, cloneElement } from 'react'
import { useButton } from '@react-aria/button'
import { Cross2Icon } from '@radix-ui/react-icons'
import { useDialogContext } from './utils'
import { StyledCloseButton } from './styles'
import { DialogCloseButtonProps } from './types'
import { flattenChildren } from '@/utils/flattenChildren'
const DialogCloseButton = ({ children, css, element: Component = 'div', ...props }: DialogCloseButtonProps) => {
const { state, variant, onAction } = useDialogContext()
const closeButtonRef = useRef<HTMLButtonElement>()
const handlePress = () => {
onAction('x')
state.close()
}
const { buttonProps } = useButton({
onPress: () => handlePress()
}, closeButtonRef)
return (
<Component {...props}>
{children ? (
cloneElement(flattenChildren(children)[0], {
          onPress: handlePress
})
) : (
<StyledCloseButton {...buttonProps} ref={closeButtonRef}>
<Cross2Icon />
|
)}
</Component>
)
}
DialogCloseButton.displayName = 'DialogCloseButton'
export default DialogCloseButton
|
</StyledCloseButton>
|
ContextLogger.py
|
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2017
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
ContextLogger.py - wrapper for Python logging API
==========================================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
**Relevant Config variables** [Default values]::
[logging]
screen_level=info
file_level=debug
file=logFileName.txt
usecolor = False
**Basic Usage**:
>>> from utils import ContextLogger
>>> ContextLogger.createLoggingHandlers()
>>> logger = ContextLogger.getLogger('Name')
then within any script issue debug, info, warning and error messages, eg
>>> logger.warning("String too long [%d]", 100)
issuing an error message generates ``ExceptionRaisedByLogger``.
Logger can if required be configured via a config section.
Then pass config info to ``createLoggingHandlers``
>>> ContextLogger.createLoggingHandlers(config)
************************
'''
__author__ = "cued_dialogue_systems_group"
import contextlib, logging, inspect, copy, sys, traceback, time
import os.path
# ----------------------------------------------
# Configure the standard Python logging API
# ----------------------------------------------
msg_format = '%(levelname)-7s:: %(asctime)s: %(name)4s %(message)s'
class NOcolors:
'''
ASCII escape chars just print junk when dumping logger output to file. Can use the config setting usecolor.
'''
HEADER = ''
OKBLUE = ''
OKGREEN = ''
WARNING = ''
FAIL = ''
ENDC = ''
BOLD = ''
CYAN = ''
MAGENTA = ''
class bcolors:
'''
Color specification for logger output.
'''
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = "\033[1m"
CYAN = '\033[96m'
MAGENTA = '\033[95m'
class ConsoleFormatter(logging.Formatter):
'''
Class to format logger output to console.
'''
def __init__(self,*args, **kwargs) :
#NB: import coloredlogs may also offer a solution
self.color_choice = bcolors() if kwargs['colors'] in [True, 'True'] else NOcolors()
del kwargs['colors']
kwargs['datefmt']='%H:%M:%S'
logging.Formatter.__init__(self, msg_format, *args, **kwargs)
self.mapping = {
logging.WARNING: self.color_choice.WARNING,
logging.ERROR: self.color_choice.FAIL,
logging.INFO: self.color_choice.OKGREEN,
logging.DEBUG: self.color_choice.OKBLUE,
25: self.color_choice.CYAN, # logging.DIAL
35: self.color_choice.MAGENTA # logging.RESULTS
}
def format(self, record):
record2 = copy.copy(record)
if record.levelno in self.mapping:
record2.levelname = self.mapping[record.levelno] + \
record.levelname.center(7) + self.color_choice.ENDC
# get actual message:
msg_split = record.msg.split('\n')
msg = '\n'.join(msg_split[1:])
#record2.msg = msg_split[0] + '\n' + self.color_choice.BOLD + msg + self.color_choice.ENDC
record2.msg = msg_split[0] + self.color_choice.BOLD + msg + self.color_choice.ENDC
try:
return super(ConsoleFormatter , self).format(record2)
except TypeError:
print('except TypeError: in ContextLogger.ConsoleFormatter(). Known minor issue with message format of logger')
# Note: this might be more serious - it may be stopping the individual module logging level specification...
cl = {} # current set of context loggers indexed by module name
module_level = {} # logging level for each logger in cl
def resetLoggingHandlers():
top_logger = logging.getLogger('')
top_logger.handlers = []
def createLoggingHandlers(config=None, screen_level = "INFO", \
log_file = None, file_level = "DEBUG", use_color = True):
"""
Create a top level logger and configure logging handlers
:param config: a config structure as returned by the std ConfigParser |.|
:param screen_level: default screen logging level if no config |.|
:type screen_level: str
:param log_file: default log file if no config |.|
:type log_file: str
:param file_level: default file logging level if no config
:type file_level: str
:returns: None
.. note::
Valid logging levels are "DEBUG", "INFO", "WARNING", "ERROR"
|
top_logger = logging.getLogger('')
top_logger.setLevel(logging.DEBUG)
# levels for logging
file_append = False
if config:
if config.has_option("logging", "file") :
log_file = config.get("logging", "file")
if config.has_option("logging", "file_level") :
file_level = config.get("logging", "file_level").upper()
if config.has_option("logging", "file_append") :
file_append = config.get("logging", "file_append").upper()
if config.has_option("logging", "screen_level") :
screen_level = config.get("logging", "screen_level").upper()
if config.has_option("logging", "usecolor"):
use_color = config.get("logging", "usecolor")
for option in config.options('logging'):
if option not in ['usecolor','file', 'file_level', 'screen_level'] and option not in config.defaults():
logger_name = option.lower()
module_level[logger_name] = config.get('logging', option)
if logger_name in cl:
cl[logger_name].setLevel(module_level[logger_name])
# configure console output:
"""
There was a problem with dumping logger output to file - print() statements and logger comments get separated.
StreamHandler now sends to sys.stdout
"""
logging.addLevelName(25, "DIAL")
logging.addLevelName(35, "RESULTS")
ch = logging.StreamHandler(sys.stdout) # NB: originally took no arguments
if screen_level == "DIAL":
ch.setLevel(25)
elif screen_level == "RESULTS":
ch.setLevel(35)
else:
ch.setLevel(getattr(logging, screen_level.upper()))
ch.setFormatter(ConsoleFormatter(colors=use_color))
# add the handlers to logger
top_logger.addHandler(ch)
# configure file output:
if log_file :
# check that log file directory exists and if necessary create it
dname = os.path.dirname(log_file)
if not os.path.isdir(dname) and dname != '':
try:
os.mkdir(dname)
except OSError:
top_logger.error("Logging directory {} cannot be created.".format(dname))
raise
# create file handler which logs even debug messages
formatter = logging.Formatter(msg_format, datefmt='%H:%M:%S',)
file_mode = 'w'
if file_append:
file_mode = 'a'
fh = logging.FileHandler(log_file, mode=file_mode)
if file_level.upper() == 'DIAL':
lvl = 25
elif file_level.upper() == 'RESULTS':
lvl = 35
else:
lvl = getattr(logging, file_level.upper())
fh.setLevel(lvl)
fh.setFormatter(formatter)
top_logger.addHandler(fh)
# ----------------------------------------------
# Interface to the standard Python logging API
# ----------------------------------------------
class ExceptionRaisedByLogger(Exception) :
pass
class ContextLogger:
"""
Wrapper for Python logging class.
"""
def __init__(self, module_name=None, *args):
self.logger = logging.getLogger(module_name)
self.stack = args
self._log = []
sys.excepthook = self._exceptHook
def setLevel(self, level):
"""
Set the logging level of this logger.
:param level: default screen logging level if no config
:type level: str
:returns: None
"""
self.logger.setLevel(getattr(logging, level.upper()))
def _exceptHook(self, etype, value, tb) :
if etype != ExceptionRaisedByLogger :
msg = self._convertMsg("Uncaught exception: "+str(etype) + "( "+str(value)+" )\n")
tb_msg = "".join( traceback.format_exception(etype, value, tb))
tb_msg = "\n".join([(" "*10)+line for line in tb_msg.split("\n")])
msg += tb_msg
self.logger.error(msg)
sys.__excepthook__(etype, value, tb)
@contextlib.contextmanager
def addContext(self, *args) :
"""
Create a nested named context for use in a ``with`` statement.
:param args: list of one or more context names (str)
:returns: ContextManager
Example:
>>> with mylogger.addContext("Session 1") :
... mylogger.warning("Warn Message from Session 1")
"""
n = len(self.stack)
self.stack += args
yield self.stack
self.stack = self.stack[:n]
@contextlib.contextmanager
def addTimedContext(self, *args) :
"""
Create a timed nested named context for use in a ``with`` statement.
:param args: list of one or more context names (str)
:returns: ContextManager
Example:
        >>> with mylogger.addTimedContext("Session 1") :
... Dostuff()
On exit from the ``with`` statement, the elapsed time is logged.
"""
t0 = time.time()
n = len(self.stack)
self.stack += args
yield self.stack
t1 = time.time()
self.info("Timer %.4fs"%(t1-t0))
self.stack = self.stack[:n]
def _callLocString(self, ):
inspected = inspect.getouterframes(inspect.currentframe())
frame, filename, line_number, function_name, lines, index = inspected[min(3, len(inspected)-1)]
filename = filename.split("/")[-1]
return filename + ":" + function_name + ">" + str(line_number)
def _stackString(self) :
if len(self.stack) == 0:
return ""
return "(" + ", ".join(map(str, self.stack)) + "): "
def _convertMsg(self, msg) :
#return self._callLocString() + ": " + self._stackString() + "\n "+msg
s = self._callLocString().split(':')
calls = s[0][0:30]+" <"+s[1][0:30]
stacks = self._stackString()
return "%62s : %s %s" % (calls,stacks,msg)
def debug(self,msg,*args,**kwargs):
"""
Log a DEBUG message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
"""
msg = self._convertMsg(msg)
self.logger.debug(msg,*args,**kwargs)
def info(self,msg,*args,**kwargs):
""" Log an INFO message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
"""
msg = self._convertMsg(msg)
self.logger.info(msg,*args,**kwargs)
def warning(self,msg,*args,**kwargs):
"""
Log a WARNING message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
"""
msg = self._convertMsg(msg)
self.logger.warning(msg,*args,**kwargs)
def error(self,msg,*args,**kwargs):
"""
Log an ERROR message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
.. note::
Issuing an error message also raises exception ``ExceptionRaisedByLogger``
"""
msg0 = msg
msg = self._convertMsg(msg)
self.logger.error(msg,*args,**kwargs)
raise ExceptionRaisedByLogger(msg0)
def dial(self, msg, *args, **kwargs):
msg = self._convertMsg(msg)
self.logger.log(25,msg,*args,**kwargs)
def results(self, msg, *args, **kwargs):
msg = self._convertMsg(msg)
self.logger.log(35,msg,*args,**kwargs)
def getLogger(name):
"""
Retrieve or if necessary create a context logger with specified name.
:param name: name of logger to create or retrieve
:type name: str
:returns: logger (ContextLogger.ContextLogger)
.. note::
Use **only** this function to create instances of the ContextLogger class
"""
global cl
name = name.lower()
if name not in cl:
cl[name] = ContextLogger(name)
if name in module_level:
cl[name].setLevel(module_level[name])
return cl[name]
if __name__ == '__main__':
# creates upside down traffic lights
createLoggingHandlers()
cl = ContextLogger(__name__)
cl.info("starting test")
with cl.addContext("session 1") :
cl.warning("warning!")
try :
cl.error("error")
except ExceptionRaisedByLogger :
cl.info("ignoring the exception raised by the logger")
with cl.addContext("session 2"):
# try raising an exception
x = {}
print(x["door"])
|
"""
global cl
global module_level
|
QuestQuery.ts
|
import { BaseFilter, BaseQuery, ParamAllocator } from '@cubejs-backend/schema-compiler';
const GRANULARITY_TO_INTERVAL: Record<string, string> = {
second: 's',
minute: 'm',
hour: 'h',
day: 'd',
month: 'M',
year: 'Y'
};
class QuestParamAllocator extends ParamAllocator {
public paramPlaceHolder(paramIndex: number) {
return `$${paramIndex + 1}`;
}
}
class QuestFilter extends BaseFilter {
public orIsNullCheck(column: string, not: string): string {
return `${this.shouldAddOrIsNull(not) ? ` OR ${column} = NULL` : ''}`;
}
public setWhere(column: string): string {
return `${column} != NULL`;
}
public notSetWhere(column: string): string {
return `${column} = NULL`;
}
}
export class
|
extends BaseQuery {
public newFilter(filter: any) {
return new QuestFilter(this, filter);
}
public newParamAllocator(): ParamAllocator {
return new QuestParamAllocator();
}
public concatStringsSql(strings: string[]): string {
return `concat(${strings.join(', ')})`;
}
public convertTz(field: string): string {
return `to_timezone(${field}, '${this.timezone}')`;
}
public timeStampCast(value: string) {
return value;
}
public dateTimeCast(value: string) {
return value;
}
public subtractInterval(date: string, interval: string): string {
const [number, type] = this.parseInterval(interval);
return `dateadd('${type}', ${-number}, ${date})`;
}
public addInterval(date: string, interval: string): string {
const [number, type] = this.parseInterval(interval);
return `dateadd('${type}', ${number}, ${date})`;
}
public unixTimestampSql(): string {
// QuestDB's now() function returns epoch timestamp with microsecond granularity.
return 'now() / 1000000';
}
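
  // Illustrative note on unixTimestampSql() (not part of the original source):
  // since now() is in microseconds, e.g. 1_600_000_000_000_000 / 1000000 =
  // 1_600_000_000, the expression above yields the Unix timestamp in whole seconds.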
public timeGroupedColumn(granularity: string, dimension: string): string {
const interval = GRANULARITY_TO_INTERVAL[granularity];
if (interval === undefined) {
throw new Error(`${granularity} granularity is not supported`);
}
return `timestamp_floor('${GRANULARITY_TO_INTERVAL[granularity]}', ${dimension})`;
}
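
  // Illustrative example for timeGroupedColumn (hypothetical column name, not part
  // of the original source): timeGroupedColumn('day', 'orders.created_at') renders
  // as "timestamp_floor('d', orders.created_at)".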
public dimensionsJoinCondition(leftAlias: string, rightAlias: string): string {
const dimensionAliases = this.dimensionAliasNames();
if (!dimensionAliases.length) {
return '1 = 1';
}
return dimensionAliases
.map(alias => `(${leftAlias}.${alias} = ${rightAlias}.${alias} OR (${leftAlias}.${alias} = NULL AND ${rightAlias}.${alias} = NULL))`)
.join(' AND ');
}
public renderSqlMeasure(name: string, evaluateSql: string, symbol: any, cubeName: string, parentMeasure: string): string {
// QuestDB doesn't support COUNT(column_name) syntax.
// COUNT() or COUNT(*) should be used instead.
if (symbol.type === 'count') {
return 'count(*)';
}
return super.renderSqlMeasure(name, evaluateSql, symbol, cubeName, parentMeasure);
}
public primaryKeyCount(cubeName: string, distinct: boolean): string {
const primaryKeys: string[] = this.cubeEvaluator.primaryKeys[cubeName];
const primaryKeySql = primaryKeys.length > 1 ?
this.concatStringsSql(primaryKeys.map((pk) => this.castToString(this.primaryKeySql(pk, cubeName)))) :
this.primaryKeySql(primaryKeys[0], cubeName);
if (distinct) {
return `count_distinct(${primaryKeySql})`;
} else {
return 'count(*)';
}
}
public orderHashToString(hash: any): string | null {
// QuestDB has partial support for order by index column, so map these to the alias names.
// So, instead of:
// SELECT col_a as "a", col_b as "b" FROM tab ORDER BY 2 ASC
//
// the query should be:
// SELECT col_a as "a", col_b as "b" FROM tab ORDER BY "b" ASC
if (!hash || !hash.id) {
return null;
}
const fieldAlias = this.getFieldAlias(hash.id);
if (fieldAlias === null) {
return null;
}
const direction = hash.desc ? 'DESC' : 'ASC';
return `${fieldAlias} ${direction}`;
}
private getFieldAlias(id: string): string | null {
const equalIgnoreCase = (a: any, b: any) => (
typeof a === 'string' && typeof b === 'string' && a.toUpperCase() === b.toUpperCase()
);
let field;
field = this.dimensionsForSelect().find(
(d: any) => equalIgnoreCase(d.dimension, id),
);
if (field) {
return field.aliasName();
}
field = this.measures.find(
(d: any) => equalIgnoreCase(d.measure, id) || equalIgnoreCase(d.expressionName, id),
);
if (field) {
return field.aliasName();
}
return null;
}
public groupByClause(): string {
// QuestDB doesn't support group by index column, so map these to the alias names.
// So, instead of:
// SELECT col_a as "a", count() as "c" FROM tab GROUP BY 1
//
// the query should be:
// SELECT col_a as "a", count() as "c" FROM tab GROUP BY "a"
if (this.ungrouped) {
return '';
}
const names = this.dimensionAliasNames();
return names.length ? ` GROUP BY ${names.join(', ')}` : '';
}
}
|
QuestQuery
|
conf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# aioredis_timeseries documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import aioredis_timeseries
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Redis Timeseries'
copyright = u"2017, Ryan Anguiano"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = aioredis_timeseries.__version__
# The full version, including alpha/beta/rc tags.
release = aioredis_timeseries.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
|
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aioredis_timeseriesdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'aioredis_timeseries.tex',
u'Redis Timeseries Documentation',
u'Ryan Anguiano', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'aioredis_timeseries',
u'Redis Timeseries Documentation',
[u'Ryan Anguiano'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'aioredis_timeseries',
u'Redis Timeseries Documentation',
u'Ryan Anguiano',
'aioredis_timeseries',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
#default_role = None
|
BEP20Token.ts
|
import BEP20 from "@coreproject/networks";
|
import Wallet from "@coreproject/wallets";
import { getTransactionAndPrices } from "../../services/unmarshalServices";
export class BEP20Token extends Token {
_contract: Contract; // ether contract
constructor(
name: string,
symbol: string,
decimals: string,
iconURL: string,
address: string,
abi: Array<object>,
network: BEP20){
super(
name,
symbol,
decimals,
iconURL,
address,
abi,
network
);
this._contract = new ethers.Contract(address, abi, network.getProvider());
}
async call(nameFunction: string, ...args: any): Promise<any> {
    return await this._contract.functions[nameFunction](...args);
}
async balanceOf(address: string): Promise<string> {
const result = await this._contract.functions.balanceOf(address);
return ethers.utils.formatUnits(result[0], this.decimals);
}
async createTransferOrder(
wallet: Wallet,
to: string,
value: string
): Promise<_type.BEP20TransferTransactionRequest> {
const transactionValue = ethers.utils.parseUnits(value.toString(), this.decimals);
wallet = this.network.getSigner(wallet.provider);
const transaction = await this._contract.populateTransaction.transfer(to, transactionValue);
const transactionRequest = await wallet.populateTransaction(transaction);
return {
from: transactionRequest.from,
to: to,
contractAddress: transactionRequest.to,
nonce: transactionRequest.nonce,
value: transactionValue.toString(),
data: transactionRequest.data.toString(),
gasPrice: BigNumber.from(transactionRequest.gasPrice).toString(),
gasLimit: BigNumber.from(transactionRequest.gasLimit).toString(),
gasFee: BigNumber.from(transactionRequest.gasPrice).mul(transactionRequest.gasLimit).toString(),
};
}
async transfer(
wallet: Wallet,
transferTransactionRequest: _type.BEP20TransferTransactionRequest,
): Promise<_type.BEP20TransferTransactionResponse> {
const transactionRequest = {
from: transferTransactionRequest.from,
to: transferTransactionRequest.contractAddress,
nonce: Number.parseInt(transferTransactionRequest.nonce),
data: transferTransactionRequest.data?.toString(),
gasPrice: BigNumber.from(transferTransactionRequest.gasPrice.toString()),
gasLimit: BigNumber.from(transferTransactionRequest.gasLimit.toString()),
};
wallet = this.network.getSigner(wallet.provider);
const transactionResponse = await wallet.sendTransaction(transactionRequest);
return {
tx: transactionResponse.hash,
from: transactionResponse.from,
to: transferTransactionRequest.to,
nonce: transactionResponse.nonce,
value: transferTransactionRequest.value.toString(),
data: transactionResponse.data.toString(),
gasPrice: transactionResponse.gasPrice.toString(),
gasLimit: transactionResponse.gasLimit.toString(),
gasFee: BigNumber.from(transferTransactionRequest.gasPrice)
.mul(transactionResponse.gasLimit).toString(),
};
}
async getTransactions(walletAddress: string): Promise<void> {
return await getTransactionAndPrices(this.network.symbol, walletAddress, this.address);
}
async allowance(walletAddress: string, spenderAddress: string): Promise<string> {
const data = await this._contract.allowance(walletAddress, spenderAddress);
return data.toString();
}
async approve(
wallet: Wallet,
spenderAddress: string,
amount = "1000000000000000000"
): Promise<string> {
wallet = this.network.getSigner(wallet.provider);
const amountApprove = ethers.utils.parseUnits(amount.toString(), this.decimals).toString();
const contract = this._contract.connect(wallet);
const transRequest = await contract.approve(spenderAddress, amountApprove);
await transRequest.wait();
return amountApprove.toString();
}
}
|
import * as _type from "./types";
import { BigNumber, Contract, ethers } from "ethers";
import { Token } from "./Token";
|
combinations_test.go
|
package problem
import (
"reflect"
"testing"
)
// TestCombine tests generating combinations
func TestComb
|
ing.T) {
type args struct {
n int
k int
}
tests := []struct {
name string
args args
want [][]int
}{
{
name: "01",
args: args{
n: 4,
k: 2,
},
want: [][]int{
{1, 2},
{1, 3},
{1, 4},
{2, 3},
{2, 4},
{3, 4},
},
},
{
name: "02",
args: args{
n: 1,
k: 1,
},
want: [][]int{
{1},
},
},
}
for _, tt := range tests {
t.Run(
tt.name, func(t *testing.T) {
if got := combine(tt.args.n, tt.args.k); !reflect.DeepEqual(got, tt.want) {
t.Errorf("combine() = %v, want %v", got, tt.want)
}
},
)
}
}
|
ine(t *test
|
host.go
|
package v2
import (
"github.com/gin-gonic/gin"
"github.com/wujie1993/waves/pkg/controller"
"github.com/wujie1993/waves/pkg/orm/v2"
)
type HostController struct {
controller.BaseController
}
// @summary Get all hosts
|
// @failure 500 {object} controller.Response
// @router /api/v2/hosts [get]
func (c *HostController) GetHosts(ctx *gin.Context) {
c.List(ctx)
}
// @summary Get a single host
// @tags Host
// @produce json
// @accept json
// @param name path string true "Host name"
// @success 200 {object} controller.Response{Data=v2.Host}
// @failure 500 {object} controller.Response
// @router /api/v2/hosts/{name} [get]
func (c *HostController) GetHost(ctx *gin.Context) {
c.Get(ctx)
}
// @summary Create a single host
// @tags Host
// @produce json
// @accept json
// @param body body v2.Host true "Host information"
// @success 200 {object} controller.Response{Data=v2.Host}
// @failure 500 {object} controller.Response
// @router /api/v2/hosts [post]
func (c *HostController) PostHost(ctx *gin.Context) {
c.Create(ctx)
}
// @summary Update a single host
// @tags Host
// @produce json
// @accept json
// @param name path string true "Host name"
// @param body body v2.Host true "Host information"
// @success 200 {object} controller.Response{Data=v2.Host}
// @failure 500 {object} controller.Response
// @router /api/v2/hosts/{name} [put]
func (c *HostController) PutHost(ctx *gin.Context) {
c.Update(ctx)
}
// @summary Delete a single host
// @tags Host
// @produce json
// @accept json
// @param name path string true "Host name"
// @success 200 {object} controller.Response{Data=v2.Host}
// @failure 500 {object} controller.Response
// @router /api/v2/hosts/{name} [delete]
func (c *HostController) DeleteHost(ctx *gin.Context) {
c.Delete(ctx)
}
func NewHostController() HostController {
return HostController{
BaseController: controller.NewController(v2.NewHostRegistry()),
}
}
|
// @tags Host
// @produce json
// @accept json
// @success 200 {object} controller.Response{Data=[]v2.Host}
|
EntityRelationshipModelToDatabaseCodeConverter.unit.spec.ts
|
import {EntityRelationshipModel} from '@/erdiagram/parser/types/entity-relationship-model-types';
import {DatabaseModel} from '@/erdiagram/converter/database/model/database-model-types';
import DatabaseModelGenerator from '@/erdiagram/converter/database/model/DatabaseModelGenerator';
import EntityRelationshipModelToDatabaseCodeConverter
from '@/erdiagram/converter/database/code-converter/EntityRelationshipModelToDatabaseCodeConverter';
test('convertToCode() calls dependencies', () => {
const mockValues = {
entityRelationshipModel: <EntityRelationshipModel>{
entities: [],
relationships: []
},
databaseModel: <DatabaseModel>{
tables: []
},
outputCode: 'Output_code'
}
const databaseModelGeneratorMock = {
generateDatabaseModel: jest.fn(() => mockValues.databaseModel)
};
const convertToCodeMockFunction = jest.fn(() => mockValues.outputCode);
const databaseModelToCodeConverterMock = {
convertToCode: convertToCodeMockFunction
};
const entityRelationshipModelToDatabaseCodeConverter = new EntityRelationshipModelToDatabaseCodeConverter(
databaseModelGeneratorMock as unknown as DatabaseModelGenerator,
databaseModelToCodeConverterMock
);
|
const result = entityRelationshipModelToDatabaseCodeConverter.convertToCode(mockValues.entityRelationshipModel);
expect(result).toBe(mockValues.outputCode);
const generateDatabaseModelCalls = databaseModelGeneratorMock.generateDatabaseModel.mock.calls;
expect(generateDatabaseModelCalls.length).toBe(1);
const generateDatabaseModelCallArgs = generateDatabaseModelCalls[0] as any[];
expect(generateDatabaseModelCallArgs[0]).toBe(mockValues.entityRelationshipModel);
const convertToCodeCalls = databaseModelToCodeConverterMock.convertToCode.mock.calls;
expect(convertToCodeCalls.length).toBe(1);
const convertToCodeCallArgs = convertToCodeCalls[0] as any[];
expect(convertToCodeCallArgs[0]).toBe(mockValues.databaseModel);
});
| |
mount_test.go
|
package test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/fuse/nodefs"
"github.com/hanwen/go-fuse/fuse/pathfs"
)
func TestMountOnExisting(t *testing.T) {
ts := NewTestCase(t)
defer ts.Cleanup()
err := os.Mkdir(ts.mnt+"/mnt", 0777)
if err != nil {
t.Fatalf("Mkdir failed: %v", err)
}
nfs := nodefs.NewDefaultNode()
code := ts.connector.Mount(ts.rootNode(), "mnt", nfs, nil)
if code != fuse.EBUSY {
t.Fatal("expect EBUSY:", code)
}
err = os.Remove(ts.mnt + "/mnt")
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
code = ts.connector.Mount(ts.rootNode(), "mnt", nfs, nil)
if !code.Ok() {
t.Fatal("expect OK:", code)
}
code = ts.pathFs.Unmount("mnt")
if !code.Ok() {
t.Errorf("Unmount failed: %v", code)
}
}
func
|
(t *testing.T) {
ts := NewTestCase(t)
defer ts.Cleanup()
fs := pathfs.NewPathNodeFs(pathfs.NewLoopbackFileSystem(ts.orig), nil)
code := ts.connector.Mount(ts.rootNode(), "mnt", fs.Root(), nil)
if !code.Ok() {
t.Fatal("mount should succeed")
}
err := os.Rename(ts.mnt+"/mnt", ts.mnt+"/foobar")
if fuse.ToStatus(err) != fuse.EBUSY {
t.Fatal("rename mount point should fail with EBUSY:", err)
}
ts.pathFs.Unmount("mnt")
}
func TestMountReaddir(t *testing.T) {
ts := NewTestCase(t)
defer ts.Cleanup()
fs := pathfs.NewPathNodeFs(pathfs.NewLoopbackFileSystem(ts.orig), nil)
code := ts.connector.Mount(ts.rootNode(), "mnt", fs.Root(), nil)
if !code.Ok() {
t.Fatal("mount should succeed")
}
entries, err := ioutil.ReadDir(ts.mnt)
if err != nil {
t.Fatalf("ReadDir failed: %v", err)
}
if len(entries) != 1 || entries[0].Name() != "mnt" {
t.Error("wrong readdir result", entries)
}
ts.pathFs.Unmount("mnt")
}
func TestRecursiveMount(t *testing.T) {
ts := NewTestCase(t)
defer ts.Cleanup()
err := ioutil.WriteFile(ts.orig+"/hello.txt", []byte("blabla"), 0644)
if err != nil {
t.Fatalf("WriteFile failed: %v", err)
}
fs := pathfs.NewPathNodeFs(pathfs.NewLoopbackFileSystem(ts.orig), nil)
code := ts.connector.Mount(ts.rootNode(), "mnt", fs.Root(), nil)
if !code.Ok() {
t.Fatal("mount should succeed")
}
submnt := ts.mnt + "/mnt"
_, err = os.Lstat(submnt)
if err != nil {
t.Fatalf("Lstat failed: %v", err)
}
_, err = os.Lstat(filepath.Join(submnt, "hello.txt"))
if err != nil {
t.Fatalf("Lstat failed: %v", err)
}
f, err := os.Open(filepath.Join(submnt, "hello.txt"))
if err != nil {
t.Fatalf("Open failed: %v", err)
}
t.Log("Attempting unmount, should fail")
code = ts.pathFs.Unmount("mnt")
if code != fuse.EBUSY {
t.Error("expect EBUSY")
}
f.Close()
t.Log("Waiting for kernel to flush file-close to fuse...")
time.Sleep(testTtl)
t.Log("Attempting unmount, should succeed")
code = ts.pathFs.Unmount("mnt")
if code != fuse.OK {
t.Error("umount failed.", code)
}
}
func TestDeletedUnmount(t *testing.T) {
ts := NewTestCase(t)
defer ts.Cleanup()
submnt := filepath.Join(ts.mnt, "mnt")
pfs2 := pathfs.NewPathNodeFs(pathfs.NewLoopbackFileSystem(ts.orig), nil)
code := ts.connector.Mount(ts.rootNode(), "mnt", pfs2.Root(), nil)
if !code.Ok() {
t.Fatal("Mount error", code)
}
f, err := os.Create(filepath.Join(submnt, "hello.txt"))
if err != nil {
t.Fatalf("Create failed: %v", err)
}
t.Log("Removing")
err = os.Remove(filepath.Join(submnt, "hello.txt"))
if err != nil {
t.Fatalf("Remove failed: %v", err)
}
t.Log("Removing")
_, err = f.Write([]byte("bla"))
if err != nil {
t.Fatalf("Write failed: %v", err)
}
code = ts.pathFs.Unmount("mnt")
if code != fuse.EBUSY {
t.Error("expect EBUSY for unmount with open files", code)
}
f.Close()
time.Sleep((3 * testTtl) / 2)
code = ts.pathFs.Unmount("mnt")
if !code.Ok() {
t.Error("should succeed", code)
}
}
func TestDefaultNodeMount(t *testing.T) {
dir, err := ioutil.TempDir("", "go-fuse")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer os.RemoveAll(dir)
root := nodefs.NewDefaultNode()
s, conn, err := nodefs.MountRoot(dir, root, nil)
if err != nil {
t.Fatalf("MountRoot: %v", err)
}
go s.Serve()
defer s.Unmount()
if err := conn.Mount(root.Inode(), "sub", nodefs.NewDefaultNode(), nil); !err.Ok() {
t.Fatalf("Mount: %v", err)
}
if entries, err := ioutil.ReadDir(dir); err != nil {
t.Fatalf("ReadDir: %v", err)
} else if len(entries) != 1 {
t.Fatalf("got %d entries", len(entries))
} else if entries[0].Name() != "sub" {
t.Fatalf("got %q, want %q", entries[0].Name(), "sub")
}
}
|
TestMountRename
|
cvc_clinic_db.py
|
from deeply.datasets.util import image_mask
from tensorflow_datasets.core import (
Version,
GeneratorBasedBuilder
)
_DATASET_HOMEPAGE = "https://polyp.grand-challenge.org/CVCClinicDB/"
_DATASET_KAGGLE = "achillesrasquinha/cvcclinicdb"
_DATASET_DESCRIPTION = """
CVC-ClinicDB is a database of frames extracted from colonoscopy videos. These frames contain several examples of polyps. In addition to the frames, we provide the ground truth for the polyps. This ground truth consists of a mask corresponding to the region covered by the polyp in the image
"""
_DATASET_CITATION = """\
Bernal, J., Sánchez, F. J., Fernández-Esparrach, G., Gil, D., Rodríguez, C., & Vilariño, F. (2015). WM-DOVA maps for accurate polyp highlighting in colonoscopy: Validation vs. saliency maps from physicians. Computerized Medical Imaging and Graphics, 43, 99-111
"""
class CVCClinicDB(GeneratorBasedBuilder):
"""
The CVC-ClinicDB Dataset.
"""
VERSION = Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "Initial Release"
}
def _info(self, *args, **kwargs):
return image_mask._info(self,
description = _DATASET_DESCRIPTION,
|
*args, **kwargs
)
def _split_generators(self, *args, **kwargs):
return image_mask._split_generators(self, kaggle = _DATASET_KAGGLE, *args, **kwargs)
def _generate_examples(self, *args, **kwargs):
return image_mask._generate_examples(self, *args, **kwargs)
|
homepage = _DATASET_HOMEPAGE,
citation = _DATASET_CITATION,
|
starlink_oem.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import logging
import zipfile
from typing import List, IO, Tuple
from datetime import datetime
import arcade.models.cos as cos
import arcade.models.graph as graph
from arcade.importers.cos_oem.cos_oem import (BaseOEMCOSImporter,
OEMData, EphemerisLine)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
class StarlinkOEMCOSImporter(BaseOEMCOSImporter):
"""A class for fetching OEM data from the Starlink constellation in cloud
object storage and loading it into neo4j.
:param oem_bucket: The COS bucket where the OEM files are stored
"""
def __init__(self, oem_bucket: cos.COSBucket) -> None:
super().__init__(oem_bucket,
data_source_name='Starlink - OEM',
oem_file_fmt='[0-9]{20}.oem',
data_source_public=True)
def _convert_header_time(self, time_str: str) -> str:
"""Converts the time strings in the header of the Starlink OEM files
into the standard format used in the graph.
        :param time_str: The time string to convert
:return: The normalized time string
"""
input_time_fmt = '%Y-%m-%d %H:%M:%S %Z'
output_time_fmt = '%Y-%m-%dT%H:%M:%S'
dt_obj = datetime.strptime(time_str.strip(), input_time_fmt)
return dt_obj.strftime(output_time_fmt)
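    # Illustrative example for _convert_header_time (not part of the original
    # source): a header time of '2020-08-14 05:30:00 UTC' is normalized to
    # '2020-08-14T05:30:00'.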
def _convert_ephem_time(self, time_str: str) -> str:
"""Converts the epoch time strings in the ephemeris lines of the
Starlink OEM files into the standard format used in the graph.
        :param time_str: The time string to convert
:return: The normalized time string
"""
input_time_fmt = '%Y%j%H%M%S.%f'
output_time_fmt = '%Y-%m-%dT%H:%M:%S.%f'
dt_obj = datetime.strptime(time_str.strip(), input_time_fmt)
return dt_obj.strftime(output_time_fmt)
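    # Illustrative example for _convert_ephem_time (not part of the original
    # source): an epoch string of '2020227053000.000000' (day-of-year 227 of 2020)
    # is normalized to '2020-08-14T05:30:00.000000'.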
def
|
(self,
zip_file: zipfile.ZipFile,
oem_file_name: str) -> OEMData:
"""Parses the OEM data in text file contained in the passed zip
archive.
:param zip_file: The zip archive containing the OEM text files
:param oem_file_name: The text file in the zip archive to parse
        :return: The parsed OEM data
"""
ephemeris_lines: List[EphemerisLine] = []
# Message data not contained in the OEM files
oem_data: OEMData = {
'originator': 'Starlink',
'center_name': 'EARTH',
'ref_frame': 'EME2000',
'time_system': 'UTC'
}
with io.TextIOWrapper(zip_file.open(oem_file_name),
encoding='utf8') as oem_file:
for line_no, line in enumerate(oem_file):
if len(line.strip()) == 0:
break
# Header information is on the first 2 lines of the file
if line_no == 0:
ts = line[8:]
oem_data['creation_date'] = self._convert_header_time(ts)
elif line_no == 1:
start = line[16:39]
stop = line[55:78]
oem_data['start_time'] = self._convert_header_time(start)
oem_data['stop_time'] = self._convert_header_time(stop)
else:
# The state vectors are on every 4th line
if not line_no % 4 == 0:
continue
ephem_data = line.split(' ')
epoch = self._convert_ephem_time(ephem_data[0])
state_vector = [float(s) for s in ephem_data[1:]]
ephemeris_line: EphemerisLine
ephemeris_line = dict(epoch=epoch,
state_vector=state_vector)
ephemeris_lines.append(ephemeris_line)
oem_data['ephemeris_lines'] = ephemeris_lines
return oem_data
def _get_aso_id_name(self, file_name: str) -> Tuple[str, str]:
"""Gets the Starlink satellite's name and NORAD ID from the text
file name.
:param file_name: The name of the text file containing the OEM data
:return: The NORAD ID and name of the satellite
"""
data_parts = file_name.split('_')
aso_id = data_parts[1]
object_name = data_parts[2]
return aso_id, object_name
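    # Illustrative example for _get_aso_id_name (hypothetical file name, not part
    # of the original source): 'PREFIX_44713_STARLINK-1007_rest.txt'.split('_')
    # yields aso_id='44713' and object_name='STARLINK-1007'.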
def _process_fileobj(self,
fileobj: IO[bytes],
object_node: graph.COSObject) -> None:
"""Extracts and parses the OEM data from the given tar archive file.
:param tar_file_obj: The file object of the tar archive to extract OEM
data out of
:param object_node: The node in the graph representing the COS object
the OEM is stored in
"""
with zipfile.ZipFile(fileobj) as zip_file:
txt_file_names = [f for f in zip_file.namelist()
if f.endswith('.txt')]
for txt_file_name in txt_file_names:
oem_data = self._parse_oem_data(zip_file, txt_file_name)
aso_id, object_name = self._get_aso_id_name(txt_file_name)
oem_data['object_name'] = object_name
self._save_oem(oem_data, aso_id, object_node)
|
_parse_oem_data
|
mixed-data.js
|
import Dump from '@wesbos/dump'
import axios from 'axios'
import { graphql } from 'gatsby'
import React, { useEffect, useState } from 'react'
import styled from 'styled-components'
import { CryptoCard } from '../components/crypto-card'
import { Layout } from '../components/layout'
import { useTimeDifference } from '../hooks/use-time-difference'
export const autoGrid = (minColumnWidth = 250, gridGap = 0) => ({
display: 'grid',
gridTemplateColumns: `repeat(auto-fill, minmax(${minColumnWidth}px,1fr))`,
gridGap,
})
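// Illustrative example (not part of the original source): autoGrid(220, 20) returns
// { display: 'grid', gridTemplateColumns: 'repeat(auto-fill, minmax(220px,1fr))', gridGap: 20 },
// which is what the Gallery styled component below spreads in.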
const Gallery = styled.div({
...autoGrid(220, 20),
})
export default ({ data }) => {
const { data: coinData } = data.coinloreCoinlore
const timeDifference = useTimeDifference()
const [runTimeData, setRunTimeData] = useState({})
const [loading, setLoading] = useState(true)
useEffect(() => {
async function
|
() {
const res = await axios(
'https://api.coinlore.com/api/tickers/?start=1&limit=10'
)
const { data } = res.data
setRunTimeData(data)
setLoading(false)
}
getRunTimeData()
}, [])
const timeSinceBuild = 3
if (loading && timeDifference > timeSinceBuild)
return (
<Layout>
        <p>Build-time data is stale, fetching new data...</p>
</Layout>
)
let coinloreData = {}
if (timeDifference > timeSinceBuild) {
// use runtime data
coinloreData = runTimeData
} else {
// use build time data
coinloreData = coinData
}
return (
<Layout>
<p>Minutes since build: {timeDifference}</p>
<Gallery>
{coinloreData.map(coin => (
          <CryptoCard
            key={coin.id}
            id={coin.id}
symbol={coin.symbol}
name={coin.name}
price={coin.price_usd}
pcChange1h={coin.percent_change_1h}
pcChange24h={coin.percent_change_24h}
pcChange7d={coin.percent_change_7d}
/>
))}
</Gallery>
<Dump data={coinloreData} />
</Layout>
)
}
export const GatsbyQuery = graphql`
query CoinloreQuery {
coinloreCoinlore {
data {
symbol
name
nameid
rank
price_usd
percent_change_24h
percent_change_1h
percent_change_7d
price_btc
market_cap_usd
volume24
volume24a
csupply
tsupply
msupply
}
}
}
`
|
getRunTimeData
|
面试题31. 栈的压入、弹出序列.go
|
package stackandqueue
import "strings"
func validateStackSequences(pushed []int, popped []int) bool {
if len(pushed) == 0
|
j := 0
stack := make([]int, 0)
for i := 0; i < len(pushed); i++ {
stack = append(stack, pushed[i])
for j < len(popped) && len(stack) != 0 && stack[len(stack) - 1] == popped[j] {
stack = stack[:len(stack) - 1]
j++
}
}
return len(stack) == 0
}
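
// Illustrative examples (not part of the original source):
// validateStackSequences([]int{1, 2, 3, 4, 5}, []int{4, 5, 3, 2, 1}) returns true,
// while validateStackSequences([]int{1, 2, 3, 4, 5}, []int{4, 3, 5, 1, 2}) returns false.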
func test() {
	_ = strings.Join([]string{"a", "b"}, ",") // Join requires a slice and a separator
}
|
{
return true
}
|
swiper-class.d.ts
|
import { Dom7Array } from 'dom7';
import { SwiperOptions } from './swiper-options';
import { CSSSelector, SwiperComponent } from './shared';
import { SwiperEvents } from './swiper-events';
import { A11yMethods } from './components/a11y';
import { AutoplayMethods } from './components/autoplay';
import { ControllerMethods } from './components/controller';
import { CoverflowEffectMethods } from './components/effect-coverflow';
import { CubeEffectMethods } from './components/effect-cube';
import { FadeEffectMethods } from './components/effect-fade';
import { FlipEffectMethods } from './components/effect-flip';
import { HashNavigationMethods } from './components/hash-navigation';
import { HistoryMethods } from './components/history';
import { KeyboardMethods } from './components/keyboard';
import { LazyMethods } from './components/lazy';
import { MousewheelMethods } from './components/mousewheel';
import { NavigationMethods } from './components/navigation';
import { PaginationMethods } from './components/pagination';
import { ParallaxMethods } from './components/parallax';
import { ScrollbarMethods } from './components/scrollbar';
import { ThumbsMethods } from './components/thumbs';
import { VirtualMethods } from './components/virtual';
import { ZoomMethods } from './components/zoom';
interface SwiperClass<Events> {
/** Add event handler */
on<E extends keyof Events>(event: E, handler: Events[E]): void;
/** Add event handler that will be removed after it was fired */
once<E extends keyof Events>(event: E, handler: Events[E]): void;
/** Remove event handler */
off<E extends keyof Events>(event: E, handler: Events[E]): void;
/** Remove all handlers for specified event */
off<E extends keyof Events>(event: E): void;
/** Fire event on instance */
emit<E extends keyof Events>(event: E, ...args: any[]): void;
}
interface Swiper extends SwiperClass<SwiperEvents> {
/**
* Object with passed initialization parameters
*/
params: SwiperOptions;
/**
* Dom7 element with slider container HTML element. To get vanilla HTMLElement use mySwiper.el
*/
$el: Dom7Array;
/**
* Dom7 element with slider wrapper HTML element. To get vanilla HTMLElement use mySwiper.wrapperEl
*/
$wrapperEl: Dom7Array;
/**
* Dom7 array-like collection of slides HTML elements. To get specific slide HTMLElement use mySwiper.slides[1]
*/
slides: Dom7Array;
/**
* Width of container
*/
width: number;
/**
* Height of container
*/
height: number;
/**
* Current value of wrapper translate
*/
translate: number;
/**
* Current progress of wrapper translate (from 0 to 1)
*/
progress: number;
/**
* Index number of currently active slide
*
   * @note In loop mode the active index value will always be shifted by the number of looped/duplicated slides
*/
activeIndex: number;
/**
* Index number of currently active slide considering duplicated slides in loop mode
*/
realIndex: number;
/**
* Index number of previously active slide
*/
previousIndex: number;
/**
* true if slider on most "left"/"top" position
*/
isBeginning: boolean;
/**
* true if slider on most "right"/"bottom" position
*/
isEnd: boolean;
/**
* true if swiper is in transition
*/
animating: boolean;
/**
* Object with the following touch event properties:
*/
touches: {
startX: number;
startY: number;
currentX: number;
currentY: number;
diff: number;
};
/**
* Index number of last clicked slide
*/
clickedIndex: number;
/**
* Link to last clicked slide (HTMLElement)
*/
clickedSlide: HTMLElement;
/**
* Disable / enable ability to slide to the next slides by assigning false/true to this property
*/
allowSlideNext: boolean;
/**
* Disable / enable ability to slide to the previous slides by assigning false/true to this property
*/
allowSlidePrev: boolean;
/**
* Disable / enable ability move slider by grabbing it with mouse or by touching it with finger (on touch screens) by assigning false/true to this property
*/
allowTouchMove: boolean;
/**
* Run transition to next slide.
*
* @param speed Transition duration (in ms).
* @param runCallbacks Set it to false (by default it is true) and transition will
* not produce transition events.
*/
slideNext(speed?: number, runCallbacks?: boolean): void;
/**
* Run transition to previous slide.
*
* @param speed Transition duration (in ms).
* @param runCallbacks Set it to false (by default it is true) and transition will
* not produce transition events.
*/
slidePrev(speed?: number, runCallbacks?: boolean): void;
/**
* Run transition to the slide with index number equal to 'index' parameter for the
* duration equal to 'speed' parameter.
*
* @param index Index number of slide.
* @param speed Transition duration (in ms).
* @param runCallbacks Set it to false (by default it is true) and transition will
* not produce transition events.
*/
slideTo(index: number, speed?: number, runCallbacks?: boolean): void;
/**
* Does the same as .slideTo but for the case when used with enabled loop. So this
* method will slide to slides with realIndex matching to passed index
*
* @param index Index number of slide.
* @param speed Transition duration (in ms).
* @param runCallbacks Set it to false (by default it is true) and transition will
* not produce transition events.
*/
slideToLoop(index: number, speed?: number, runCallbacks?: boolean): void;
/**
* Reset swiper position to currently active slide for the duration equal to 'speed'
* parameter.
*
* @param speed Transition duration (in ms).
* @param runCallbacks Set it to false (by default it is true) and transition will
* not produce transition events.
*/
slideReset(speed?: number, runCallbacks?: boolean): void;
/**
* Reset swiper position to closest slide/snap point for the duration equal to 'speed' parameter.
*
* @param speed Transition duration (in ms).
* @param runCallbacks Set it to false (by default it is true) and transition will
* not produce transition events.
*/
slideToClosest(speed?: number, runCallbacks?: boolean): void;
/**
* Force swiper to update its height (when autoHeight enabled) for the duration equal to
* 'speed' parameter
*
* @param speed Transition duration (in ms).
*/
updateAutoHeight(speed?: number): void;
/**
* You should call it after you add/remove slides
* manually, or after you hide/show it, or do any
* custom DOM modifications with Swiper
* This method also includes subcall of the following
* methods which you can use separately:
*/
update(): void;
/**
* recalculate size of swiper container
*/
updateSize(): void;
/**
* recalculate number of slides and their offsets. Useful after you add/remove slides with JavaScript
*/
updateSlides(): void;
/**
* recalculate swiper progress
*/
updateProgress(): void;
/**
* update active/prev/next classes on slides and bullets
*/
updateSlidesClasses(): void;
/**
* Changes slider direction from horizontal to vertical and back.
*
   * @param direction New direction. If not specified, it will automatically change to the opposite direction
* @param needUpdate Will call swiper.update(). Default true
*/
changeDirection(direction?: 'horizontal' | 'vertical', needUpdate?: boolean): void;
/**
* Detach all events listeners
*/
detachEvents(): void;
/**
* Attach all events listeners again
*/
attachEvents(): void;
/**
* Destroy slider instance and detach all events listeners
*
   * @param deleteInstance Set it to false (by default it is true) to not delete the Swiper instance
* @param cleanStyles Set it to true (by default it is true) and all custom styles will be removed from slides, wrapper and container.
* Useful if you need to destroy Swiper and to init again with new options or in different direction
*/
destroy(deleteInstance?: boolean, cleanStyles?: boolean): void;
/**
* Add new slides to the end. slides could be
* HTMLElement or HTML string with new slide or
* array with such slides, for example:
*
   * @example appendSlide('<div class="swiper-slide">Slide 10</div>')
   * @example
   * appendSlide([
   *  '<div class="swiper-slide">Slide 10</div>',
   *  '<div class="swiper-slide">Slide 11</div>'
   * ]);
*/
appendSlide(slides: HTMLElement | string | string[] | HTMLElement[]): void;
/**
* Add new slides to the beginning. slides could be
* HTMLElement or HTML string with new slide or array with such slides, for example:
*
   * @example prependSlide('<div class="swiper-slide">Slide 0</div>')
   * @example prependSlide([
   *  '<div class="swiper-slide">Slide 1</div>',
   *  '<div class="swiper-slide">Slide 2</div>'
   * ]);
*/
prependSlide(slides: HTMLElement | string | string[] | HTMLElement[]): void;
/**
* Add new slides to the required index. slides could be HTMLElement or HTML string with new slide or array with such slides, for example:
*
   * @example addSlide(1, '<div class="swiper-slide">Slide 10</div>')
   * @example addSlide(1, [
   *  '<div class="swiper-slide">Slide 10</div>',
   *  '<div class="swiper-slide">Slide 11</div>'
   * ]);
*/
addSlide(index: number, slides: HTMLElement | string | string[] | HTMLElement[]): void;
/**
* Remove selected slides. slideIndex could be a number with slide index to remove or array with indexes.
*
* @example removeSlide(0); // remove first slide
* @example removeSlide([0, 1]); // remove first and second slides
* @example removeAllSlides(); // Remove all slides
*/
removeSlide(slideIndex: number | number[]): void;
/**
* Remove all slides
*/
removeAllSlides(): void;
/**
* Set custom css3 transform's translate value for swiper wrapper
*/
setTranslate(translate: any): void;
/**
* Get current value of swiper wrapper css3 transform translate
*/
getTranslate(): any;
/**
* Animate custom css3 transform's translate value for swiper wrapper
*
* @param translate Translate value (in px)
* @param speed Transition duration (in ms)
* @param runCallbacks Set it to false (by default it is true) and transition will not produce transition events
* @param translateBounds Set it to false (by default it is true) and transition value can extend beyond min and max translate
*
*/
translateTo(
translate: number,
speed: number,
runCallbacks?: boolean,
translateBounds?: boolean,
): any;
/**
* Unset grab cursor
*/
unsetGrabCursor(): void;
/**
* Set grab cursor
*/
setGrabCursor(): void;
/**
* Add event listener that will be fired on all events
*/
onAny(handler: (eventName: string, ...args: any[]) => void): void;
/**
* Remove event listener that will be fired on all events
*/
offAny(handler: (eventName: string, ...args: any[]) => void): void;
destroyed: boolean;
modules: Array<any>; //TODO: add typing
a11y: A11yMethods;
autoplay: AutoplayMethods;
controller: ControllerMethods;
coverflowEffect: CoverflowEffectMethods;
cubeEffect: CubeEffectMethods;
fadeEffect: FadeEffectMethods;
flipEffect: FlipEffectMethods;
hashNavigation: HashNavigationMethods;
history: HistoryMethods;
keyboard: KeyboardMethods;
lazy: LazyMethods;
mousewheel: MousewheelMethods;
navigation: NavigationMethods;
pagination: PaginationMethods;
parallax: ParallaxMethods;
scrollbar: ScrollbarMethods;
thumbs: ThumbsMethods;
virtual: VirtualMethods;
zoom: ZoomMethods;
}
declare class Swiper implements Swiper {
/**
* Constructs a new Swiper instance.
*
* @param container Where Swiper applies to.
* @param options Instance options.
*/
constructor(container: CSSSelector | HTMLElement, options?: SwiperOptions);
/**
* Installs modules on Swiper in runtime.
*/
static use(modules: SwiperComponent[]): void;
/**
* Swiper default options
*/
static defaults: SwiperOptions;
|
* Extend global Swiper defaults
*/
static extendDefaults(options: SwiperOptions): void;
/**
   * Object with global Swiper extended options
*/
static extendedDefaults: SwiperOptions;
}
export default Swiper;
|
/**
|
evaluation_proof.rs
|
use crate::commitment::*;
use crate::srs::SRS;
use ark_ec::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve};
use ark_ff::{Field, One, PrimeField, UniformRand, Zero};
use ark_poly::univariate::DensePolynomial;
use o1_utils::{
math,
types::{BaseField, ScalarField},
};
use oracle::{sponge::ScalarChallenge, FqSponge};
use rand_core::{CryptoRng, RngCore};
use rayon::prelude::*;
use std::iter::Iterator;
impl<G: CommitmentCurve> SRS<G> {
/// This function opens polynomial commitments in batch
    /// plnms: batch of polynomials to open commitments for, each with an optional max degree
/// elm: evaluation point vector to open the commitments at
/// polyscale: polynomial scaling factor for opening commitments in batch
/// evalscale: eval scaling factor for opening commitments in batch
/// oracle_params: parameters for the random oracle argument
/// RETURN: commitment opening proof
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
#[allow(clippy::many_single_char_names)]
pub fn open<EFqSponge, RNG>(
&self,
group_map: &G::Map,
// TODO(mimoo): create a type for that entry
plnms: &[(
&DensePolynomial<ScalarField<G>>,
Option<usize>,
PolyComm<ScalarField<G>>,
)], // vector of polynomial with optional degree bound and commitment randomness
elm: &[ScalarField<G>], // vector of evaluation points
        polyscale: ScalarField<G>, // scaling factor for polynomials
evalscale: ScalarField<G>, // scaling factor for evaluation point powers
mut sponge: EFqSponge, // sponge
rng: &mut RNG,
) -> OpeningProof<G>
where
EFqSponge: Clone + FqSponge<BaseField<G>, G, ScalarField<G>>,
RNG: RngCore + CryptoRng,
G::BaseField: PrimeField,
{
let rounds = math::ceil_log2(self.g.len());
let padded_length = 1 << rounds;
// TODO: Trim this to the degree of the largest polynomial
let padding = padded_length - self.g.len();
let mut g = self.g.clone();
g.extend(vec![G::zero(); padding]);
let (p, blinding_factor) = {
let mut plnm = ChunkedPolynomial::<ScalarField<G>, &[ScalarField<G>]>::default();
// let mut plnm_chunks: Vec<(ScalarField<G>, OptShiftedPolynomial<_>)> = vec![];
let mut omega = ScalarField::<G>::zero();
let mut scale = ScalarField::<G>::one();
// iterating over polynomials in the batch
for (p_i, degree_bound, omegas) in plnms.iter().filter(|p| !p.0.is_zero()) {
let mut offset = 0;
let mut j = 0;
// iterating over chunks of the polynomial
if let Some(m) = degree_bound {
assert!(p_i.coeffs.len() <= m + 1);
while j < omegas.unshifted.len() {
let segment = &p_i.coeffs[offset
..if offset + self.g.len() > p_i.coeffs.len() {
p_i.coeffs.len()
} else {
offset + self.g.len()
}];
// always mixing in the unshifted segments
plnm.add_unshifted(scale, segment);
omega += &(omegas.unshifted[j] * scale);
j += 1;
scale *= &polyscale;
offset += self.g.len();
if offset > *m {
// mixing in the shifted segment since degree is bounded
plnm.add_shifted(scale, self.g.len() - m % self.g.len(), segment);
omega += &(omegas.shifted.unwrap() * scale);
scale *= &polyscale;
}
}
} else {
assert!(omegas.shifted.is_none());
while j < omegas.unshifted.len() {
let segment = &p_i.coeffs[offset
..if offset + self.g.len() > p_i.coeffs.len() {
p_i.coeffs.len()
} else {
offset + self.g.len()
}];
// always mixing in the unshifted segments
plnm.add_unshifted(scale, segment);
omega += &(omegas.unshifted[j] * scale);
j += 1;
scale *= &polyscale;
offset += self.g.len();
}
}
assert_eq!(j, omegas.unshifted.len());
}
(plnm.to_dense_polynomial(), omega)
};
let rounds = math::ceil_log2(self.g.len());
// b_j = sum_i r^i elm_i^j
let b_init = {
// randomise/scale the eval powers
let mut scale = ScalarField::<G>::one();
let mut res: Vec<ScalarField<G>> = (0..padded_length)
.map(|_| ScalarField::<G>::zero())
.collect();
for e in elm {
for (i, t) in pows(padded_length, *e).iter().enumerate() {
res[i] += &(scale * t);
}
scale *= &evalscale;
}
res
};
let combined_inner_product = p
.coeffs
.iter()
.zip(b_init.iter())
.map(|(a, b)| *a * b)
.fold(ScalarField::<G>::zero(), |acc, x| acc + x);
sponge.absorb_fr(&[shift_scalar::<G>(combined_inner_product)]);
let t = sponge.challenge_fq();
let u: G = to_group(group_map, t);
let mut a = p.coeffs;
assert!(padded_length >= a.len());
a.extend(vec![ScalarField::<G>::zero(); padded_length - a.len()]);
let mut b = b_init;
let mut lr = vec![];
let mut blinders = vec![];
let mut chals = vec![];
let mut chal_invs = vec![];
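        // Inner-product-argument folding: each round halves a, b and g, absorbing
        // the L/R commitments into the sponge to derive the round challenge.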
for _ in 0..rounds {
let n = g.len() / 2;
let (g_lo, g_hi) = (g[0..n].to_vec(), g[n..].to_vec());
let (a_lo, a_hi) = (&a[0..n], &a[n..]);
let (b_lo, b_hi) = (&b[0..n], &b[n..]);
let rand_l = ScalarField::<G>::rand(rng);
let rand_r = ScalarField::<G>::rand(rng);
let l = VariableBaseMSM::multi_scalar_mul(
&[&g[0..n], &[self.h, u]].concat(),
&[&a[n..], &[rand_l, inner_prod(a_hi, b_lo)]]
.concat()
.iter()
.map(|x| x.into_repr())
.collect::<Vec<_>>(),
)
.into_affine();
let r = VariableBaseMSM::multi_scalar_mul(
&[&g[n..], &[self.h, u]].concat(),
&[&a[0..n], &[rand_r, inner_prod(a_lo, b_hi)]]
.concat()
.iter()
.map(|x| x.into_repr())
.collect::<Vec<_>>(),
)
.into_affine();
lr.push((l, r));
blinders.push((rand_l, rand_r));
sponge.absorb_g(&[l]);
sponge.absorb_g(&[r]);
let u_pre = squeeze_prechallenge(&mut sponge);
let u = u_pre.to_field(&self.endo_r);
let u_inv = u.inverse().unwrap();
chals.push(u);
chal_invs.push(u_inv);
a = a_hi
.par_iter()
.zip(a_lo)
.map(|(&hi, &lo)| {
// lo + u_inv * hi
let mut res = hi;
res *= u_inv;
res += &lo;
res
})
.collect();
b = b_lo
.par_iter()
.zip(b_hi)
.map(|(&lo, &hi)| {
// lo + u * hi
let mut res = hi;
res *= u;
res += &lo;
res
})
.collect();
g = G::combine_one_endo(self.endo_r, self.endo_q, &g_lo, &g_hi, u_pre);
}
assert!(g.len() == 1);
let a0 = a[0];
let b0 = b[0];
let g0 = g[0];
let r_prime = blinders
.iter()
.zip(chals.iter().zip(chal_invs.iter()))
.map(|((l, r), (u, u_inv))| ((*l) * u_inv) + (*r * u))
.fold(blinding_factor, |acc, x| acc + x);
let d = ScalarField::<G>::rand(rng);
let r_delta = ScalarField::<G>::rand(rng);
let delta = ((g0.into_projective() + (u.mul(b0))).into_affine().mul(d)
+ self.h.mul(r_delta))
.into_affine();
sponge.absorb_g(&[delta]);
let c = ScalarChallenge(sponge.challenge()).to_field(&self.endo_r);
let z1 = a0 * c + d;
let z2 = c * r_prime + r_delta;
OpeningProof {
delta,
lr,
z1,
z2,
sg: g0,
}
}
}
#[derive(Clone, Debug)]
pub struct OpeningProof<G: AffineCurve> {
/// vector of rounds of L & R commitments
pub lr: Vec<(G, G)>,
pub delta: G,
pub z1: G::ScalarField,
|
}
pub struct Challenges<F> {
pub chal: Vec<F>,
pub chal_inv: Vec<F>,
}
impl<G: AffineCurve> OpeningProof<G> {
pub fn prechallenges<EFqSponge: FqSponge<BaseField<G>, G, ScalarField<G>>>(
&self,
sponge: &mut EFqSponge,
) -> Vec<ScalarChallenge<ScalarField<G>>> {
let _t = sponge.challenge_fq();
self.lr
.iter()
.map(|(l, r)| {
sponge.absorb_g(&[*l]);
sponge.absorb_g(&[*r]);
squeeze_prechallenge(sponge)
})
.collect()
}
pub fn challenges<EFqSponge: FqSponge<BaseField<G>, G, ScalarField<G>>>(
&self,
endo_r: &ScalarField<G>,
sponge: &mut EFqSponge,
) -> Challenges<ScalarField<G>> {
let chal: Vec<_> = self
.lr
.iter()
.map(|(l, r)| {
sponge.absorb_g(&[*l]);
sponge.absorb_g(&[*r]);
squeeze_challenge(endo_r, sponge)
})
.collect();
let chal_inv = {
let mut cs = chal.clone();
ark_ff::batch_inversion(&mut cs);
cs
};
Challenges { chal, chal_inv }
}
}
|
pub z2: G::ScalarField,
pub sg: G,
|
App.js
|
import React, { Component } from 'react';
import axios from 'axios'
import logo from './logo.svg';
import './App.css';
const url = 'https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY'
class App extends Component {
render() {
return (
<div className="App">
<div className="App-header">
<img src={logo} className="App-logo" alt="logo" />
<h2>Welcome to React</h2>
</div>
<p className="App-intro">
To get started, edit <code>src/App.js</code> and save to reload.
</p>
</div>
);
}
}
class
|
extends Component {
constructor(props) {
super(props)
this.state = {
imgUrl: ''
}
this.getImage = this.getImage.bind(this)
}
getImage(event) {
var self = this
axios.get(url)
.then(function(response){
console.log(response.data.url)
self.setState({imgUrl: response.data.url})
})
      .catch(function(error){
        console.error(error)
      })
}
render() {
return(
<div>
<img src={this.state.imgUrl} />
<br/>
<button onClick={this.getImage}> PUSH ME </button>
</div>
)
}
}
export {App, NASA};
|
NASA
|
lexer.rs
|
use super::Tokenizer;
use super::matcher::*;
use super::token::{Token, TokenType, TokenPosition};
use super::block_tree::{ChunkValue, Branch, Chunk};
use std::str::Chars;
use std::rc::Rc;
pub fn lexer(data: &mut Chars) -> Lexer {
let tokenizer = Tokenizer::new(data);
let mut lexer = Lexer::new(tokenizer);
let symbols = vec![
"(",
")",
"[",
"]",
",",
":",
"{",
"}",
"!",
"|",
"=",
"..",
".",
].iter().map(|&x| x.to_string()).collect();
let operators = vec![
"++",
"+",
"-",
"*",
"/",
"/",
"%",
"^",
">",
"<",
">=",
"<=",
"==",
"!=",
"and",
"or",
].iter().map(|&x| x.to_string()).collect();
let keywords = vec![
"if", "else", "elif", "unless", "return", "fun",
].iter().map(|&x| x.to_string()).collect();
let types = vec![
"num", "str", "any", "bool",
].iter().map(|&x| x.to_string()).collect();
let boolean = vec![
"true",
"false",
].iter().map(|&x| x.to_string()).collect();
let matcher_types = KeyMatcher::new(TokenType::Type, types);
let matcher_symbol = ConstantMatcher::new(TokenType::Symbol, symbols);
let matcher_operator = ConstantMatcher::new(TokenType::Operator, operators);
let matcher_boolean = KeyMatcher::new(TokenType::BoolLiteral, boolean);
let matcher_keyword = KeyMatcher::new(TokenType::Keyword, keywords);
let matcher_whitespace = WhitespaceMatcher {};
let matcher_int_literal = IntLiteralMatcher {};
let matcher_float_literal = FloatLiteralMatcher {};
let matcher_identifier = IdentifierMatcher {};
let matcher_string_literal = StringLiteralMatcher {};
lexer.matchers_mut().push(Rc::new(matcher_whitespace));
lexer.matchers_mut().push(Rc::new(matcher_symbol));
lexer.matchers_mut().push(Rc::new(matcher_float_literal));
lexer.matchers_mut().push(Rc::new(matcher_int_literal));
lexer.matchers_mut().push(Rc::new(matcher_string_literal));
lexer.matchers_mut().push(Rc::new(matcher_types));
lexer.matchers_mut().push(Rc::new(matcher_operator));
lexer.matchers_mut().push(Rc::new(matcher_boolean));
lexer.matchers_mut().push(Rc::new(matcher_keyword));
lexer.matchers_mut().push(Rc::new(matcher_identifier));
lexer
}
pub fn lex_branch(branch: &Branch) -> Branch {
let mut lexed_branch = Branch::new(Vec::new());
for c in branch.value.iter() {
match c.value() {
&ChunkValue::Source(ref s) => {
let mut line: Vec<Token> = lexer(&mut s.clone().chars()).collect();
line.push(Token::new(TokenType::EOL, TokenPosition::default(), "\n".to_owned()));
let chunk = ChunkValue::Tokens(line);
lexed_branch.value.push(Chunk::new(chunk))
},
&ChunkValue::Block(ref b) => {
let chunk = ChunkValue::Block(lex_branch(&b));
lexed_branch.value.push(Chunk::new(chunk))
},
_ => (),
}
}
lexed_branch
}
pub fn flatten_branch(branch: &Branch) -> Vec<Token> {
let mut flat = Vec::new();
for c in branch.value.iter() {
match c.value() {
&ChunkValue::Tokens(ref t) => flat.append(&mut t.clone()),
&ChunkValue::Block(ref b) => flat.push(Token::new(TokenType::Block(flatten_branch(b)), TokenPosition::new(0, 0), "".to_string())),
_ => continue,
}
}
flat
}
pub fn
|
(branch: &Branch) -> Vec<Token> {
flatten_branch(&lex_branch(branch))
}
pub struct Lexer {
tokenizer: Tokenizer,
matchers: Vec<Rc<Matcher>>,
}
#[allow(dead_code)]
impl Lexer {
pub fn new(tokenizer: Tokenizer) -> Lexer {
Lexer {
tokenizer,
matchers: Vec::new(),
}
}
pub fn match_token(&mut self) -> Option<Token> {
for matcher in &mut self.matchers {
match self.tokenizer.try_match_token(matcher.as_ref()) {
Some(t) => return Some(t),
None => continue,
}
}
None
}
pub fn matchers(&self) -> &Vec<Rc<Matcher>> {
&self.matchers
}
pub fn matchers_mut(&mut self) -> &mut Vec<Rc<Matcher>> {
&mut self.matchers
}
}
impl Iterator for Lexer {
type Item = Token;
fn next(&mut self) -> Option<Token> {
let token = self.match_token().unwrap();
match token.token_type {
TokenType::EOF => None,
TokenType::Whitespace => {
match self.next() {
Some(t) => Some(t),
None => None,
}
}
_ => Some(token),
}
}
}
|
process_branch
|
gerrit_api_test.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for gerrit_api.py"""
import copy
import json
import mock
import requests
import tempfile
import time
import unittest
from infra.libs import gerrit_api
GERRIT_JSON_HEADER = ')]}\'\n'
HEADERS = {
'Accept': 'application/json',
'Accept-encoding': 'gzip',
'Authorization': 'Basic Z2l0LWNvbW1pdC1ib3RAY2hyb21pdW0ub3JnOnNlY3JldA==',
}
HEADERS_WITH_CONTENT_TYPE = HEADERS.copy()
HEADERS_WITH_CONTENT_TYPE['Content-Type'] = 'application/json;charset=UTF-8'
TEST_PAYLOAD = {
'labels': {
'Code-Review': 1,
},
'message': 'Test message.',
'notify': 'NONE',
}
TEST_PAYLOAD_LABELS_ONLY = {
'labels': {
'Code-Review': 1,
},
'notify': 'OWNER',
}
TEST_CHANGE_INFO = {
'id': 'project~branch~12345~change',
'change_id': 12345,
'created': '2014-02-11 12:14:28.135200000',
'updated': '2014-03-11 00:20:08.946000000',
'current_revision': 'THIRD',
'owner': {
'name': 'Some Person',
},
'revisions': {
'THIRD': {
'_number': 3,
},
'SECOND': {
'_number': 2,
},
'FIRST': {
'_number': 1,
},
},
'labels': {
'Commit-Queue': {
'recommended': { '_account_id': 1 }
},
'Test-Label': {
'disliked': { '_account_id' : 42 }
},
'Code-Review': {
'approved': { '_account_id': 2 }
},
},
'messages': [
{
'id': 1,
'author': '[email protected]',
'date': '2014-02-11 12:10:14.311200000',
'message': 'MESSAGE1',
},
{
'id': 2,
'date': '2014-02-11 12:11:14.311200000',
'message': 'MESSAGE2',
'_revision_number': 2,
},
],
}
MOCK_AUTH = ('[email protected]', 'secret')
def _create_mock_return(content, code):
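  """Builds a requests.Response with the given body content and status code."""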
r = requests.Response()
r._content = content
r.status_code = code
return r
# TODO(akuegel): Add more test cases and remove the pragma no covers.
class GerritAgentTestCase(unittest.TestCase):
def setUp(self):
self.gerrit = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH))
self.gerrit_read_only = gerrit_api.Gerrit(
'chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
read_only=True)
@mock.patch.object(requests.Session, 'request')
def test_request_no_leading_slash(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s[]' % GERRIT_JSON_HEADER, 200)
result = self.gerrit._request(method='GET',
request_path='changes/?q=query:no_results')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'?q=query:no_results'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, (200, []))
@mock.patch.object(gerrit_api.Gerrit, '_sleep')
@mock.patch.object(time, 'time')
@mock.patch.object(requests.Session, 'request')
def test_request_throttled(self, mock_method, time_mock_method, sleep_mock):
gerrit_throttled = gerrit_api.Gerrit('chromium-review.googlesource.com',
gerrit_api.Credentials(auth=MOCK_AUTH),
0.1)
mock_method.return_value = _create_mock_return(None, 404)
time_mock_method.return_value = 100
gerrit_throttled._request(method='GET', request_path='/accounts/self')
# Call it twice to test the throttling.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
sleep_mock.assert_called_once_with(0)
time_mock_method.return_value = 101
# Call it again after exceeding the throttle to cover the other branch.
gerrit_throttled._request(method='GET', request_path='/accounts/self')
@mock.patch.object(requests.Session, 'request')
def
|
(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s{"_account_id":1000096,"name":"John Doe","email":'
'"[email protected]","username":"john"}') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.get_account('self')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url='https://chromium-review.googlesource.com/a/accounts/self',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = {
'_account_id': 1000096,
'name': 'John Doe',
'email': '[email protected]',
'username': 'john'
}
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_get_account_404(self, mock_method):
mock_method.return_value = _create_mock_return(None, 404)
result = self.gerrit.get_account('[email protected]')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com'
'/a/accounts/[email protected]'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_account_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 201)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_account, 'self')
@mock.patch.object(requests.Session, 'request')
def test_list_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"[email protected]","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
result = self.gerrit.list_group_members('test-group')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': '[email protected]',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_list_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.list_group_members, 'test-group')
def test_list_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.list_group_members, 'a/b/c')
@mock.patch.object(requests.Session, 'request')
def test_add_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"[email protected]","username": "jane"}]') % GERRIT_JSON_HEADER,
200)
members = ['[email protected]']
payload = { 'members': members }
result = self.gerrit.add_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.add'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': '[email protected]',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_add_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.add_group_members, 'test-group', ['[email protected]'])
def test_add_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.add_group_members, 'a/b/c', [])
def test_add_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.add_group_members,
'test-group', ['[email protected]'])
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members(self, mock_method):
mock_method.return_value = _create_mock_return(
('%s[{"_account_id":1000057,"name":"Jane Roe","email":'
'"[email protected]","username": "jane"}]') % GERRIT_JSON_HEADER,
204)
members = ['[email protected]']
payload = { 'members': members }
result = self.gerrit.delete_group_members('test-group', members)
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/groups/'
'test-group/members.delete'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
expected_result = [{
'_account_id': 1000057,
'name': 'Jane Roe',
'email': '[email protected]',
'username': 'jane'
}]
self.assertEqual(result, expected_result)
@mock.patch.object(requests.Session, 'request')
def test_delete_group_members_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(
gerrit_api.UnexpectedResponseException,
self.gerrit.delete_group_members, 'test-group', ['[email protected]'])
def test_delete_group_members_wrong_group(self):
self.assertRaises(ValueError, self.gerrit.delete_group_members, 'a/b/c', [])
def test_delete_group_members_read_only(self):
self.assertRaises(gerrit_api.AccessViolationException,
self.gerrit_read_only.delete_group_members,
'test-group', ['[email protected]'])
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s"parent"' % GERRIT_JSON_HEADER, 200)
result = self.gerrit.set_project_parent('project', 'parent')
payload = {
'parent': 'parent',
'commit_message': 'Changing parent project to parent'
}
mock_method.assert_called_once_with(
data=json.dumps(payload),
method='PUT',
params=None,
url=('https://chromium-review.googlesource.com/a/projects/'
'project/parent'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
self.assertEqual(result, 'parent')
@mock.patch.object(requests.Session, 'request')
def test_set_project_parent_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_project_parent, 'a', 'b')
@mock.patch.object(requests.Session, 'request')
def test_query(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test',
with_labels=False, with_revisions=False,
owner='[email protected]')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test owner:[email protected]', 'o': ['MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_with_query_name(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps([TEST_CHANGE_INFO])), 200)
result = self.gerrit.query(project='test', query_name='pending_cls',
owner='1012155')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'q':'project:test query:pending_cls owner:1012155',
'o': ['CURRENT_REVISION', 'LABELS', 'MESSAGES']},
url='https://chromium-review.googlesource.com/a/changes/',
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, [TEST_CHANGE_INFO])
@mock.patch.object(requests.Session, 'request')
def test_query_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 400)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.query, 'a', with_messages=False,
with_labels=False, with_revisions=False)
@mock.patch.object(requests.Session, 'request')
def test_get_issue(self, mock_method):
# By default, Gerrit doesn't return revisions data.
info_without_revisions = TEST_CHANGE_INFO.copy()
info_without_revisions.pop('revisions')
info_without_revisions.pop('current_revision')
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_without_revisions)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_without_revisions)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files(self, mock_method):
info_with_files = copy.deepcopy(TEST_CHANGE_INFO)
current = info_with_files['current_revision']
info_with_files['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info_with_files)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True)
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'CURRENT_REVISION']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info_with_files)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_files_and_revisions(self, mock_method):
info = copy.deepcopy(TEST_CHANGE_INFO)
current = info['current_revision']
info['revisions'][current]['files'] = {
"first.py": {
"lines_deleted": 8,
"size_delta": -412,
"size": 7782
},
"first.java": {
"lines_inserted": 1,
"size_delta": 23,
"size": 6762
},
}
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(info)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
current_files=True,
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['CURRENT_FILES', 'ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, info)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_with_all_revisions(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER, json.dumps(TEST_CHANGE_INFO)), 200)
result = self.gerrit.get_issue('test/project~weird/branch~hash',
revisions='ALL_REVISIONS')
mock_method.assert_called_once_with(
data=None,
method='GET',
params={'o': ['ALL_REVISIONS']},
url=('https://chromium-review.googlesource.com/a/changes/'
'test%2Fproject~weird%2Fbranch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, TEST_CHANGE_INFO)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_not_found(self, mock_method):
mock_method.return_value = _create_mock_return('Not found', 404)
result = self.gerrit.get_issue('unknown~branch~hash')
mock_method.assert_called_once_with(
data=None,
method='GET',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'unknown~branch~hash/detail'),
headers=HEADERS,
hooks=self.gerrit._instrumentation_hooks)
self.assertEquals(result, None)
@mock.patch.object(requests.Session, 'request')
def test_get_issue_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.get_issue, 'issue')
@mock.patch.object(requests.Session, 'request')
def test_set_review(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id', 'Test message.',
{ 'Code-Review': 1 })
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_only_label(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'labels':{'Code-Review':1}})), 200)
self.gerrit.set_review('change_id', 'revision_id',
labels={ 'Code-Review': 1 }, notify='OWNER')
mock_method.assert_called_once_with(
data=json.dumps(TEST_PAYLOAD_LABELS_ONLY),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/revision_id/review'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_set_review_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.set_review, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision(self, mock_method):
mock_method.return_value = _create_mock_return(
'%s%s' % (GERRIT_JSON_HEADER,
json.dumps({'status': 'MERGE'})), 200)
self.gerrit.submit_revision('change_id', 'current_revision_id')
mock_method.assert_called_once_with(
data=json.dumps({'wait_for_merge': True}),
method='POST',
params=None,
url=('https://chromium-review.googlesource.com/a/changes/'
'change_id/revisions/current_revision_id/submit'),
headers=HEADERS_WITH_CONTENT_TYPE,
hooks=self.gerrit._instrumentation_hooks)
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_revision_conflict(self, mock_method):
mock_method.return_value = _create_mock_return(
'revision revision_id is not current revision', 409)
self.assertRaises(gerrit_api.RevisionConflictException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
@mock.patch.object(requests.Session, 'request')
def test_submit_revision_unexpected_response(self, mock_method):
mock_method.return_value = _create_mock_return(None, 500)
self.assertRaises(gerrit_api.UnexpectedResponseException,
self.gerrit.submit_revision, 'change_id', 'revision_id')
|
test_get_account
|
unary.rs
|
use ndarray::*;
use tract_core::ops::prelude::*;
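/// Describes how padding along one axis is split between the two ends: one end
/// can be fixed while the other absorbs the remainder, or both ends are fixed.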
#[derive(Debug, Copy, Clone)]
pub enum PaddingStrat {
FlexFixed(usize),
FixedFlex(usize),
FixedFixed(usize, usize),
}
#[derive(Debug, Clone, new)]
pub struct SpaceToBatchUnary {
pub datum_type: DatumType,
pub space_shape: TVec<TDim>,
pub batch_shape: TVec<TDim>,
pub block_shape: Array1<i32>,
pub pad: TVec<PaddingStrat>,
}
impl Op for SpaceToBatchUnary {
fn name(&self) -> Cow<str> {
"SpaceToBatchUnary".into()
}
}
impl StatelessOp for SpaceToBatchUnary {
fn eval(&self, mut inputs: TVec<SharedTensor>) -> TractResult<TVec<SharedTensor>>
|
}
impl InferenceRulesOp for SpaceToBatchUnary {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
s: &mut Solver<'r>,
inputs: &'p SharedTensorsProxy,
outputs: &'p SharedTensorsProxy,
) -> InferenceResult {
s.equals(&inputs.len, 1)?;
s.equals(&outputs.len, 1)?;
s.equals(&inputs[0].datum_type, self.datum_type)?;
s.equals(&outputs[0].datum_type, self.datum_type)?;
s.equals(&inputs[0].rank, &outputs[0].rank)?;
s.equals(&outputs[0].shape, self.batch_shape.clone())?;
s.equals(&inputs[0].shape, self.space_shape.clone())?;
Ok(())
}
}
#[derive(Debug, Clone, new)]
pub struct BatchToSpaceUnary {
datum_type: DatumType,
batch_shape: TVec<TDim>,
space_shape: TVec<TDim>,
block_shape: Array1<i32>,
pad: Vec<PaddingStrat>,
}
impl Op for BatchToSpaceUnary {
fn name(&self) -> Cow<str> {
"BatchToSpaceUnary".into()
}
}
impl StatelessOp for BatchToSpaceUnary {
fn eval(&self, mut inputs: TVec<SharedTensor>) -> TractResult<TVec<SharedTensor>> {
let input = args_1!(inputs);
let mut paddings = unsafe { Array2::uninitialized((self.block_shape.len(), 2)) };
for (ax, &strat) in self.pad.iter().enumerate() {
let spread = (self.batch_shape[2 + ax] * self.block_shape[ax]
- self.space_shape[2 + ax])
.to_integer()? as usize;
let (bef, aft) = match strat {
PaddingStrat::FlexFixed(f) => (spread - f, f),
PaddingStrat::FixedFlex(f) => (f, spread - f),
PaddingStrat::FixedFixed(a, b) => (a, b),
};
paddings[(ax, 0)] = bef as i32;
paddings[(ax, 1)] = aft as i32;
}
let r = dispatch_numbers!(super::batch_to_space(input.datum_type())(
input,
&self.block_shape.view(),
&paddings.view()
))?;
Ok(tvec!(r))
}
}
impl InferenceRulesOp for BatchToSpaceUnary {
/// Registers the inference rules of the operator.
fn rules<'r, 'p: 'r, 's: 'r>(
&'s self,
s: &mut Solver<'r>,
inputs: &'p SharedTensorsProxy,
outputs: &'p SharedTensorsProxy,
) -> InferenceResult {
s.equals(&inputs.len, 1)?;
s.equals(&outputs.len, 1)?;
s.equals(&inputs[0].datum_type, self.datum_type)?;
s.equals(&outputs[0].datum_type, self.datum_type)?;
s.equals(&inputs[0].rank, &outputs[0].rank)?;
s.equals(&inputs[0].shape, self.batch_shape.clone())?;
s.equals(&outputs[0].shape, self.space_shape.clone())?;
Ok(())
}
}
|
{
let input = args_1!(inputs);
let mut paddings = unsafe { Array2::uninitialized((self.block_shape.len(), 2)) };
for (ax, &strat) in self.pad.iter().enumerate() {
let spread = (self.batch_shape[2 + ax] * self.block_shape[ax]
- self.space_shape[2 + ax])
.to_integer()? as usize;
let (bef, aft) = match strat {
PaddingStrat::FlexFixed(f) => (spread - f, f),
PaddingStrat::FixedFlex(f) => (f, spread - f),
PaddingStrat::FixedFixed(a, b) => (a, b),
};
paddings[(ax, 0)] = bef as i32;
paddings[(ax, 1)] = aft as i32;
}
let r = dispatch_numbers!(super::space_to_batch(input.datum_type())(
input,
&self.block_shape.view(),
&paddings.view()
))?;
Ok(tvec!(r))
}
|
defaults.py
|
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.utils.models.fields import get_default_field_value
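# Each function below supplies the default value for the corresponding shared or
# instance configuration field; fields without an explicit default fall back to
# get_default_field_value.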
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
|
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_min_collection_interval(field, value):
return 15
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_use_legacy_auth_encoding(field, value):
return True
|
return get_default_field_value(field, value)
|
mod.rs
|
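/// Encodes `x` into `buf` as an unsigned LEB128-style varint (7 bits per byte,
/// high bit set on every byte except the last) and returns the number of bytes written.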
pub fn put_uvarint(buf: &mut [u8], mut x: u64) -> usize {
let mut i = 0;
while x >= 0x80 {
buf[i] = x as u8 | 0x80;
x >>= 7;
i += 1;
}
buf[i] = x as u8;
(i + 1) as usize
}
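/// Decodes an unsigned varint from `buf`, returning the value and the number of
/// bytes consumed (0 if `buf` ends early, negative if the value overflows 64 bits).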
pub fn uvarint(buf: &[u8]) -> (u64, isize)
|
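/// Reads a little-endian `u32` from `b` starting at byte offset `i`.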
pub fn load32(b: &[u8], i: usize) -> u32 {
let b2 = &b[i..i+4];
return b2[0] as u32 | (b2[1] as u32) << 8 | (b2[2] as u32) << 16 | (b2[3] as u32) << 24
}
pub fn load64(b: &[u8], i: usize) -> u64 {
let b2 = &b[i..i+8];
return b2[0] as u64 | (b2[1] as u64) << 8 | (b2[2] as u64) << 16 | (b2[3] as u64) << 24 |
(b2[4] as u64) << 32 | (b2[5] as u64) << 40 | (b2[6] as u64) << 48 | (b2[7] as u64) << 56
}
|
{
let mut x: u64 = 0;
let mut s: u32 = 0;
for i in 0..buf.len() {
let b = buf[i];
if b < 0x80 {
if i > 9 || i == 9 && b > 1 {
return (0, -1 * (i + 1) as isize)
}
return (x | ((b as u64) << s) as u64, (i + 1) as isize)
}
x = x | (((b & 0x7f) as u64) << s as u64);
s += 7;
}
return (0, 0)
}
|
bot.py
|
from discord.ext import commands
import discord
import sys
from pathlib import Path
import motor.motor_asyncio
from config import token, extension_dir
from utils.context import UnnamedContext
from utils.help import PaginatedHelpCommand
class UnnamedBot(commands.Bot):
def __init__(self, command_prefix, **options):
self.db_client = motor.motor_asyncio.AsyncIOMotorClient('localhost', 27017)
self.db = self.db_client['unnamed-bot']
super().__init__(command_prefix, **options)
async def on_ready(self):
print(f"\n{'#' * 40}"
f"\n{self.user.name}"
f"\nPython version: {sys.version}"
f"\nDiscord.py version: {discord.__version__}\n{'#' * 40}"
f"\nLogged in as: {self.user.name} (ID: {self.user.id})")
self.help_command = PaginatedHelpCommand()
def run(self, **kwargs):
if kwargs.get('load_all'):
self.load_all_extensions(self.get_all_extensions_from_dir())
super().run(token)
async def get_context(self, message, *, cls=None):
return await super().get_context(message, cls=UnnamedContext)
@staticmethod
def format_cog(path, replacements=(('/', '.'), ('\\', '.'), ('.py', ''))):
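        """Converts a cog file path into a dotted module path, e.g. 'cogs/admin.py' -> 'cogs.admin'."""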
for replacement in replacements:
path = path.replace(*replacement)
return path
def get_all_extensions_from_dir(self, directory=extension_dir):
for cog in Path(directory).glob('**/*.py'):
cog_path = self.format_cog(str(cog))
yield cog_path
yield 'jishaku'
def
|
(self, extensions):
for extension in extensions:
try:
self.load_extension(extension)
print(f"Loaded {extension}")
except Exception as e:
print(f"Could'nt load {extension}. {e.__class__}: {e}")
|
load_all_extensions
|
inverse_gamma.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
|
class InverseGamma(distribution.Distribution):
"""The `InverseGamma` distribution with parameter alpha and beta.
The parameters are the shape and inverse scale parameters alpha, beta.
The PDF of this distribution is:
```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```
and the CDF of this distribution is:
```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```
where GammaInc is the upper incomplete Gamma function.
Examples:
```python
dist = InverseGamma(alpha=3.0, beta=2.0)
dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
```
"""
def __init__(self,
alpha,
beta,
validate_args=False,
allow_nan_stats=True,
name="InverseGamma"):
"""Construct InverseGamma distributions with parameters `alpha` and `beta`.
The parameters `alpha` and `beta` must be shaped in a way that supports
broadcasting (e.g. `alpha + beta` is a valid operation).
Args:
alpha: Floating point tensor, the shape params of the
distribution(s).
alpha must contain only positive values.
beta: Floating point tensor, the scale params of the distribution(s).
beta must contain only positive values.
validate_args: `Boolean`, default `False`. Whether to assert that
`a > 0`, `b > 0`, and that `x > 0` in the methods `prob(x)` and
`log_prob(x)`. If `validate_args` is `False` and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to prepend to all ops created by this distribution.
Raises:
TypeError: if `alpha` and `beta` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
with ops.control_dependencies([
check_ops.assert_positive(alpha),
check_ops.assert_positive(beta),
] if validate_args else []):
self._alpha = array_ops.identity(alpha, name="alpha")
self._beta = array_ops.identity(beta, name="beta")
super(InverseGamma, self).__init__(
dtype=self._alpha.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
is_continuous=True,
is_reparameterized=False,
parameters=parameters,
graph_parents=[self._alpha, self._beta],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("alpha", "beta"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def alpha(self):
"""Shape parameter."""
return self._alpha
@property
def beta(self):
"""Scale parameter."""
return self._beta
def _batch_shape(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.alpha), array_ops.shape(self.beta))
def _get_batch_shape(self):
return array_ops.broadcast_static_shape(
self.alpha.get_shape(), self.beta.get_shape())
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
"""See the documentation for tf.random_gamma for more details."""
return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta,
dtype=self.dtype, seed=seed)
def _log_prob(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
return (self.alpha * math_ops.log(self.beta) -
math_ops.lgamma(self.alpha) -
(self.alpha + 1.) * math_ops.log(x) - self.beta / x)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return math_ops.log(self._cdf(x))
def _cdf(self, x):
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.validate_args else [], x)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return math_ops.igammac(self.alpha, self.beta / x)
@distribution_util.AppendDocstring(
"""This is defined to be
```
entropy = alpha - log(beta) + log(Gamma(alpha))
+ (1-alpha)digamma(alpha)
```
where digamma(alpha) is the digamma function.""")
def _entropy(self):
return (self.alpha +
math_ops.log(self.beta) +
math_ops.lgamma(self.alpha) -
(1. + self.alpha) * math_ops.digamma(self.alpha))
@distribution_util.AppendDocstring(
"""The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
when `alpha > 1`, and `NaN` otherwise. If `self.allow_nan_stats` is
`False`, an exception will be raised rather than returning `NaN`""")
def _mean(self):
mean = self.beta / (self.alpha - 1.)
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
self.alpha > 1., mean,
array_ops.fill(self.batch_shape(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones((), self.dtype), self.alpha,
message="mean not defined for components of self.alpha <= 1"),
], mean)
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `alpha > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
var = (math_ops.square(self.beta) /
(math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
self.alpha > 2., var,
array_ops.fill(self.batch_shape(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
constant_op.constant(2., dtype=self.dtype), self.alpha,
message="variance not defined for components of alpha <= 2"),
], var)
def _mode(self):
"""The mode of an inverse gamma distribution is `beta / (alpha + 1)`."""
return self.beta / (self.alpha + 1.)
class InverseGammaWithSoftplusAlphaBeta(InverseGamma):
"""Inverse Gamma with softplus applied to `alpha` and `beta`."""
def __init__(self,
alpha,
beta,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusAlphaBeta"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[alpha, beta]) as ns:
super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
alpha=nn.softplus(alpha, name="softplus_alpha"),
beta=nn.softplus(beta, name="softplus_beta"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
|
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
|
list-to-tree.js
|
/**
* Created by DenQ on 31.08.2015.
*/
var LTT, list, ltt;
LTT = (function() {
LTT.prototype.groupParent = [];
LTT.prototype.key_id = 'id';
LTT.prototype.key_parent = 'parent';
LTT.prototype.position = 'position';
LTT.prototype.options = {};
function LTT(list, options) {
this.list = list;
this.options = options != null ? options : {};
this.ParseOptions();
this.list = _.map(_.sortByOrder(this.list, [this.position, this.key_parent, this.key_id], ['asc', 'asc', 'asc']));
this.groupParent = _.uniq(_.pluck(this.list, this.key_parent));
return this;
}
LTT.prototype.ParseOptions = function() {
if (this.options.key_id != null) {
this.key_id = this.options.key_id;
}
if (this.options.key_parent != null) {
this.key_parent = this.options.key_parent;
}
if (this.options.position != null){
this.position = this.options.position;
}
};
LTT.prototype.GetParentItems = function(parent) {
var item, result, _i, _len, _ref;
result = [];
_ref = this.list;
for (_i = 0, _len = _ref.length; _i < _len; _i++) {
item = _ref[_i];
if (item[this.key_parent] === parent) {
if(!item.children) item.children = [];
result.push(item);
}
}
return result;
};
LTT.prototype.GetItemById = function(id) {
var item, _i, _len, _ref;
_ref = this.list;
for (_i = 0, _len = _ref.length; _i < _len; _i++) {
item = _ref[_i];
if (item[this.key_id] === id) {
return item;
}
}
return false;
};
LTT.prototype.GetTree = function() {
var children, i, obj, parentId, result, _i, _j, _len, _len1, _ref;
result = [];
_ref = this.groupParent;
for (_i = 0, _len = _ref.length; _i < _len; _i++) {
parentId = _ref[_i];
obj = this.GetItemById(parentId);
children = this.GetParentItems(parentId);
if (obj === false) {
for (_j = 0, _len1 = children.length; _j < _len1; _j++) {
i = children[_j];
result.push(i);
}
|
} else {
obj.children = children;
}
}
return result;
};
return LTT;
})();
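// Minimal usage sketch (assumes lodash 3.x is loaded globally as `_`, since the
// class relies on _.sortByOrder/_.pluck; the field names below simply mirror the
// defaults declared above):
//   var flat = [
//     {id: 1, parent: 0},
//     {id: 2, parent: 1},
//     {id: 3, parent: 1}
//   ];
//   var tree = new LTT(flat, {key_id: 'id', key_parent: 'parent'}).GetTree();
//   // tree -> [{id: 1, parent: 0, children: [{id: 2, ...}, {id: 3, ...}]}]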
| |
test_server.rs
|
// Copyright 2021 Protocol Labs.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use futures::{channel::oneshot, Future, FutureExt, StreamExt};
use futures_timer::Delay;
use libp2p::{
development_transport,
identity::Keypair,
multiaddr::Protocol,
swarm::{AddressScore, Swarm, SwarmEvent},
Multiaddr, PeerId,
};
use libp2p_autonat::{
Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError,
};
use libp2p_core::{ConnectedPoint, Endpoint};
use libp2p_swarm::DialError;
use std::{num::NonZeroU32, time::Duration};
async fn init_swarm(config: Config) -> Swarm<Behaviour> {
let keypair = Keypair::generate_ed25519();
let local_id = PeerId::from_public_key(&keypair.public());
let transport = development_transport(keypair).await.unwrap();
let behaviour = Behaviour::new(local_id, config);
Swarm::new(transport, behaviour, local_id)
}
async fn init_server(config: Option<Config>) -> (Swarm<Behaviour>, PeerId, Multiaddr) {
let mut config = config.unwrap_or_default();
// Don't do any outbound probes.
config.boot_delay = Duration::from_secs(60);
let mut server = init_swarm(config).await;
let peer_id = *server.local_peer_id();
server
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
let addr = loop {
match server.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => break address,
_ => {}
};
};
(server, peer_id, addr)
}
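// Spawns a client swarm on a background task: it registers the given server as
// its AutoNAT server, optionally listens on a local TCP port and/or advertises a
// dummy external address, and keeps polling until the `kill` channel fires.
// Returns the client's peer id and, if it listened, its listen address.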
async fn spawn_client(
listen: bool,
add_dummy_external_addr: bool,
server_id: PeerId,
server_addr: Multiaddr,
kill: oneshot::Receiver<()>,
) -> (PeerId, Option<Multiaddr>) {
let (tx, rx) = oneshot::channel();
async_std::task::spawn(async move {
let mut client = init_swarm(Config {
boot_delay: Duration::from_millis(100),
refresh_interval: Duration::from_millis(100),
retry_interval: Duration::from_millis(200),
throttle_server_period: Duration::ZERO,
..Default::default()
})
.await;
client
.behaviour_mut()
.add_server(server_id, Some(server_addr));
let peer_id = *client.local_peer_id();
let mut addr = None;
if listen {
client
.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap())
.unwrap();
loop {
match client.select_next_some().await {
SwarmEvent::NewListenAddr { address, .. } => {
addr = Some(address);
break;
}
_ => {}
};
}
}
if add_dummy_external_addr {
let dummy_addr: Multiaddr = "/ip4/127.0.0.1/tcp/42".parse().unwrap();
client.add_external_address(dummy_addr, AddressScore::Infinite);
}
tx.send((peer_id, addr)).unwrap();
let mut kill = kill.fuse();
loop {
futures::select! {
_ = client.select_next_some() => {},
_ = kill => return,
}
}
});
rx.await.unwrap()
}
async fn next_event(swarm: &mut Swarm<Behaviour>) -> Event {
loop {
match swarm.select_next_some().await {
SwarmEvent::Behaviour(event) => {
break event;
}
_ => {}
}
}
}
async fn run_test_with_timeout(test: impl Future) {
futures::select! {
_ = test.fuse() => {},
_ = Delay::new(Duration::from_secs(60)).fuse() => panic!("test timed out")
}
}
#[async_std::test]
async fn test_dial_back() {
let test = async {
let (mut server, server_id, server_addr) = init_server(None).await;
let (_handle, rx) = oneshot::channel();
let (client_id, client_addr) = spawn_client(true, false, server_id, server_addr, rx).await;
let client_port = client_addr
.unwrap()
.into_iter()
.find_map(|p| match p {
Protocol::Tcp(port) => Some(port),
_ => None,
})
.unwrap();
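// Wait for the client's inbound connection and record the IP address the
// server observed on it; together with the client's TCP port this gives the
// address the server is expected to dial back.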
let observed_client_ip = loop {
match server.select_next_some().await {
SwarmEvent::ConnectionEstablished {
peer_id,
endpoint:
ConnectedPoint::Listener {
mut send_back_addr, ..
},
..
} => {
assert_eq!(peer_id, client_id);
let observed_client_ip = loop {
match send_back_addr.pop().unwrap() {
Protocol::Ip4(ip4_addr) => break ip4_addr,
_ => {}
}
};
break observed_client_ip;
}
SwarmEvent::IncomingConnection { .. }
| SwarmEvent::NewListenAddr { .. }
| SwarmEvent::ExpiredListenAddr { .. } => {}
other => panic!("Unexpected swarm event: {:?}.", other),
|
let expect_addr = Multiaddr::empty()
.with(Protocol::Ip4(observed_client_ip))
.with(Protocol::Tcp(client_port))
.with(Protocol::P2p(client_id.into()));
let request_probe_id = match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Request {
peer,
addresses,
probe_id,
}) => {
assert_eq!(peer, client_id);
assert_eq!(addresses.len(), 1);
assert_eq!(addresses[0], expect_addr);
probe_id
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
loop {
match server.select_next_some().await {
SwarmEvent::ConnectionEstablished {
peer_id,
endpoint:
ConnectedPoint::Dialer {
address,
role_override: Endpoint::Dialer,
},
num_established,
concurrent_dial_errors,
} => {
assert_eq!(peer_id, client_id);
assert_eq!(num_established, NonZeroU32::new(2).unwrap());
assert!(concurrent_dial_errors.unwrap().is_empty());
assert_eq!(address, expect_addr);
break;
}
SwarmEvent::Dialing(peer) => assert_eq!(peer, client_id),
SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. } => {}
other => panic!("Unexpected swarm event: {:?}.", other),
}
}
match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Response {
probe_id,
peer,
address,
}) => {
assert_eq!(probe_id, request_probe_id);
assert_eq!(peer, client_id);
assert_eq!(address, expect_addr);
}
other => panic!("Unexpected behaviour event: {:?}.", other),
}
drop(_handle);
};
run_test_with_timeout(test).await;
}
#[async_std::test]
async fn test_dial_error() {
let test = async {
let (mut server, server_id, server_addr) = init_server(None).await;
let (_handle, rx) = oneshot::channel();
let (client_id, _) = spawn_client(false, true, server_id, server_addr, rx).await;
let request_probe_id = match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Request { peer, probe_id, .. }) => {
assert_eq!(peer, client_id);
probe_id
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
loop {
match server.select_next_some().await {
SwarmEvent::OutgoingConnectionError { peer_id, error } => {
assert_eq!(peer_id.unwrap(), client_id);
assert!(matches!(error, DialError::Transport(_)));
break;
}
SwarmEvent::Dialing(peer) => assert_eq!(peer, client_id),
SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. } => {}
other => panic!("Unexpected swarm event: {:?}.", other),
}
}
match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Error {
probe_id,
peer,
error,
}) => {
assert_eq!(probe_id, request_probe_id);
assert_eq!(peer, client_id);
assert_eq!(error, InboundProbeError::Response(ResponseError::DialError));
}
other => panic!("Unexpected behaviour event: {:?}.", other),
}
drop(_handle);
};
run_test_with_timeout(test).await;
}
#[async_std::test]
async fn test_throttle_global_max() {
let test = async {
let (mut server, server_id, server_addr) = init_server(Some(Config {
throttle_clients_global_max: 1,
throttle_clients_period: Duration::from_secs(60),
..Default::default()
}))
.await;
let mut _handles = Vec::new();
for _ in 0..2 {
let (_handle, rx) = oneshot::channel();
spawn_client(true, false, server_id, server_addr.clone(), rx).await;
_handles.push(_handle);
}
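// With a global limit of one dial-back per period, only the first client's
// probe can be answered; the second client's probe must be refused.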
let (first_probe_id, first_peer_id) = match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Request { peer, probe_id, .. }) => {
(probe_id, peer)
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
loop {
match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Error {
peer,
probe_id,
error: InboundProbeError::Response(ResponseError::DialRefused),
}) => {
assert_ne!(first_peer_id, peer);
assert_ne!(first_probe_id, probe_id);
break;
}
Event::InboundProbe(InboundProbeEvent::Response { peer, probe_id, .. }) => {
assert_eq!(first_peer_id, peer);
assert_eq!(first_probe_id, probe_id);
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
}
drop(_handles);
};
run_test_with_timeout(test).await;
}
#[async_std::test]
async fn test_throttle_peer_max() {
let test = async {
let (mut server, server_id, server_addr) = init_server(Some(Config {
throttle_clients_peer_max: 1,
throttle_clients_period: Duration::from_secs(60),
..Default::default()
}))
.await;
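// Per-peer limit of one dial-back per period: the client's first probe is
// answered, its second probe within the same period must be refused.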
let (_handle, rx) = oneshot::channel();
let (client_id, _) = spawn_client(true, false, server_id, server_addr.clone(), rx).await;
let first_probe_id = match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Request { peer, probe_id, .. }) => {
assert_eq!(client_id, peer);
probe_id
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Response { peer, probe_id, .. }) => {
assert_eq!(peer, client_id);
assert_eq!(probe_id, first_probe_id);
}
other => panic!("Unexpected behaviour event: {:?}.", other),
}
match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Error {
peer,
probe_id,
error,
}) => {
assert_eq!(client_id, peer);
assert_ne!(first_probe_id, probe_id);
assert_eq!(
error,
InboundProbeError::Response(ResponseError::DialRefused)
)
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
drop(_handle);
};
run_test_with_timeout(test).await;
}
#[async_std::test]
async fn test_dial_multiple_addr() {
let test = async {
let (mut server, server_id, server_addr) = init_server(Some(Config {
throttle_clients_peer_max: 1,
throttle_clients_period: Duration::from_secs(60),
..Default::default()
}))
.await;
let (_handle, rx) = oneshot::channel();
let (client_id, _) = spawn_client(true, true, server_id, server_addr.clone(), rx).await;
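// The client advertises both its real listen address and an unreachable dummy
// address, so the server receives two candidates; the dial to the first one
// fails and the second one succeeds.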
let dial_addresses = match next_event(&mut server).await {
Event::InboundProbe(InboundProbeEvent::Request {
peer, addresses, ..
}) => {
assert_eq!(addresses.len(), 2);
assert_eq!(client_id, peer);
addresses
}
other => panic!("Unexpected behaviour event: {:?}.", other),
};
loop {
match server.select_next_some().await {
SwarmEvent::ConnectionEstablished {
peer_id,
endpoint:
ConnectedPoint::Dialer {
address,
role_override: Endpoint::Dialer,
},
concurrent_dial_errors,
..
} => {
assert_eq!(peer_id, client_id);
let dial_errors = concurrent_dial_errors.unwrap();
assert_eq!(dial_errors.len(), 1);
assert_eq!(dial_errors[0].0, dial_addresses[0]);
assert_eq!(address, dial_addresses[1]);
break;
}
SwarmEvent::Dialing(peer) => assert_eq!(peer, client_id),
SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. } => {}
other => panic!("Unexpected swarm event: {:?}.", other),
}
}
};
run_test_with_timeout(test).await;
}
|
}
};
|
numpy_generator.py
|
import itertools
import numpy as np
from brian2.parsing.bast import brian_dtype_from_dtype
from brian2.parsing.rendering import NumpyNodeRenderer
from brian2.core.functions import DEFAULT_FUNCTIONS, timestep
from brian2.core.variables import ArrayVariable
from brian2.utils.stringtools import get_identifiers, word_substitute, indent
from brian2.utils.logger import get_logger
from .base import CodeGenerator
__all__ = ['NumpyCodeGenerator']
logger = get_logger(__name__)
class VectorisationError(Exception):
pass
class NumpyCodeGenerator(CodeGenerator):
"""
Numpy language
Essentially Python but vectorised.
"""
class_name = 'numpy'
_use_ufunc_at_vectorisation = True # allow this to be off for testing only
def translate_expression(self, expr):
expr = word_substitute(expr, self.func_name_replacements)
return NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(expr, self.variables).strip()
def translate_statement(self, statement):
# TODO: optimisation, translate arithmetic to a sequence of inplace
# operations like a=b+c -> add(b, c, a)
var, op, expr, comment = (statement.var, statement.op,
statement.expr, statement.comment)
if op == ':=':
op = '='
# For numpy we replace complex expressions involving a single boolean variable into a
# where(boolvar, expr_if_true, expr_if_false)
if (statement.used_boolean_variables is not None and len(statement.used_boolean_variables)==1
and brian_dtype_from_dtype(statement.dtype)=='float'
and statement.complexity_std>sum(statement.complexities.values())):
used_boolvars = statement.used_boolean_variables
bool_simp = statement.boolean_simplified_expressions
boolvar = used_boolvars[0]
for bool_assigns, simp_expr in bool_simp.items():
_, boolval = bool_assigns[0]
if boolval:
expr_true = simp_expr
else:
expr_false = simp_expr
code = f'{var} {op} _numpy.where({boolvar}, {expr_true}, {expr_false})'
else:
code = f"{var} {op} {self.translate_expression(expr)}"
if len(comment):
code += f" # {comment}"
return code
def ufunc_at_vectorisation(self, statement, variables, indices,
conditional_write_vars, created_vars, used_variables):
if not self._use_ufunc_at_vectorisation:
raise VectorisationError()
# Avoids circular import
from brian2.devices.device import device
# See https://github.com/brian-team/brian2/pull/531 for explanation
used = set(get_identifiers(statement.expr))
used = used.intersection(k for k in list(variables.keys()) if k in indices and indices[k]!='_idx')
used_variables.update(used)
if statement.var in used_variables:
raise VectorisationError()
expr = NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(statement.expr)
if statement.op == ':=' or indices[statement.var] == '_idx' or not statement.inplace:
if statement.op == ':=':
op = '='
else:
op = statement.op
line = f'{statement.var} {op} {expr}'
elif statement.inplace:
if statement.op == '+=':
ufunc_name = '_numpy.add'
elif statement.op == '*=':
ufunc_name = '_numpy.multiply'
elif statement.op == '/=':
ufunc_name = '_numpy.divide'
elif statement.op == '-=':
ufunc_name = '_numpy.subtract'
else:
raise VectorisationError()
array_name = device.get_array_name(variables[statement.var])
idx = indices[statement.var]
line = f'{ufunc_name}.at({array_name}, {idx}, {expr})'
line = self.conditional_write(line, statement, variables,
conditional_write_vars=conditional_write_vars,
created_vars=created_vars)
else:
raise VectorisationError()
if len(statement.comment):
line += f" # {statement.comment}"
return line
def vectorise_code(self, statements, variables, variable_indices, index='_idx'):
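# Strategy: emit vectorised numpy code for each statement, using ufunc.at for
# in-place updates so that repeated indices are handled correctly; if a
# statement cannot be expressed this way, ufunc_at_vectorisation raises
# VectorisationError and we fall back to a plain (slow) Python loop below.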
created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
try:
lines = []
used_variables = set()
for statement in statements:
lines.append(f'# Abstract code: {statement.var} {statement.op} {statement.expr}')
# We treat every statement individually with its own read and write code
# to be on the safe side
read, write, indices, conditional_write_vars = self.arrays_helper([statement])
# We make sure that we only add code to `lines` after it went
# through completely
ufunc_lines = []
# No need to load a variable if it is only in read because of
# the in-place operation
if (statement.inplace and
variable_indices[statement.var] != '_idx' and
statement.var not in get_identifiers(statement.expr)):
read = read - {statement.var}
ufunc_lines.extend(self.read_arrays(read, write, indices,
variables, variable_indices))
ufunc_lines.append(self.ufunc_at_vectorisation(statement,
variables,
variable_indices,
conditional_write_vars,
created_vars,
used_variables,
))
# Do not write back such values, the ufuncs have modified the
# underlying array already
if statement.inplace and variable_indices[statement.var] != '_idx':
write = write - {statement.var}
ufunc_lines.extend(self.write_arrays([statement], read, write,
variables,
variable_indices))
lines.extend(ufunc_lines)
except VectorisationError:
if self._use_ufunc_at_vectorisation:
logger.info("Failed to vectorise code, falling back on Python loop: note that "
"this will be very slow! Switch to another code generation target for "
"best performance (e.g. cython). First line is: "+str(statements[0]),
once=True)
lines = []
lines.extend(['_full_idx = _idx',
'for _idx in _full_idx:',
' _vectorisation_idx = _idx'
])
read, write, indices, conditional_write_vars = self.arrays_helper(statements)
lines.extend(indent(code) for code in
self.read_arrays(read, write, indices,
variables, variable_indices))
for statement in statements:
line = self.translate_statement(statement)
if statement.var in conditional_write_vars:
lines.append(indent(f'if {conditional_write_vars[statement.var]}:'))
lines.append(indent(line, 2))
else:
lines.append(indent(line))
lines.extend(indent(code) for code in
self.write_arrays(statements, read, write,
variables, variable_indices))
return lines
def read_arrays(self, read, write, indices, variables, variable_indices):
# index and read arrays (index arrays first)
lines = []
for varname in itertools.chain(indices, read):
var = variables[varname]
index = variable_indices[varname]
# if index in iterate_all:
# line = '{varname} = {array_name}'
# else:
# line = '{varname} = {array_name}.take({index})'
# line = line.format(varname=varname, array_name=self.get_array_name(var), index=index)
line = f"{varname} = {self.get_array_name(var)}"
if index not in self.iterate_all:
line += f"[{index}]"
elif varname in write:
# avoid potential issues with aliased variables, see github #259
line += '.copy()'
lines.append(line)
return lines
def write_arrays(self, statements, read, write, variables, variable_indices):
# write arrays
|
def conditional_write(self, line, stmt, variables, conditional_write_vars,
created_vars):
if stmt.var in conditional_write_vars:
subs = {}
index = conditional_write_vars[stmt.var]
# we replace all var with var[index], but actually we use this repl_string first because
# we don't want to end up with lines like x[not_refractory[not_refractory]] when
# multiple substitution passes are invoked
repl_string = '#$(@#&$@$*U#@)$@(#' # this string shouldn't occur anywhere I hope! :)
for varname, var in list(variables.items()):
if isinstance(var, ArrayVariable) and not var.scalar:
subs[varname] = f"{varname}[{repl_string}]"
# all newly created vars are arrays and will need indexing
for varname in created_vars:
subs[varname] = f"{varname}[{repl_string}]"
# Also index _vectorisation_idx so that e.g. rand() works correctly
subs['_vectorisation_idx'] = f"_vectorisation_idx[{repl_string}]"
line = word_substitute(line, subs)
line = line.replace(repl_string, index)
return line
def translate_one_statement_sequence(self, statements, scalar=False):
variables = self.variables
variable_indices = self.variable_indices
read, write, indices, conditional_write_vars = self.arrays_helper(statements)
lines = []
all_unique = not self.has_repeated_indices(statements)
if scalar or all_unique:
# Simple translation
lines.extend(self.read_arrays(read, write, indices, variables,
variable_indices))
created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
for stmt in statements:
line = self.translate_statement(stmt)
line = self.conditional_write(line, stmt, variables,
conditional_write_vars,
created_vars)
lines.append(line)
lines.extend(self.write_arrays(statements, read, write, variables,
variable_indices))
else:
# More complex translation to deal with repeated indices
lines.extend(self.vectorise_code(statements, variables,
variable_indices))
return lines
def determine_keywords(self):
try:
import scipy
scipy_available = True
except ImportError:
scipy_available = False
return {'_scipy_available': scipy_available}
################################################################################
# Implement functions
################################################################################
# Functions that exist under the same name in numpy
for func_name, func in [('sin', np.sin), ('cos', np.cos), ('tan', np.tan),
('sinh', np.sinh), ('cosh', np.cosh), ('tanh', np.tanh),
('exp', np.exp), ('log', np.log), ('log10', np.log10),
('sqrt', np.sqrt), ('arcsin', np.arcsin),
('arccos', np.arccos), ('arctan', np.arctan),
('abs', np.abs), ('sign', np.sign)]:
DEFAULT_FUNCTIONS[func_name].implementations.add_implementation(NumpyCodeGenerator,
code=func)
# Functions that are implemented in a somewhat special way
def randn_func(vectorisation_idx):
try:
N = len(vectorisation_idx)
return np.random.randn(N)
except TypeError:
# scalar value
return np.random.randn()
def rand_func(vectorisation_idx):
try:
N = len(vectorisation_idx)
return np.random.rand(N)
except TypeError:
# scalar value
return np.random.rand()
def poisson_func(lam, vectorisation_idx):
try:
N = len(vectorisation_idx)
return np.random.poisson(lam, size=N)
except TypeError:
# scalar value
return np.random.poisson(lam)
DEFAULT_FUNCTIONS['randn'].implementations.add_implementation(NumpyCodeGenerator,
code=randn_func)
DEFAULT_FUNCTIONS['rand'].implementations.add_implementation(NumpyCodeGenerator,
code=rand_func)
DEFAULT_FUNCTIONS['poisson'].implementations.add_implementation(NumpyCodeGenerator,
code=poisson_func)
clip_func = lambda array, a_min, a_max: np.clip(array, a_min, a_max)
DEFAULT_FUNCTIONS['clip'].implementations.add_implementation(NumpyCodeGenerator,
code=clip_func)
int_func = lambda value: np.int32(value)
DEFAULT_FUNCTIONS['int'].implementations.add_implementation(NumpyCodeGenerator,
code=int_func)
ceil_func = lambda value: np.int32(np.ceil(value))
DEFAULT_FUNCTIONS['ceil'].implementations.add_implementation(NumpyCodeGenerator,
code=ceil_func)
floor_func = lambda value: np.int32(np.floor(value))
DEFAULT_FUNCTIONS['floor'].implementations.add_implementation(NumpyCodeGenerator,
code=floor_func)
# We need to explicitly add an implementation for the timestep function,
# otherwise Brian would *add* units during simulation, thinking that the
# timestep function would not work correctly otherwise. This would slow the
# function down significantly.
DEFAULT_FUNCTIONS['timestep'].implementations.add_implementation(NumpyCodeGenerator,
code=timestep)
|
lines = []
for varname in write:
var = variables[varname]
index_var = variable_indices[varname]
# check if all operations were inplace and we're operating on the
# whole vector, if so we don't need to write the array back
if index_var not in self.iterate_all or varname in read:
all_inplace = False
else:
all_inplace = True
for stmt in statements:
if stmt.var == varname and not stmt.inplace:
all_inplace = False
break
if not all_inplace:
line = self.get_array_name(var)
if index_var in self.iterate_all:
line = f"{line}[:]"
else:
line = f"{line}[{index_var}]"
line = f"{line} = {varname}"
lines.append(line)
return lines
|
wordlist.py
|
wl = ["aah",
"aaron",
"aba",
"ababa",
"aback",
"abase",
"abash",
"abate",
"abbas",
"abbe",
"abbey",
"abbot",
"abbott",
"abc",
"abe",
"abed",
"abel",
"abet",
"abide",
"abject",
"ablaze",
"able",
"abner",
"abo",
"abode",
"abort",
"about",
"above",
"abrade",
"abram",
"absorb",
"abuse",
"abut",
"abyss",
"acadia",
"accra",
"accrue",
"ace",
"acetic",
"ache",
"acid",
"acidic",
"acm",
"acme",
"acorn",
"acre",
"acrid",
"act",
"acton",
"actor",
"acts",
"acuity",
"acute",
"ada",
"adage",
"adagio",
"adair",
"adam",
"adams",
"adapt",
"add",
"added",
"addict",
"addis",
"addle",
"adele",
"aden",
"adept",
"adieu",
"adjust",
"adler",
"admit",
"admix",
"ado",
"adobe",
"adonis",
"adopt",
"adore",
"adorn",
"adult",
"advent",
"advert",
"advise",
"aegis",
"aeneid",
"afar",
"affair",
"affine",
"affix",
"afire",
"afoot",
"afraid",
"africa",
"afro",
"aft",
"again",
"agate",
"agave",
"age",
"age",
"agenda",
"agent",
"agile",
"aging",
"agnes",
"agnew",
"ago",
"agone",
"agony",
"agree",
"ague",
"agway",
"ahead",
"ahem",
"ahoy",
"aid",
"aida",
"aide",
"aides",
"aiken",
"ail",
"aile",
"aim",
"ainu",
"air",
"aires",
"airman",
"airway",
"airy",
"aisle",
"ajar",
"ajax",
"akers",
"akin",
"akron",
"ala",
"alai",
"alamo",
"alan",
"alarm",
"alaska",
"alb",
"alba",
"album",
"alcoa",
"alden",
"alder",
"ale",
"alec",
"aleck",
"aleph",
"alert",
"alex",
"alexei",
"alga",
"algae",
"algal",
"alger",
"algol",
"ali",
"alia",
"alias",
"alibi",
"alice",
"alien",
"alight",
"align",
"alike",
"alive",
"all",
"allah",
"allan",
"allay",
"allen",
"alley",
"allied",
"allis",
"allot",
"allow",
"alloy",
"allure",
"ally",
"allyl",
"allyn",
"alma",
"almost",
"aloe",
"aloft",
"aloha",
"alone",
"along",
"aloof",
"aloud",
"alp",
"alpha",
"alps",
"also",
"alsop",
"altair",
"altar",
"alter",
"alto",
"alton",
"alum",
"alumni",
"alva",
"alvin",
"alway",
"ama",
"amass",
"amaze",
"amber",
"amble",
"ambush",
"amen",
"amend",
"ames",
"ami",
"amid",
"amide",
"amigo",
"amino",
"amiss",
"amity",
"amman",
"ammo",
"amoco",
"amok",
"among",
"amort",
"amos",
"amp",
"ampere",
"ampex",
"ample",
"amply",
"amra",
"amulet",
"amuse",
"amy",
"ana",
"and",
"andes",
"andre",
"andrew",
"andy",
"anent",
"anew",
"angel",
"angelo",
"anger",
"angie",
"angle",
"anglo",
"angola",
"angry",
"angst",
"angus",
"ani",
"anion",
"anise",
"anita",
"ankle",
"ann",
"anna",
"annal",
"anne",
"annex",
"annie",
"annoy",
"annul",
"annuli",
"annum",
"anode",
"ansi",
"answer",
"ant",
"ante",
"anti",
"antic",
"anton",
"anus",
"anvil",
"any",
"anyhow",
"anyway",
"aok",
"aorta",
"apart",
"apathy",
"ape",
"apex",
"aphid",
"aplomb",
"appeal",
"append",
"apple",
"apply",
"april",
"apron",
"apse",
"apt",
"aqua",
"arab",
"araby",
"arc",
"arcana",
"arch",
"archer",
"arden",
"ardent",
"are",
"area",
"arena",
"ares",
"argive",
"argo",
"argon",
"argot",
"argue",
"argus",
"arhat",
"arid",
"aries",
"arise",
"ark",
"arlen",
"arlene",
"arm",
"armco",
"army",
"arnold",
"aroma",
"arose",
"arpa",
"array",
"arrear",
"arrow",
"arson",
"art",
"artery",
"arthur",
"artie",
"arty",
"aruba",
"arum",
"aryl",
"ascend",
"ash",
"ashen",
"asher",
"ashley",
"ashy",
"asia",
"aside",
"ask",
"askew",
"asleep",
"aspen",
"aspire",
"ass",
"assai",
"assam",
"assay",
"asset",
"assort",
"assure",
"aster",
"astm",
"astor",
"astral",
"ate",
"athens",
"atlas",
"atom",
"atomic",
"atone",
"atop",
"attic",
"attire",
"aubrey",
"audio",
"audit",
"aug",
"auger",
"augur",
"august",
"auk",
"aunt",
"aura",
"aural",
"auric",
"austin",
"auto",
"autumn",
"avail",
"ave",
"aver",
"avert",
"avery",
"aviate",
"avid",
"avis",
"aviv",
"avoid",
"avon",
"avow",
"await",
"awake",
"award",
"aware",
"awash",
"away",
"awe",
"awful",
"awl",
"awn",
"awoke",
"awry",
"axe",
"axes",
"axial",
"axiom",
"axis",
"axle",
"axon",
"aye",
"ayers",
"aztec",
"azure",
"babe",
"babel",
"baby",
"bach",
"back",
"backup",
"bacon",
"bad",
"bade",
"baden",
"badge",
"baffle",
"bag",
"baggy",
"bah",
"bahama",
"bail",
"baird",
"bait",
"bake",
"baku",
"bald",
"baldy",
"bale",
"bali",
"balk",
"balkan",
"balky",
"ball",
"balled",
"ballot",
"balm",
"balmy",
"balsa",
"bam",
"bambi",
"ban",
"banal",
"band",
"bandit",
"bandy",
"bane",
"bang",
"banish",
"banjo",
"bank",
"banks",
"bantu",
"bar",
"barb",
"bard",
"bare",
"barfly",
"barge",
"bark",
"barley",
"barn",
"barnes",
"baron",
"barony",
"barr",
"barre",
"barry",
"barter",
"barth",
"barton",
"basal",
"base",
"basel",
"bash",
"basic",
"basil",
"basin",
"basis",
"bask",
"bass",
"bassi",
"basso",
"baste",
"bat",
"batch",
"bate",
"bater",
"bates",
"bath",
"bathe",
"batik",
"baton",
"bator",
"batt",
"bauble",
"baud",
"bauer",
"bawd",
"bawdy",
"bawl",
"baxter",
"bay",
"bayda",
"bayed",
"bayou",
"bazaar",
"bbb",
"bbbb",
"bcd",
"beach",
"bead",
"beady",
"beak",
"beam",
"bean",
"bear",
"beard",
"beast",
"beat",
"beau",
"beauty",
"beaux",
"bebop",
"becalm",
"beck",
"becker",
"becky",
"bed",
"bedim",
"bee",
"beebe",
"beech",
"beef",
"beefy",
"been",
"beep",
"beer",
"beet",
"befall",
"befit",
"befog",
"beg",
"began",
"beget",
"beggar",
"begin",
"begun",
"behind",
"beige",
"being",
"beirut",
"bel",
"bela",
"belch",
"belfry",
"belie",
"bell",
"bella",
"belle",
"belly",
"below",
"belt",
"bema",
"beman",
"bemoan",
"ben",
"bench",
"bend",
"bender",
"benny",
"bent",
"benz",
"berea",
"bereft",
"beret",
"berg",
"berlin",
"bern",
"berne",
"bernet",
"berra",
"berry",
"bert",
"berth",
"beryl",
"beset",
"bess",
"bessel",
"best",
"bestir",
"bet",
"beta",
"betel",
"beth",
"bethel",
"betsy",
"bette",
"betty",
"bevel",
"bevy",
"beware",
"bey",
"bezel",
"bhoy",
"bias",
"bib",
"bibb",
"bible",
"bicep",
"biceps",
"bid",
"biddy",
"bide",
"bien",
"big",
"biggs",
"bigot",
"bile",
"bilge",
"bilk",
"bill",
"billow",
"billy",
"bin",
"binary",
"bind",
"bing",
"binge",
"bingle",
"bini",
"biota",
"birch",
"bird",
"birdie",
"birth",
"bison",
"bisque",
"bit",
"bitch",
"bite",
"bitt",
"bitten",
"biz",
"bizet",
"blab",
"black",
"blade",
"blair",
"blake",
"blame",
"blanc",
"bland",
"blank",
"blare",
"blast",
"blat",
"blatz",
"blaze",
"bleak",
"bleat",
"bled",
"bleed",
"blend",
"bless",
"blest",
"blew",
"blimp",
"blind",
"blink",
"blinn",
"blip",
"bliss",
"blithe",
"blitz",
"bloat",
"blob",
"bloc",
"bloch",
"block",
"bloke",
"blond",
"blonde",
"blood",
"bloom",
"bloop",
"blot",
"blotch",
"blow",
"blown",
"blue",
"bluet",
"bluff",
"blum",
"blunt",
"blur",
"blurt",
"blush",
"blvd",
"blythe",
"bmw",
"boa",
"boar",
"board",
"boast",
"boat",
"bob",
"bobbin",
"bobby",
"bobcat",
"boca",
"bock",
"bode",
"body",
"bog",
"bogey",
"boggy",
"bogus",
"bogy",
"bohr",
"boil",
"bois",
"boise",
"bold",
"bole",
"bolo",
"bolt",
"bomb",
"bombay",
"bon",
"bona",
"bond",
"bone",
"bong",
"bongo",
"bonn",
"bonus",
"bony",
"bonze",
"boo",
"booby",
"boogie",
"book",
"booky",
"boom",
"boon",
"boone",
"boor",
"boost",
"boot",
"booth",
"booty",
"booze",
"bop",
"borax",
"border",
"bore",
"borg",
"boric",
"boris",
"born",
"borne",
"borneo",
"boron",
"bosch",
"bose",
"bosom",
"boson",
"boss",
"boston",
"botch",
"both",
"bottle",
"bough",
"bouncy",
"bound",
"bourn",
"bout",
"bovine",
"bow",
"bowel",
"bowen",
"bowie",
"bowl",
"box",
"boxy",
"boy",
"boyar",
"boyce",
"boyd",
"boyle",
"brace",
"bract",
"brad",
"brady",
"brae",
"brag",
"bragg",
"braid",
"brain",
"brainy",
"brake",
"bran",
"brand",
"brandt",
"brant",
"brash",
"brass",
"brassy",
"braun",
"brave",
"bravo",
"brawl",
"bray",
"bread",
"break",
"bream",
"breath",
"bred",
"breed",
"breeze",
"bremen",
"brent",
"brest",
"brett",
"breve",
"brew",
"brian",
"briar",
"bribe",
"brice",
"brick",
"bride",
"brief",
"brig",
"briggs",
"brim",
"brine",
"bring",
"brink",
"briny",
"brisk",
"broad",
"brock",
"broil",
"broke",
"broken",
"bronx",
"brood",
"brook",
"brooke",
"broom",
"broth",
"brow",
"brown",
"browse",
"bruce",
"bruit",
"brunch",
"bruno",
"brunt",
"brush",
"brute",
"bryan",
"bryant",
"bryce",
"bryn",
"bstj",
"btl",
"bub",
"buck",
"bud",
"budd",
"buddy",
"budge",
"buena",
"buenos",
"buff",
"bug",
"buggy",
"bugle",
"buick",
"build",
"built",
"bulb",
"bulge",
"bulk",
"bulky",
"bull",
"bully",
"bum",
"bump",
"bun",
"bunch",
"bundy",
"bunk",
"bunny",
"bunt",
"bunyan",
"buoy",
"burch",
"bureau",
"buret",
"burg",
"buried",
"burke",
"burl",
"burly",
"burma",
"burn",
"burnt",
"burp",
"burr",
"burro",
"burst",
"burt",
"burton",
"burtt",
"bury",
"bus",
"busch",
"bush",
"bushel",
"bushy",
"buss",
"bust",
"busy",
"but",
"butane",
"butch",
"buteo",
"butt",
"butte",
"butyl",
"buxom",
"buy",
"buyer",
"buzz",
"buzzy",
"bye",
"byers",
"bylaw",
"byline",
"byrd",
"byrne",
"byron",
"byte",
"byway",
"byword",
"cab",
"cabal",
"cabin",
"cable",
"cabot",
"cacao",
"cache",
"cacm",
"cacti",
"caddy",
"cadent",
"cadet",
"cadre",
"cady",
"cafe",
"cage",
"cagey",
"cahill",
"caiman",
"cain",
"caine",
"cairn",
"cairo",
"cake",
"cal",
"calder",
"caleb",
"calf",
"call",
"calla",
"callus",
"calm",
"calve",
"cam",
"camber",
"came",
"camel",
"cameo",
"camp",
"can",
"canal",
"canary",
"cancer",
"candle",
"candy",
"cane",
"canis",
"canna",
"cannot",
"canny",
"canoe",
"canon",
"canopy",
"cant",
"canto",
"canton",
"cap",
"cape",
"caper",
"capo",
"car",
"carbon",
"card",
"care",
"caress",
"caret",
"carey",
"cargo",
"carib",
"carl",
"carla",
"carlo",
"carne",
"carob",
"carol",
"carp",
"carpet",
"carr",
"carrie",
"carry",
"carson",
"cart",
"carte",
"caruso",
"carve",
"case",
"casey",
"cash",
"cashew",
"cask",
"casket",
"cast",
"caste",
"cat",
"catch",
"cater",
"cathy",
"catkin",
"catsup",
"cauchy",
"caulk",
"cause",
"cave",
"cavern",
"cavil",
"cavort",
"caw",
"cayuga",
"cbs",
"ccc",
"cccc",
"cdc",
"cease",
"cecil",
"cedar",
"cede",
"ceil",
"celia",
"cell",
"census",
"cent",
"ceres",
"cern",
"cetera",
"cetus",
"chad",
"chafe",
"chaff",
"chai",
"chain",
"chair",
"chalk",
"champ",
"chance",
"chang",
"chant",
"chao",
"chaos",
"chap",
"chapel",
"char",
"chard",
"charm",
"chart",
"chase",
"chasm",
"chaste",
"chat",
"chaw",
"cheap",
"cheat",
"check",
"cheek",
"cheeky",
"cheer",
"chef",
"chen",
"chert",
"cherub",
"chess",
"chest",
"chevy",
"chew",
"chi",
"chic",
"chick",
"chide",
"chief",
"child",
"chile",
"chili",
"chill",
"chilly",
"chime",
"chin",
"china",
"chine",
"chink",
"chip",
"chirp",
"chisel",
"chit",
"chive",
"chock",
"choir",
"choke",
"chomp",
"chop",
"chopin",
"choral",
"chord",
"chore",
"chose",
"chosen",
"chou",
"chow",
"chris",
"chub",
"chuck",
"chuff",
"chug",
"chum",
"chump",
"chunk",
"churn",
"chute",
"cia",
"cicada",
"cider",
"cigar",
"cilia",
"cinch",
"cindy",
"cipher",
"circa",
"circe",
"cite",
"citrus",
"city",
"civet",
"civic",
"civil",
"clad",
"claim",
"clam",
"clammy",
"clamp",
"clan",
"clang",
"clank",
"clap",
"clara",
"clare",
"clark",
"clarke",
"clash",
"clasp",
"class",
"claus",
"clause",
"claw",
"clay",
"clean",
"clear",
"cleat",
"cleft",
"clerk",
"cliche",
"click",
"cliff",
"climb",
"clime",
"cling",
"clink",
"clint",
"clio",
"clip",
"clive",
"cloak",
"clock",
"clod",
"clog",
"clomp",
"clone",
"close",
"closet",
"clot",
"cloth",
"cloud",
"clout",
"clove",
"clown",
"cloy",
"club",
"cluck",
"clue",
"cluj",
"clump",
"clumsy",
"clung",
"clyde",
"coach",
"coal",
"coast",
"coat",
"coax",
"cobb",
"cobble",
"cobol",
"cobra",
"coca",
"cock",
"cockle",
"cocky",
"coco",
"cocoa",
"cod",
"coda",
"coddle",
"code",
"codon",
"cody",
"coed",
"cog",
"cogent",
"cohen",
"cohn",
"coil",
"coin",
"coke",
"col",
"cola",
"colby",
"cold",
"cole",
"colon",
"colony",
"colt",
"colza",
"coma",
"comb",
"combat",
"come",
"comet",
"cometh",
"comic",
"comma",
"con",
"conch",
"cone",
"coney",
"congo",
"conic",
"conn",
"conner",
"conway",
"cony",
"coo",
"cook",
"cooke",
"cooky",
"cool",
"cooley",
"coon",
"coop",
"coors",
"coot",
"cop",
"cope",
"copra",
"copy",
"coral",
"corbel",
"cord",
"core",
"corey",
"cork",
"corn",
"corny",
"corp",
"corps",
"corvus",
"cos",
"cosec",
"coset",
"cosh",
"cost",
"costa",
"cosy",
"cot",
"cotta",
"cotty",
"couch",
"cough",
"could",
"count",
"coup",
"coupe",
"court",
"cousin",
"cove",
"coven",
"cover",
"covet",
"cow",
"cowan",
"cowl",
"cowman",
"cowry",
"cox",
"coy",
"coyote",
"coypu",
"cozen",
"cozy",
"cpa",
"crab",
"crack",
"craft",
"crag",
"craig",
"cram",
"cramp",
"crane",
"crank",
"crap",
"crash",
"crass",
"crate",
"crater",
"crave",
"craw",
"crawl",
"craze",
"crazy",
"creak",
"cream",
"credit",
"credo",
"creed",
"creek",
"creep",
"creole",
"creon",
"crepe",
"crept",
"cress",
"crest",
"crete",
"crew",
"crib",
"cried",
"crime",
"crimp",
"crisp",
"criss",
"croak",
"crock",
"crocus",
"croft",
"croix",
"crone",
"crony",
"crook",
"croon",
"crop",
"cross",
"crow",
"crowd",
"crown",
"crt",
"crud",
"crude",
"cruel",
"crumb",
"crump",
"crush",
"crust",
"crux",
"cruz",
"cry",
"crypt",
"cub",
"cuba",
"cube",
"cubic",
"cud",
"cuddle",
"cue",
"cuff",
"cull",
"culpa",
"cult",
"cumin",
"cuny",
"cup",
"cupful",
"cupid",
"cur",
"curb",
"curd",
"cure",
"curfew",
"curia",
"curie",
"curio",
"curl",
"curry",
"curse",
"curt",
"curve",
"cusp",
"cut",
"cute",
"cutlet",
"cycad",
"cycle",
"cynic",
"cyril",
"cyrus",
"cyst",
"czar",
"czech",
"dab",
"dacca",
"dactyl",
"dad",
"dada",
"daddy",
"dade",
"daffy",
"dahl",
"dahlia",
"dairy",
"dais",
"daisy",
"dakar",
"dale",
"daley",
"dally",
"daly",
"dam",
"dame",
"damn",
"damon",
"damp",
"damsel",
"dan",
"dana",
"dance",
"dandy",
"dane",
"dang",
"dank",
"danny",
"dante",
"dar",
"dare",
"dark",
"darken",
"darn",
"darry",
"dart",
"dash",
"data",
"date",
"dater",
"datum",
"daub",
"daunt",
"dave",
"david",
"davis",
"davit",
"davy",
"dawn",
"dawson",
"day",
"daze",
"ddd",
"dddd",
"deacon",
"dead",
"deaf",
"deal",
"dealt",
"dean",
"deane",
"dear",
"death",
"debar",
"debby",
"debit",
"debra",
"debris",
"debt",
"debug",
"debut",
"dec",
"decal",
"decay",
"decca",
"deck",
"decker",
"decor",
"decree",
"decry",
"dee",
"deed",
"deem",
"deep",
"deer",
"deere",
"def",
"defer",
"deform",
"deft",
"defy",
"degas",
"degum",
"deify",
"deign",
"deity",
"deja",
"del",
"delay",
"delft",
"delhi",
"delia",
"dell",
"della",
"delta",
"delve",
"demark",
"demit",
"demon",
"demur",
"den",
"deneb",
"denial",
"denny",
"dense",
"dent",
"denton",
"deny",
"depot",
"depth",
"depute",
"derby",
"derek",
"des",
"desist",
"desk",
"detach",
"deter",
"deuce",
"deus",
"devil",
"devoid",
"devon",
"dew",
"dewar",
"dewey",
"dewy",
"dey",
"dhabi",
"dial",
"diana",
"diane",
"diary",
"dibble",
"dice",
"dick",
"dicta",
"did",
"dido",
"die",
"died",
"diego",
"diem",
"diesel",
"diet",
"diety",
"dietz",
"dig",
"digit",
"dilate",
"dill",
"dim",
"dime",
"din",
"dinah",
"dine",
"ding",
"dingo",
"dingy",
"dint",
"diode",
"dip",
"dirac",
"dire",
"dirge",
"dirt",
"dirty",
"dis",
"disc",
"dish",
"disk",
"disney",
"ditch",
"ditto",
"ditty",
"diva",
"divan",
"dive",
"dixie",
"dixon",
"dizzy",
"dna",
"dobbs",
"dobson",
"dock",
"docket",
"dod",
"dodd",
"dodge",
"dodo",
"doe",
"doff",
"dog",
"doge",
"dogma",
"dolan",
"dolce",
"dole",
"doll",
"dolly",
"dolt",
"dome",
"don",
"done",
"doneck",
"donna",
"donor",
"doom",
"door",
"dope",
"dora",
"doria",
"doric",
"doris",
"dose",
"dot",
"dote",
"double",
"doubt",
"douce",
"doug",
"dough",
"dour",
"douse",
"dove",
"dow",
"dowel",
"down",
"downs",
"dowry",
"doyle",
"doze",
"dozen",
"drab",
"draco",
"draft",
"drag",
"drain",
"drake",
"dram",
"drama",
"drank",
"drape",
"draw",
"drawl",
"drawn",
"dread",
"dream",
"dreamy",
"dreg",
"dress",
"dressy",
"drew",
"drib",
"dried",
"drier",
"drift",
"drill",
"drink",
"drip",
"drive",
"droll",
"drone",
"drool",
"droop",
"drop",
"dross",
"drove",
"drown",
"drub",
"drug",
"druid",
"drum",
"drunk",
"drury",
"dry",
"dryad",
"dual",
"duane",
"dub",
"dubhe",
"dublin",
"ducat",
"duck",
"duct",
"dud",
"due",
"duel",
"duet",
"duff",
"duffy",
"dug",
"dugan",
"duke",
"dull",
"dully",
"dulse",
"duly",
"duma",
"dumb",
"dummy",
"dump",
"dumpy",
"dun",
"dunce",
"dune",
"dung",
"dunham",
"dunk",
"dunlop",
"dunn",
"dupe",
"durer",
"dusk",
"dusky",
"dust",
"dusty",
"dutch",
"duty",
"dwarf",
"dwell",
"dwelt",
"dwight",
"dwyer",
"dyad",
"dye",
"dyer",
"dying",
"dyke",
"dylan",
"dyne",
"each",
"eagan",
"eager",
"eagle",
"ear",
"earl",
"earn",
"earth",
"ease",
"easel",
"east",
"easy",
"eat",
"eaten",
"eater",
"eaton",
"eave",
"ebb",
"eben",
"ebony",
"echo",
"eclat",
"ecole",
"eddie",
"eddy",
"eden",
"edgar",
"edge",
"edgy",
"edict",
"edify",
"edit",
"edith",
"editor",
"edna",
"edt",
"edwin",
"eee",
"eeee",
"eel",
"eeoc",
"eerie",
"efface",
"effie",
"efg",
"eft",
"egan",
"egg",
"ego",
"egress",
"egret",
"egypt",
"eider",
"eight",
"eire",
"eject",
"eke",
"elan",
"elate",
"elba",
"elbow",
"elder",
"eldon",
"elect",
"elegy",
"elena",
"eleven",
"elfin",
"elgin",
"eli",
"elide",
"eliot",
"elite",
"elk",
"ell",
"ella",
"ellen",
"ellis",
"elm",
"elmer",
"elope",
"else",
"elsie",
"elton",
"elude",
"elute",
"elves",
"ely",
"embalm",
"embark",
"embed",
"ember",
"emcee",
"emery",
"emil",
"emile",
"emily",
"emit",
"emma",
"emory",
"empty",
"enact",
"enamel",
"end",
"endow",
"enemy",
"eng",
"engel",
"engle",
"engulf",
"enid",
"enjoy",
"enmity",
"enoch",
"enol",
"enos",
"enrico",
"ensue",
"enter",
"entrap",
"entry",
"envoy",
"envy",
"epa",
"epic",
"epoch",
"epoxy",
"epsom",
"equal",
"equip",
"era",
"erase",
"erato",
"erda",
"ere",
"erect",
"erg",
"eric",
"erich",
"erie",
"erik",
"ernest",
"ernie",
"ernst",
"erode",
"eros",
"err",
"errand",
"errol",
"error",
"erupt",
"ervin",
"erwin",
"essay",
"essen",
"essex",
"est",
"ester",
"estes",
"estop",
"eta",
"etc",
"etch",
"ethan",
"ethel",
"ether",
"ethic",
"ethos",
"ethyl",
"etude",
"eucre",
"euler",
"eureka",
"eva",
"evade",
"evans",
"eve",
"even",
"event",
"every",
"evict",
"evil",
"evoke",
"evolve",
"ewe",
"ewing",
"exact",
"exalt",
"exam",
"excel",
"excess",
"exert",
"exile",
"exist",
"exit",
"exodus",
"expel",
"extant",
"extent",
"extol",
"extra",
"exude",
"exult",
"exxon",
"eye",
"eyed",
"ezra",
"faa",
"faber",
"fable",
"face",
"facet",
"facile",
"fact",
"facto",
"fad",
"fade",
"faery",
"fag",
"fahey",
"fail",
"fain",
"faint",
"fair",
"fairy",
"faith",
"fake",
"fall",
"false",
"fame",
"fan",
"fancy",
"fang",
"fanny",
"fanout",
"far",
"farad",
"farce",
"fare",
"fargo",
"farley",
"farm",
"faro",
"fast",
"fat",
"fatal",
"fate",
"fatty",
"fault",
"faun",
"fauna",
"faust",
"fawn",
"fay",
"faze",
"fbi",
"fcc",
"fda",
"fear",
"feast",
"feat",
"feb",
"fed",
"fee",
"feed",
"feel",
"feet",
"feign",
"feint",
"felice",
"felix",
"fell",
"felon",
"felt",
"femur",
"fence",
"fend",
"fermi",
"fern",
"ferric",
"ferry",
"fest",
"fetal",
"fetch",
"fete",
"fetid",
"fetus",
"feud",
"fever",
"few",
"fff",
"ffff",
"fgh",
"fiat",
"fib",
"fibrin",
"fiche",
"fide",
"fief",
"field",
"fiend",
"fiery",
"fife",
"fifo",
"fifth",
"fifty",
"fig",
"fight",
"filch",
"file",
"filet",
"fill",
"filler",
"filly",
"film",
"filmy",
"filth",
"fin",
"final",
"finale",
"finch",
"find",
"fine",
"finite",
"fink",
"finn",
"finny",
"fir",
"fire",
"firm",
"first",
"fish",
"fishy",
"fisk",
"fiske",
"fist",
"fit",
"fitch",
"five",
"fix",
"fjord",
"flack",
"flag",
"flail",
"flair",
"flak",
"flake",
"flaky",
"flam",
"flame",
"flank",
"flap",
"flare",
"flash",
"flask",
"flat",
"flatus",
"flaw",
"flax",
"flea",
"fleck",
"fled",
"flee",
"fleet",
"flesh",
"flew",
"flex",
"flick",
"flier",
"flinch",
"fling",
"flint",
"flip",
"flirt",
"flit",
"flo",
"float",
"floc",
"flock",
"floe",
"flog",
"flood",
"floor",
"flop",
"floppy",
"flora",
"flour",
"flout",
"flow",
"flown",
"floyd",
"flu",
"flub",
"flue",
"fluff",
"fluid",
"fluke",
"flung",
"flush",
"flute",
"flux",
"fly",
"flyer",
"flynn",
"fmc",
"foal",
"foam",
"foamy",
"fob",
"focal",
"foci",
"focus",
"fodder",
"foe",
"fog",
"foggy",
"fogy",
"foil",
"foist",
"fold",
"foley",
"folio",
"folk",
"folly",
"fond",
"font",
"food",
"fool",
"foot",
"foote",
"fop",
"for",
"foray",
"force",
"ford",
"fore",
"forge",
"forgot",
"fork",
"form",
"fort",
"forte",
"forth",
"forty",
"forum",
"foss",
"fossil",
"foul",
"found",
"fount",
"four",
"fovea",
"fowl",
"fox",
"foxy",
"foyer",
"fpc",
"frail",
"frame",
"fran",
"franc",
"franca",
"frank",
"franz",
"frau",
"fraud",
"fray",
"freak",
"fred",
"free",
"freed",
"freer",
"frenzy",
"freon",
"fresh",
"fret",
"freud",
"frey",
"freya",
"friar",
"frick",
"fried",
"frill",
"frilly",
"frisky",
"fritz",
"fro",
"frock",
"frog",
"from",
"front",
"frost",
"froth",
"frown",
"froze",
"fruit",
"fry",
"frye",
"ftc",
"fuchs",
"fudge",
"fuel",
"fugal",
"fugue",
"fuji",
"full",
"fully",
"fum",
"fume",
"fun",
"fund",
"fungal",
"fungi",
"funk",
"funny",
"fur",
"furl",
"furry",
"fury",
"furze",
"fuse",
"fuss",
"fussy",
"fusty",
"fuzz",
"fuzzy",
"gab",
"gable",
"gabon",
"gad",
"gadget",
"gaff",
"gaffe",
"gag",
"gage",
"gail",
"gain",
"gait",
"gal",
"gala",
"galaxy",
"gale",
"galen",
"gall",
"gallop",
"galt",
"gam",
"game",
"gamin",
"gamma",
"gamut",
"gander",
"gang",
"gao",
"gap",
"gape",
"gar",
"garb",
"garish",
"garner",
"garry",
"garth",
"gary",
"gas",
"gash",
"gasp",
"gassy",
"gate",
"gates",
"gator",
"gauche",
"gaudy",
"gauge",
"gaul",
"gaunt",
"gaur",
"gauss",
"gauze",
"gave",
"gavel",
"gavin",
"gawk",
"gawky",
"gay",
"gaze",
"gear",
"gecko",
"gee",
"geese",
"geigy",
"gel",
"geld",
"gem",
"gemma",
"gene",
"genie",
"genii",
"genoa",
"genre",
"gent",
"gentry",
"genus",
"gerbil",
"germ",
"gerry",
"get",
"getty",
"ggg",
"gggg",
"ghana",
"ghent",
"ghetto",
"ghi",
"ghost",
"ghoul",
"giant",
"gibbs",
"gibby",
"gibe",
"giddy",
"gift",
"gig",
"gil",
"gila",
"gild",
"giles",
"gill",
"gilt",
"gimbal",
"gimpy",
"gin",
"gina",
"ginn",
"gino",
"gird",
"girl",
"girth",
"gist",
"give",
"given",
"glad",
"gladdy",
"glade",
"glamor",
"gland",
"glans",
"glare",
"glass",
"glaze",
"gleam",
"glean",
"glee",
"glen",
"glenn",
"glib",
"glide",
"glint",
"gloat",
"glob",
"globe",
"glom",
"gloom",
"glory",
"gloss",
"glove",
"glow",
"glue",
"glued",
"gluey",
"gluing",
"glum",
"glut",
"glyph",
"gmt",
"gnarl",
"gnash",
"gnat",
"gnaw",
"gnome",
"gnp",
"gnu",
"goa",
"goad",
"goal",
"goat",
"gob",
"goer",
"goes",
"goff",
"gog",
"goggle",
"gogh",
"gogo",
"gold",
"golf",
"golly",
"gone",
"gong",
"goo",
"good",
"goode",
"goody",
"goof",
"goofy",
"goose",
"gop",
"gordon",
"gore",
"goren",
"gorge",
"gorky",
"gorse",
"gory",
"gosh",
"gospel",
"got",
"gouda",
"gouge",
"gould",
"gourd",
"gout",
"gown",
"gpo",
"grab",
"grace",
"grad",
"grade",
"grady",
"graff",
"graft",
"grail",
"grain",
"grand",
"grant",
"grape",
"graph",
"grasp",
"grass",
"grata",
"grate",
"grater",
"grave",
"gravy",
"gray",
"graze",
"great",
"grebe",
"greed",
"greedy",
"greek",
"green",
"greer",
"greet",
"greg",
"gregg",
"greta",
"grew",
"grey",
"grid",
"grief",
"grieve",
"grill",
"grim",
"grime",
"grimm",
"grin",
"grind",
"grip",
"gripe",
"grist",
"grit",
"groan",
"groat",
"groin",
"groom",
"grope",
"gross",
"groton",
"group",
"grout",
"grove",
"grow",
"growl",
"grown",
"grub",
"gruff",
"grunt",
"gsa",
"guam",
"guano",
"guard",
"guess",
"guest",
"guide",
"guild",
"guile",
"guilt",
"guise",
"guitar",
"gules",
"gulf",
"gull",
"gully",
"gulp",
"gum",
"gumbo",
"gummy",
"gun",
"gunk",
"gunky",
"gunny",
"gurgle",
"guru",
"gus",
"gush",
"gust",
"gusto",
"gusty",
"gut",
"gutsy",
"guy",
"guyana",
"gwen",
"gwyn",
"gym",
"gyp",
"gypsy",
"gyro",
"haag",
"haas",
"habib",
"habit",
"hack",
"had",
"hades",
"hadron",
"hagen",
"hager",
"hague",
"hahn",
"haifa",
"haiku",
"hail",
"hair",
"hairy",
"haiti",
"hal",
"hale",
"haley",
"half",
"hall",
"halma",
"halo",
"halt",
"halvah",
"halve",
"ham",
"hamal",
"hamlin",
"han",
"hand",
"handy",
"haney",
"hang",
"hank",
"hanna",
"hanoi",
"hans",
"hansel",
"hap",
"happy",
"hard",
"hardy",
"hare",
"harem",
"hark",
"harley",
"harm",
"harp",
"harpy",
"harry",
"harsh",
"hart",
"harvey",
"hash",
"hasp",
"hast",
"haste",
"hasty",
"hat",
"hatch",
"hate",
"hater",
"hath",
"hatred",
"haul",
"haunt",
"have",
"haven",
"havoc",
"haw",
"hawk",
"hay",
"haydn",
"hayes",
"hays",
"hazard",
"haze",
"hazel",
"hazy",
"head",
"heady",
"heal",
"healy",
"heap",
"hear",
"heard",
"heart",
"heat",
"heath",
"heave",
"heavy",
"hebe",
"hebrew",
"heck",
"heckle",
"hedge",
"heed",
"heel",
"heft",
"hefty",
"heigh",
"heine",
"heinz",
"heir",
"held",
"helen",
"helga",
"helix",
"hell",
"hello",
"helm",
"helmut",
"help",
"hem",
"hemp",
"hen",
"hence",
"henri",
"henry",
"her",
"hera",
"herb",
"herd",
"here",
"hero",
"heroic",
"heron",
"herr",
"hertz",
"hess",
"hesse",
"hettie",
"hetty",
"hew",
"hewitt",
"hewn",
"hex",
"hey",
"hhh",
"hhhh",
"hiatt",
"hick",
"hicks",
"hid",
"hide",
"high",
"hij",
"hike",
"hill",
"hilly",
"hilt",
"hilum",
"him",
"hind",
"hindu",
"hines",
"hinge",
"hint",
"hip",
"hippo",
"hippy",
"hiram",
"hire",
"hirsch",
"his",
"hiss",
"hit",
"hitch",
"hive",
"hoagy",
"hoar",
"hoard",
"hob",
"hobbs",
"hobby",
"hobo",
"hoc",
"hock",
"hodge",
"hodges",
"hoe",
"hoff",
"hog",
"hogan",
"hoi",
"hokan",
"hold",
"holdup",
"hole",
"holly",
"holm",
"holst",
"holt",
"home",
"homo",
"honda",
"hondo",
"hone",
"honey",
"hong",
"honk",
"hooch",
"hood",
"hoof",
"hook",
"hookup",
"hoop",
"hoot",
"hop",
"hope",
"horde",
"horn",
"horny",
"horse",
"horus",
"hose",
"host",
"hot",
"hotbox",
"hotel",
"hough",
"hound",
"hour",
"house",
"hove",
"hovel",
"hover",
"how",
"howdy",
"howe",
"howl",
"hoy",
"hoyt",
"hub",
"hubbub",
"hubby",
"huber",
"huck",
"hue",
"hued",
"huff",
"hug",
"huge",
"hugh",
"hughes",
"hugo",
"huh",
"hulk",
"hull",
"hum",
"human",
"humid",
"hump",
"humus",
"hun",
"hunch",
"hung",
"hunk",
"hunt",
"hurd",
"hurl",
"huron",
"hurrah",
"hurry",
"hurst",
"hurt",
"hurty",
"hush",
"husky",
"hut",
"hutch",
"hyde",
"hydra",
"hydro",
"hyena",
"hying",
"hyman",
"hymen",
"hymn",
"hymnal",
"iambic",
"ian",
"ibex",
"ibid",
"ibis",
"ibm",
"ibn",
"icc",
"ice",
"icing",
"icky",
"icon",
"icy",
"ida",
"idaho",
"idea",
"ideal",
"idiom",
"idiot",
"idle",
"idol",
"idyll",
"ieee",
"iffy",
"ifni",
"igloo",
"igor",
"iii",
"iiii",
"ijk",
"ike",
"ileum",
"iliac",
"iliad",
"ill",
"illume",
"ilona",
"image",
"imbue",
"imp",
"impel",
"import",
"impute",
"inane",
"inapt",
"inc",
"inca",
"incest",
"inch",
"incur",
"index",
"india",
"indies",
"indy",
"inept",
"inert",
"infect",
"infer",
"infima",
"infix",
"infra",
"ingot",
"inhere",
"injun",
"ink",
"inlay",
"inlet",
"inman",
"inn",
"inner",
"input",
"insect",
"inset",
"insult",
"intend",
"inter",
"into",
"inure",
"invoke",
"ion",
"ionic",
"iota",
"iowa",
"ipso",
"ira",
"iran",
"iraq",
"irate",
"ire",
"irene",
"iris",
"irish",
"irk",
"irma",
"iron",
"irony",
"irs",
"irvin",
"irwin",
"isaac",
"isabel",
"ising",
"isis",
"islam",
"island",
"isle",
"israel",
"issue",
"italy",
"itch",
"item",
"ito",
"itt",
"ivan",
"ive",
"ivory",
"ivy",
"jab",
"jack",
"jacky",
"jacm",
"jacob",
"jacobi",
"jade",
"jag",
"jail",
"jaime",
"jake",
"jam",
"james",
"jan",
"jane",
"janet",
"janos",
"janus",
"japan",
"jar",
"jason",
"java",
"jaw",
"jay",
"jazz",
"jazzy",
"jean",
"jed",
"jeep",
"jeff",
"jejune",
"jelly",
"jenny",
"jeres",
"jerk",
"jerky",
"jerry",
"jersey",
"jess",
"jesse",
"jest",
"jesus",
"jet",
"jew",
"jewel",
"jewett",
"jewish",
"jibe",
"jiffy",
"jig",
"jill",
"jilt",
"jim",
"jimmy",
"jinx",
"jive",
"jjj",
"jjjj",
"jkl",
"joan",
"job",
"jock",
"jockey",
"joe",
"joel",
"joey",
"jog",
"john",
"johns",
"join",
"joint",
"joke",
"jolla",
"jolly",
"jolt",
"jon",
"jonas",
"jones",
"jorge",
"jose",
"josef",
"joshua",
"joss",
"jostle",
"jot",
"joule",
"joust",
"jove",
"jowl",
"jowly",
"joy",
"joyce",
"juan",
"judas",
"judd",
"jude",
"judge",
"judo",
"judy",
"jug",
"juggle",
"juice",
"juicy",
"juju",
"juke",
"jukes",
"julep",
"jules",
"julia",
"julie",
"julio",
"july",
"jumbo",
"jump",
"jumpy",
"junco",
"june",
"junk",
"junky",
"juno",
"junta",
"jura",
"jure",
"juror",
"jury",
"just",
"jut",
"jute",
"kabul",
"kafka",
"kahn",
"kajar",
"kale",
"kalmia",
"kane",
"kant",
"kapok",
"kappa",
"karate",
"karen",
"karl",
"karma",
"karol",
"karp",
"kate",
"kathy",
"katie",
"katz",
"kava",
"kay",
"kayo",
"kazoo",
"keats",
"keel",
"keen",
"keep",
"keg",
"keith",
"keller",
"kelly",
"kelp",
"kemp",
"ken",
"keno",
"kent",
"kenya",
"kepler",
"kept",
"kern",
"kerr",
"kerry",
"ketch",
"kevin",
"key",
"keyed",
"keyes",
"keys",
"khaki",
"khan",
"khmer",
"kick",
"kid",
"kidde",
"kidney",
"kiev",
"kigali",
"kill",
"kim",
"kin",
"kind",
"king",
"kink",
"kinky",
"kiosk",
"kiowa",
"kirby",
"kirk",
"kirov",
"kiss",
"kit",
"kite",
"kitty",
"kiva",
"kivu",
"kiwi",
"kkk",
"kkkk",
"klan",
"klaus",
"klein",
"kline",
"klm",
"klux",
"knack",
"knapp",
"knauer",
"knead",
"knee",
"kneel",
"knelt",
"knew",
"knick",
"knife",
"knit",
"knob",
"knock",
"knoll",
"knot",
"knott",
"know",
"known",
"knox",
"knurl",
"koala",
"koch",
"kodak",
"kola",
"kombu",
"kong",
"koran",
"korea",
"kraft",
"krause",
"kraut",
"krebs",
"kruse",
"kudo",
"kudzu",
"kuhn",
"kulak",
"kurd",
"kurt",
"kyle",
"kyoto",
"lab",
"laban",
"label",
"labia",
"labile",
"lac",
"lace",
"lack",
"lacy",
"lad",
"laden",
"ladle",
"lady",
"lag",
"lager",
"lagoon",
"lagos",
"laid",
"lain",
"lair",
"laity",
"lake",
"lam",
"lamar",
"lamb",
"lame",
"lamp",
"lana",
"lance",
"land",
"lane",
"lang",
"lange",
"lanka",
"lanky",
"lao",
"laos",
"lap",
"lapel",
"lapse",
"larch",
"lard",
"lares",
"large",
"lark",
"larkin",
"larry",
"lars",
"larva",
"lase",
"lash",
"lass",
"lasso",
"last",
"latch",
"late",
"later",
"latest",
"latex",
"lath",
"lathe",
"latin",
"latus",
"laud",
"laue",
"laugh",
"launch",
"laura",
"lava",
"law",
"lawn",
"lawson",
"lax",
"lay",
"layup",
"laze",
"lazy",
"lea",
"leach",
"lead",
"leaf",
"leafy",
"leak",
"leaky",
"lean",
"leap",
"leapt",
"lear",
"learn",
"lease",
"leash",
"least",
"leave",
"led",
"ledge",
"lee",
"leech",
"leeds",
"leek",
"leer",
"leery",
"leeway",
"left",
"lefty",
"leg",
"legal",
"leggy",
"legion",
"leigh",
"leila",
"leland",
"lemma",
"lemon",
"len",
"lena",
"lend",
"lenin",
"lenny",
"lens",
"lent",
"leo",
"leon",
"leona",
"leone",
"leper",
"leroy",
"less",
"lessee",
"lest",
"let",
"lethe",
"lev",
"levee",
"level",
"lever",
"levi",
"levin",
"levis",
"levy",
"lew",
"lewd",
"lewis",
"leyden",
"liar",
"libel",
"libido",
"libya",
"lice",
"lick",
"lid",
"lie",
"lied",
"lien",
"lieu",
"life",
"lifo",
"lift",
"light",
"like",
"liken",
"lila",
"lilac",
"lilly",
"lilt",
"lily",
"lima",
"limb",
"limbo",
"lime",
"limit",
"limp",
"lin",
"lind",
"linda",
"linden",
"line",
"linen",
"lingo",
"link",
"lint",
"linus",
"lion",
"lip",
"lipid",
"lisa",
"lise",
"lisle",
"lisp",
"list",
"listen",
"lit",
"lithe",
"litton",
"live",
"liven",
"livid",
"livre",
"liz",
"lizzie",
"lll",
"llll",
"lloyd",
"lmn",
"load",
"loaf",
"loam",
"loamy",
"loan",
"loath",
"lob",
"lobar",
"lobby",
"lobe",
"lobo",
"local",
"loci",
"lock",
"locke",
"locus",
"lodge",
"loeb",
"loess",
"loft",
"lofty",
"log",
"logan",
"loge",
"logic",
"loin",
"loire",
"lois",
"loiter",
"loki",
"lola",
"loll",
"lolly",
"lomb",
"lome",
"lone",
"long",
"look",
"loom",
"loon",
"loop",
"loose",
"loot",
"lop",
"lope",
"lopez",
"lord",
"lore",
"loren",
"los",
"lose",
"loss",
"lossy",
"lost",
"lot",
"lotte",
"lotus",
"lou",
"loud",
"louis",
"louise",
"louse",
"lousy",
"louver",
"love",
"low",
"lowe",
"lower",
"lowry",
"loy",
"loyal",
"lsi",
"ltv",
"lucas",
"lucia",
"lucid",
"luck",
"lucky",
"lucre",
"lucy",
"lug",
"luge",
"luger",
"luis",
"luke",
"lull",
"lulu",
"lumbar",
"lumen",
"lump",
"lumpy",
"lunar",
"lunch",
"lund",
"lung",
"lunge",
"lura",
"lurch",
"lure",
"lurid",
"lurk",
"lush",
"lust",
"lusty",
"lute",
"lutz",
"lux",
"luxe",
"luzon",
"lydia",
"lye",
"lying",
"lykes",
"lyle",
"lyman",
"lymph",
"lynch",
"lynn",
"lynx",
"lyon",
"lyons",
"lyra",
"lyric",
"mabel",
"mac",
"mace",
"mach",
"macho",
"mack",
"mackey",
"macon",
"macro",
"mad",
"madam",
"made",
"madman",
"madsen",
"mae",
"magi",
"magic",
"magma",
"magna",
"magog",
"maid",
"maier",
"mail",
"maim",
"main",
"maine",
"major",
"make",
"malady",
"malay",
"male",
"mali",
"mall",
"malt",
"malta",
"mambo",
"mamma",
"mammal",
"man",
"mana",
"manama",
"mane",
"mange",
"mania",
"manic",
"mann",
"manna",
"manor",
"mans",
"manse",
"mantle",
"many",
"mao",
"maori",
"map",
"maple",
"mar",
"marc",
"march",
"marco",
"marcy",
"mardi",
"mare",
"margo",
"maria",
"marie",
"marin",
"marine",
"mario",
"mark",
"marks",
"marlin",
"marrow",
"marry",
"mars",
"marsh",
"mart",
"marty",
"marx",
"mary",
"maser",
"mash",
"mask",
"mason",
"masque",
"mass",
"mast",
"mat",
"match",
"mate",
"mateo",
"mater",
"math",
"matte",
"maul",
"mauve",
"mavis",
"maw",
"mawr",
"max",
"maxim",
"maxima",
"may",
"maya",
"maybe",
"mayer",
"mayhem",
"mayo",
"mayor",
"mayst",
"mazda",
"maze",
"mba",
"mccoy",
"mcgee",
"mckay",
"mckee",
"mcleod",
"mead",
"meal",
"mealy",
"mean",
"meant",
"meat",
"meaty",
"mecca",
"mecum",
"medal",
"medea",
"media",
"medic",
"medley",
"meek",
"meet",
"meg",
"mega",
"meier",
"meir",
"mel",
"meld",
"melee",
"mellow",
"melon",
"melt",
"memo",
"memoir",
"men",
"mend",
"menlo",
"menu",
"merck",
"mercy",
"mere",
"merge",
"merit",
"merle",
"merry",
"mesa",
"mescal",
"mesh",
"meson",
"mess",
"messy",
"met",
"metal",
"mete",
"meter",
"metro",
"mew",
"meyer",
"meyers",
"mezzo",
"miami",
"mica",
"mice",
"mickey",
"micky",
"micro",
"mid",
"midas",
"midge",
"midst",
"mien",
"miff",
"mig",
"might",
"mike",
"mila",
"milan",
"milch",
"mild",
"mildew",
"mile",
"miles",
"milk",
"milky",
"mill",
"mills",
"milt",
"mimi",
"mimic",
"mince",
"mind",
"mine",
"mini",
"minim",
"mink",
"minnow",
"minor",
"minos",
"minot",
"minsk",
"mint",
"minus",
"mira",
"mirage",
"mire",
"mirth",
"miser",
"misery",
"miss",
"missy",
"mist",
"misty",
"mit",
"mite",
"mitre",
"mitt",
"mix",
"mixup",
"mizar",
"mmm",
"mmmm",
"mno",
"moan",
"moat",
"mob",
"mobil",
"mock",
"modal",
"mode",
"model",
"modem",
"modish",
"moe",
"moen",
"mohr",
"moire",
"moist",
"molal",
"molar",
"mold",
"mole",
"moll",
"mollie",
"molly",
"molt",
"molten",
"mommy",
"mona",
"monad",
"mondo",
"monel",
"money",
"monic",
"monk",
"mont",
"monte",
"month",
"monty",
"moo",
"mood",
"moody",
"moon",
"moor",
"moore",
"moose",
"moot",
"mop",
"moral",
"morale",
"moran",
"more",
"morel",
"morn",
"moron",
"morse",
"morsel",
"mort",
"mosaic",
"moser",
"moses",
"moss",
"mossy",
"most",
"mot",
"motel",
"motet",
"moth",
"mother",
"motif",
"motor",
"motto",
"mould",
"mound",
"mount",
"mourn",
"mouse",
"mousy",
"mouth",
"move",
"movie",
"mow",
"moyer",
"mph",
"mrs",
"much",
"muck",
"mucus",
"mud",
"mudd",
"muddy",
"muff",
"muffin",
"mug",
"muggy",
"mugho",
"muir",
"mulch",
"mulct",
"mule",
"mull",
"multi",
"mum",
"mummy",
"munch",
"mung",
"munson",
"muon",
"muong",
"mural",
"muriel",
"murk",
"murky",
"murre",
"muse",
"mush",
"mushy",
"music",
"musk",
"muslim",
"must",
"musty",
"mute",
"mutt",
"muzak",
"muzo",
"myel",
"myers",
"mylar",
"mynah",
"myopia",
"myra",
"myron",
"myrrh",
"myself",
"myth",
"naacp",
"nab",
"nadir",
"nag",
"nagoya",
"nagy",
"naiad",
"nail",
"nair",
"naive",
"naked",
"name",
"nan",
"nancy",
"naomi",
"nap",
"nary",
"nasa",
"nasal",
"nash",
"nasty",
"nat",
"natal",
"nate",
"nato",
"natty",
"nature",
"naval",
"nave",
"navel",
"navy",
"nay",
"nazi",
"nbc",
"nbs",
"ncaa",
"ncr",
"neal",
"near",
"neat",
"neath",
"neck",
"ned",
"nee",
"need",
"needy",
"neff",
"negate",
"negro",
"nehru",
"neil",
"nell",
"nelsen",
"neon",
"nepal",
"nero",
"nerve",
"ness",
"nest",
"net",
"neuron",
"neva",
"neve",
"new",
"newel",
"newt",
"next",
"nib",
"nibs",
"nice",
"nicety",
"niche",
"nick",
"niece",
"niger",
"nigh",
"night",
"nih",
"nikko",
"nil",
"nile",
"nimbus",
"nimh",
"nina",
"nine",
"ninth",
"niobe",
"nip",
"nit",
"nitric",
"nitty",
"nixon",
"nnn",
"nnnn",
"noaa",
"noah",
"nob",
"nobel",
"noble",
"nod",
"nodal",
"node",
"noel",
"noise",
"noisy",
"nolan",
"noll",
"nolo",
"nomad",
"non",
"nonce",
"none",
"nook",
"noon",
"noose",
"nop",
"nor",
"nora",
"norm",
"norma",
"north",
"norway",
"nose",
"not",
"notch",
"note",
"notre",
"noun",
"nov",
"nova",
"novak",
"novel",
"novo",
"now",
"nrc",
"nsf",
"ntis",
"nuance",
"nubia",
"nuclei",
"nude",
"nudge",
"null",
"numb",
"nun",
"nurse",
"nut",
"nyc",
"nylon",
"nymph",
"nyu",
"oaf",
"oak",
"oaken",
"oakley",
"oar",
"oases",
"oasis",
"oat",
"oath",
"obese",
"obey",
"objet",
"oboe",
"occur",
"ocean",
"oct",
"octal",
"octave",
"octet",
"odd",
"ode",
"odin",
"odium",
"off",
"offal",
"offend",
"offer",
"oft",
"often",
"ogden",
"ogle",
"ogre",
"ohio",
"ohm",
"ohmic",
"oil",
"oily",
"oint",
"okay",
"olaf",
"olav",
"old",
"olden",
"oldy",
"olga",
"olin",
"olive",
"olsen",
"olson",
"omaha",
"oman",
"omega",
"omen",
"omit",
"once",
"one",
"onion",
"only",
"onset",
"onto",
"onus",
"onward",
"onyx",
"ooo",
"oooo",
"ooze",
"opal",
"opec",
"opel",
"open",
"opera",
"opium",
"opt",
"optic",
"opus",
"oral",
"orate",
"orb",
"orbit",
"orchid",
"ordain",
"order",
"ore",
"organ",
"orgy",
"orin",
"orion",
"ornery",
"orono",
"orr",
"osaka",
"oscar",
"osier",
"oslo",
"other",
"otis",
"ott",
"otter",
"otto",
"ouch",
"ought",
"ounce",
"our",
"oust",
"out",
"ouvre",
"ouzel",
"ouzo",
"ova",
"oval",
"ovary",
"ovate",
"oven",
"over",
"overt",
"ovid",
"owe",
"owens",
"owing",
"owl",
"owly",
"own",
"oxen",
"oxeye",
"oxide",
"oxnard",
"ozark",
"ozone",
"pablo",
"pabst",
"pace",
"pack",
"packet",
"pact",
"pad",
"paddy",
"padre",
"paean",
"pagan",
"page",
"paid",
"pail",
"pain",
"paine",
"paint",
"pair",
"pal",
"pale",
"pall",
"palm",
"palo",
"palsy",
"pam",
"pampa",
"pan",
"panama",
"panda",
"pane",
"panel",
"pang",
"panic",
"pansy",
"pant",
"panty",
"paoli",
"pap",
"papa",
"papal",
"papaw",
"paper",
"pappy",
"papua",
"par",
"parch",
"pardon",
"pare",
"pareto",
"paris",
"park",
"parke",
"parks",
"parr",
"parry",
"parse",
"part",
"party",
"pascal",
"pasha",
"paso",
"pass",
"passe",
"past",
"paste",
"pasty",
"pat",
"patch",
"pate",
"pater",
"path",
"patio",
"patsy",
"patti",
"patton",
"patty",
"paul",
"paula",
"pauli",
"paulo",
"pause",
"pave",
"paw",
"pawn",
"pax",
"pay",
"payday",
"payne",
"paz",
"pbs",
"pea",
"peace",
"peach",
"peak",
"peaky",
"peal",
"peale",
"pear",
"pearl",
"pease",
"peat",
"pebble",
"pecan",
"peck",
"pecos",
"pedal",
"pedro",
"pee",
"peed",
"peek",
"peel",
"peep",
"peepy",
"peer",
"peg",
"peggy",
"pelt",
"pen",
"penal",
"pence",
"pencil",
"pend",
"penh",
"penn",
"penna",
"penny",
"pent",
"peony",
"pep",
"peppy",
"pepsi",
"per",
"perch",
"percy",
"perez",
"peril",
"perk",
"perky",
"perle",
"perry",
"persia",
"pert",
"perth",
"peru",
"peruse",
"pest",
"peste",
"pet",
"petal",
"pete",
"peter",
"petit",
"petri",
"petty",
"pew",
"pewee",
"phage",
"phase",
"phd",
"phenol",
"phi",
"phil",
"phlox",
"phon",
"phone",
"phony",
"photo",
"phyla",
"physic",
"piano",
"pica",
"pick",
"pickup",
"picky",
"pie",
"piece",
"pier",
"pierce",
"piety",
"pig",
"piggy",
"pike",
"pile",
"pill",
"pilot",
"pimp",
"pin",
"pinch",
"pine",
"ping",
"pinion",
"pink",
"pint",
"pinto",
"pion",
"piotr",
"pious",
"pip",
"pipe",
"piper",
"pique",
"pit",
"pitch",
"pith",
"pithy",
"pitney",
"pitt",
"pity",
"pius",
"pivot",
"pixel",
"pixy",
"pizza",
"place",
"plague",
"plaid",
"plain",
"plan",
"plane",
"plank",
"plant",
"plasm",
"plat",
"plate",
"plato",
"play",
"playa",
"plaza",
"plea",
"plead",
"pleat",
"pledge",
"pliny",
"plod",
"plop",
"plot",
"plow",
"pluck",
"plug",
"plum",
"plumb",
"plume",
"plump",
"plunk",
"plus",
"plush",
"plushy",
"pluto",
"ply",
"poach",
"pobox",
"pod",
"podge",
"podia",
"poe",
"poem",
"poesy",
"poet",
"poetry",
"pogo",
"poi",
"point",
"poise",
"poke",
"pol",
"polar",
"pole",
"police",
"polio",
"polis",
"polk",
"polka",
"poll",
"polo",
"pomona",
"pomp",
"ponce",
"pond",
"pong",
"pont",
"pony",
"pooch",
"pooh",
"pool",
"poole",
"poop",
"poor",
"pop",
"pope",
"poppy",
"porch",
"pore",
"pork",
"porous",
"port",
"porte",
"portia",
"porto",
"pose",
"posey",
"posh",
"posit",
"posse",
"post",
"posy",
"pot",
"potts",
"pouch",
"pound",
"pour",
"pout",
"pow",
"powder",
"power",
"ppm",
"ppp",
"pppp",
"pqr",
"prado",
"pram",
"prank",
"pratt",
"pray",
"preen",
"prefix",
"prep",
"press",
"prexy",
"prey",
"priam",
"price",
"prick",
"pride",
"prig",
"prim",
"prima",
"prime",
"primp",
"prince",
"print",
"prior",
"prism",
"prissy",
"privy",
"prize",
"pro",
"probe",
"prod",
"prof",
"prom",
"prone",
"prong",
"proof",
"prop",
"propyl",
"prose",
"proud",
"prove",
"prow",
"prowl",
"proxy",
"prune",
"pry",
"psalm",
"psi",
"psych",
"pta",
"pub",
"puck",
"puddly",
"puerto",
"puff",
"puffy",
"pug",
"pugh",
"puke",
"pull",
"pulp",
"pulse",
"puma",
"pump",
"pun",
"punch",
"punic",
"punish",
"punk",
"punky",
"punt",
"puny",
"pup",
"pupal",
"pupil",
"puppy",
"pure",
"purge",
"purl",
"purr",
"purse",
"pus",
"pusan",
"pusey",
"push",
"pussy",
"put",
"putt",
"putty",
"pvc",
"pygmy",
"pyle",
"pyre",
"pyrex",
"pyrite",
"qatar",
"qed",
"qqq",
"qqqq",
"qrs",
"qua",
"quack",
"quad",
"quaff",
"quail",
"quake",
"qualm",
"quark",
"quarry",
"quart",
"quash",
"quasi",
"quay",
"queasy",
"queen",
"queer",
"quell",
"query",
"quest",
"queue",
"quick",
"quid",
"quiet",
"quill",
"quilt",
"quinn",
"quint",
"quip",
"quirk",
"quirt",
"quit",
"quite",
"quito",
"quiz",
"quo",
"quod",
"quota",
"quote",
"rabat",
"rabbi",
"rabbit",
"rabid",
"rabin",
"race",
"rack",
"racy",
"radar",
"radii",
"radio",
"radium",
"radix",
"radon",
"rae",
"rafael",
"raft",
"rag",
"rage",
"raid",
"rail",
"rain",
"rainy",
"raise",
"raj",
"rajah",
"rake",
"rally",
"ralph",
"ram",
"raman",
"ramo",
"ramp",
"ramsey",
"ran",
"ranch",
"rand",
"randy",
"rang",
"range",
"rangy",
"rank",
"rant",
"raoul",
"rap",
"rape",
"rapid",
"rapt",
"rare",
"rasa",
"rascal",
"rash",
"rasp",
"rat",
"rata",
"rate",
"rater",
"ratio",
"rattle",
"raul",
"rave",
"ravel",
"raven",
"raw",
"ray",
"raze",
"razor",
"rca",
"reach",
"read",
"ready",
"reagan",
"real",
"realm",
"ream",
"reap",
"rear",
"reave",
"reb",
"rebel",
"rebut",
"recipe",
"reck",
"recur",
"red",
"redeem",
"reduce",
"reed",
"reedy",
"reef",
"reek",
"reel",
"reese",
"reeve",
"refer",
"regal",
"regina",
"regis",
"reich",
"reid",
"reign",
"rein",
"relax",
"relay",
"relic",
"reman",
"remedy",
"remit",
"remus",
"rena",
"renal",
"rend",
"rene",
"renown",
"rent",
"rep",
"repel",
"repent",
"resin",
"resort",
"rest",
"ret",
"retch",
"return",
"reub",
"rev",
"reveal",
"revel",
"rever",
"revet",
"revved",
"rex",
"rhea",
"rheum",
"rhine",
"rhino",
"rho",
"rhoda",
"rhode",
"rhyme",
"rib",
"rica",
"rice",
"rich",
"rick",
"rico",
"rid",
"ride",
"ridge",
"rifle",
"rift",
"rig",
"riga",
"rigel",
"riggs",
"right",
"rigid",
"riley",
"rill",
"rilly",
"rim",
"rime",
"rimy",
"ring",
"rink",
"rinse",
"rio",
"riot",
"rip",
"ripe",
"ripen",
"ripley",
"rise",
"risen",
"risk",
"risky",
"rite",
"ritz",
"rival",
"riven",
"river",
"rivet",
"riyadh",
"roach",
"road",
"roam",
"roar",
"roast",
"rob",
"robe",
"robin",
"robot",
"rock",
"rocket",
"rocky",
"rod",
"rode",
"rodeo",
"roe",
"roger",
"rogue",
"roil",
"role",
"roll",
"roman",
"rome",
"romeo",
"romp",
"ron",
"rondo",
"rood",
"roof",
"rook",
"rookie",
"rooky",
"room",
"roomy",
"roost",
"root",
"rope",
"rosa",
"rose",
"rosen",
"ross",
"rosy",
"rot",
"rotc",
"roth",
"rotor",
"rouge",
"rough",
"round",
"rouse",
"rout",
"route",
"rove",
"row",
"rowdy",
"rowe",
"roy",
"royal",
"royce",
"rpm",
"rrr",
"rrrr",
"rst",
"rsvp",
"ruanda",
"rub",
"rube",
"ruben",
"rubin",
"rubric",
"ruby",
"ruddy",
"rude",
"rudy",
"rue",
"rufus",
"rug",
"ruin",
"rule",
"rum",
"rumen",
"rummy",
"rump",
"rumpus",
"run",
"rune",
"rung",
"runge",
"runic",
"runt",
"runty",
"rupee",
"rural",
"ruse",
"rush",
"rusk",
"russ",
"russo",
"rust",
"rusty",
"rut",
"ruth",
"rutty",
"ryan",
"ryder",
"rye",
"sabine",
"sable",
"sabra",
"sac",
"sachs",
"sack",
"sad",
"saddle",
"sadie",
"safari",
"safe",
"sag",
"saga",
"sage",
"sago",
"said",
"sail",
"saint",
"sake",
"sal",
"salad",
"sale",
"salem",
"saline",
"salk",
"salle",
"sally",
"salon",
"salt",
"salty",
"salve",
"salvo",
"sam",
"samba",
"same",
"sammy",
"samoa",
"samuel",
"san",
"sana",
"sand",
"sandal",
"sandy",
"sane",
"sang",
"sank",
"sans",
"santa",
"santo",
"sao",
|
"sarah",
"saran",
"sari",
"sash",
"sat",
"satan",
"satin",
"satyr",
"sauce",
"saucy",
"saud",
"saudi",
"saul",
"sault",
"saute",
"save",
"savoy",
"savvy",
"saw",
"sawyer",
"sax",
"saxon",
"say",
"scab",
"scala",
"scald",
"scale",
"scalp",
"scam",
"scamp",
"scan",
"scant",
"scar",
"scare",
"scarf",
"scary",
"scat",
"scaup",
"scene",
"scent",
"school",
"scion",
"scm",
"scoff",
"scold",
"scoop",
"scoot",
"scope",
"scops",
"score",
"scoria",
"scorn",
"scot",
"scott",
"scour",
"scout",
"scowl",
"scram",
"scrap",
"scrape",
"screw",
"scrim",
"scrub",
"scuba",
"scud",
"scuff",
"scull",
"scum",
"scurry",
"sea",
"seal",
"seam",
"seamy",
"sean",
"sear",
"sears",
"season",
"seat",
"sec",
"secant",
"sect",
"sedan",
"seder",
"sedge",
"see",
"seed",
"seedy",
"seek",
"seem",
"seen",
"seep",
"seethe",
"seize",
"self",
"sell",
"selma",
"semi",
"sen",
"send",
"seneca",
"senor",
"sense",
"sent",
"sentry",
"seoul",
"sepal",
"sepia",
"sepoy",
"sept",
"septa",
"sequin",
"sera",
"serf",
"serge",
"serif",
"serum",
"serve",
"servo",
"set",
"seth",
"seton",
"setup",
"seven",
"sever",
"severe",
"sew",
"sewn",
"sex",
"sexy",
"shack",
"shad",
"shade",
"shady",
"shafer",
"shaft",
"shag",
"shah",
"shake",
"shaken",
"shako",
"shaky",
"shale",
"shall",
"sham",
"shame",
"shank",
"shape",
"shard",
"share",
"shari",
"shark",
"sharp",
"shave",
"shaw",
"shawl",
"shay",
"she",
"shea",
"sheaf",
"shear",
"sheath",
"shed",
"sheen",
"sheep",
"sheer",
"sheet",
"sheik",
"shelf",
"shell",
"shied",
"shift",
"shill",
"shim",
"shin",
"shine",
"shinto",
"shiny",
"ship",
"shire",
"shirk",
"shirt",
"shish",
"shiv",
"shoal",
"shock",
"shod",
"shoe",
"shoji",
"shone",
"shoo",
"shook",
"shoot",
"shop",
"shore",
"short",
"shot",
"shout",
"shove",
"show",
"shown",
"showy",
"shrank",
"shred",
"shrew",
"shrike",
"shrub",
"shrug",
"shu",
"shuck",
"shun",
"shunt",
"shut",
"shy",
"sial",
"siam",
"sian",
"sib",
"sibley",
"sibyl",
"sic",
"sick",
"side",
"sidle",
"siege",
"siena",
"sieve",
"sift",
"sigh",
"sight",
"sigma",
"sign",
"signal",
"signor",
"silas",
"silk",
"silky",
"sill",
"silly",
"silo",
"silt",
"silty",
"sima",
"simon",
"simons",
"sims",
"sin",
"sinai",
"since",
"sine",
"sinew",
"sing",
"singe",
"sinh",
"sink",
"sinus",
"sioux",
"sip",
"sir",
"sire",
"siren",
"sis",
"sisal",
"sit",
"site",
"situ",
"situs",
"siva",
"six",
"sixgun",
"sixth",
"sixty",
"size",
"skat",
"skate",
"skeet",
"skew",
"ski",
"skid",
"skied",
"skiff",
"skill",
"skim",
"skimp",
"skimpy",
"skin",
"skip",
"skirt",
"skit",
"skulk",
"skull",
"skunk",
"sky",
"skye",
"slab",
"slack",
"slag",
"slain",
"slake",
"slam",
"slang",
"slant",
"slap",
"slash",
"slat",
"slate",
"slater",
"slav",
"slave",
"slay",
"sled",
"sleek",
"sleep",
"sleet",
"slept",
"slew",
"slice",
"slick",
"slid",
"slide",
"slim",
"slime",
"slimy",
"sling",
"slip",
"slit",
"sliver",
"sloan",
"slob",
"sloe",
"slog",
"sloop",
"slop",
"slope",
"slosh",
"slot",
"sloth",
"slow",
"slug",
"sluice",
"slum",
"slump",
"slung",
"slur",
"slurp",
"sly",
"smack",
"small",
"smart",
"smash",
"smear",
"smell",
"smelt",
"smile",
"smirk",
"smith",
"smithy",
"smog",
"smoke",
"smoky",
"smug",
"smut",
"snack",
"snafu",
"snag",
"snail",
"snake",
"snap",
"snare",
"snark",
"snarl",
"snatch",
"sneak",
"sneer",
"snell",
"snick",
"sniff",
"snip",
"snipe",
"snob",
"snook",
"snoop",
"snore",
"snort",
"snout",
"snow",
"snowy",
"snub",
"snuff",
"snug",
"soak",
"soap",
"soapy",
"soar",
"sob",
"sober",
"social",
"sock",
"sod",
"soda",
"sofa",
"sofia",
"soft",
"soften",
"soggy",
"soil",
"sol",
"solar",
"sold",
"sole",
"solemn",
"solid",
"solo",
"solon",
"solve",
"soma",
"somal",
"some",
"son",
"sonar",
"song",
"sonic",
"sonny",
"sonora",
"sony",
"soon",
"soot",
"sooth",
"sop",
"sora",
"sorb",
"sore",
"sorry",
"sort",
"sos",
"sou",
"sough",
"soul",
"sound",
"soup",
"sour",
"source",
"sousa",
"south",
"sow",
"sown",
"soy",
"soya",
"spa",
"space",
"spade",
"spain",
"span",
"spar",
"spare",
"sparge",
"spark",
"spasm",
"spat",
"spate",
"spawn",
"spay",
"speak",
"spear",
"spec",
"speck",
"sped",
"speed",
"spell",
"spend",
"spent",
"sperm",
"sperry",
"spew",
"spica",
"spice",
"spicy",
"spike",
"spiky",
"spill",
"spilt",
"spin",
"spine",
"spiny",
"spire",
"spiro",
"spit",
"spite",
"spitz",
"splat",
"splay",
"spline",
"split",
"spoil",
"spoke",
"spoof",
"spook",
"spooky",
"spool",
"spoon",
"spore",
"sport",
"spot",
"spout",
"sprain",
"spray",
"spree",
"sprig",
"spruce",
"sprue",
"spud",
"spume",
"spun",
"spunk",
"spur",
"spurn",
"spurt",
"spy",
"squad",
"squat",
"squaw",
"squibb",
"squid",
"squint",
"sri",
"sss",
"ssss",
"sst",
"st.",
"stab",
"stack",
"stacy",
"staff",
"stag",
"stage",
"stagy",
"stahl",
"staid",
"stain",
"stair",
"stake",
"stale",
"stalk",
"stall",
"stamp",
"stan",
"stance",
"stand",
"stank",
"staph",
"star",
"stare",
"stark",
"starr",
"start",
"stash",
"state",
"statue",
"stave",
"stay",
"stead",
"steak",
"steal",
"steam",
"steed",
"steel",
"steele",
"steen",
"steep",
"steer",
"stein",
"stella",
"stem",
"step",
"stern",
"steve",
"stew",
"stick",
"stiff",
"stile",
"still",
"stilt",
"sting",
"stingy",
"stink",
"stint",
"stir",
"stock",
"stoic",
"stoke",
"stole",
"stomp",
"stone",
"stony",
"stood",
"stool",
"stoop",
"stop",
"store",
"storey",
"stork",
"storm",
"story",
"stout",
"stove",
"stow",
"strafe",
"strap",
"straw",
"stray",
"strewn",
"strip",
"stroll",
"strom",
"strop",
"strum",
"strut",
"stu",
"stuart",
"stub",
"stuck",
"stud",
"study",
"stuff",
"stuffy",
"stump",
"stun",
"stung",
"stunk",
"stunt",
"sturm",
"style",
"styli",
"styx",
"suave",
"sub",
"subtly",
"such",
"suck",
"sud",
"sudan",
"suds",
"sue",
"suey",
"suez",
"sugar",
"suit",
"suite",
"sulfa",
"sulk",
"sulky",
"sully",
"sultry",
"sum",
"sumac",
"summon",
"sun",
"sung",
"sunk",
"sunny",
"sunset",
"suny",
"sup",
"super",
"supra",
"sure",
"surf",
"surge",
"sus",
"susan",
"sushi",
"susie",
"sutton",
"swab",
"swag",
"swain",
"swam",
"swami",
"swamp",
"swampy",
"swan",
"swank",
"swap",
"swarm",
"swart",
"swat",
"swath",
"sway",
"swear",
"sweat",
"sweaty",
"swede",
"sweep",
"sweet",
"swell",
"swelt",
"swept",
"swift",
"swig",
"swim",
"swine",
"swing",
"swipe",
"swirl",
"swish",
"swiss",
"swoop",
"sword",
"swore",
"sworn",
"swum",
"swung",
"sybil",
"sykes",
"sylow",
"sylvan",
"synge",
"synod",
"syria",
"syrup",
"tab",
"table",
"taboo",
"tabu",
"tabula",
"tacit",
"tack",
"tacky",
"tacoma",
"tact",
"tad",
"taffy",
"taft",
"tag",
"tahoe",
"tail",
"taint",
"take",
"taken",
"talc",
"tale",
"talk",
"talky",
"tall",
"tallow",
"tally",
"talon",
"talus",
"tam",
"tame",
"tamp",
"tampa",
"tan",
"tang",
"tango",
"tangy",
"tanh",
"tank",
"tansy",
"tanya",
"tao",
"taos",
"tap",
"tapa",
"tape",
"taper",
"tapir",
"tapis",
"tappa",
"tar",
"tara",
"tardy",
"tariff",
"tarry",
"tart",
"task",
"tass",
"taste",
"tasty",
"tat",
"tate",
"tater",
"tattle",
"tatty",
"tau",
"taunt",
"taut",
"tavern",
"tawny",
"tax",
"taxi",
"tea",
"teach",
"teal",
"team",
"tear",
"tease",
"teat",
"tech",
"tecum",
"ted",
"teddy",
"tee",
"teem",
"teen",
"teensy",
"teet",
"teeth",
"telex",
"tell",
"tempo",
"tempt",
"ten",
"tend",
"tenet",
"tenney",
"tenon",
"tenor",
"tense",
"tensor",
"tent",
"tenth",
"tepee",
"tepid",
"term",
"tern",
"terra",
"terre",
"terry",
"terse",
"tess",
"test",
"testy",
"tete",
"texan",
"texas",
"text",
"thai",
"than",
"thank",
"that",
"thaw",
"the",
"thea",
"thee",
"theft",
"their",
"them",
"theme",
"then",
"there",
"these",
"theta",
"they",
"thick",
"thief",
"thigh",
"thin",
"thine",
"thing",
"think",
"third",
"this",
"thong",
"thor",
"thorn",
"thorny",
"those",
"thou",
"thread",
"three",
"threw",
"throb",
"throes",
"throw",
"thrum",
"thud",
"thug",
"thule",
"thumb",
"thump",
"thus",
"thy",
"thyme",
"tiber",
"tibet",
"tibia",
"tic",
"tick",
"ticket",
"tid",
"tidal",
"tidbit",
"tide",
"tidy",
"tie",
"tied",
"tier",
"tift",
"tiger",
"tight",
"til",
"tilde",
"tile",
"till",
"tilt",
"tilth",
"tim",
"time",
"timex",
"timid",
"timon",
"tin",
"tina",
"tine",
"tinge",
"tint",
"tiny",
"tioga",
"tip",
"tipoff",
"tippy",
"tipsy",
"tire",
"tit",
"titan",
"tithe",
"title",
"titus",
"tnt",
"toad",
"toady",
"toast",
"toby",
"today",
"todd",
"toe",
"tofu",
"tog",
"togo",
"togs",
"toil",
"toilet",
"token",
"tokyo",
"told",
"toll",
"tom",
"tomb",
"tome",
"tommy",
"ton",
"tonal",
"tone",
"tong",
"toni",
"tonic",
"tonk",
"tonsil",
"tony",
"too",
"took",
"tool",
"toot",
"tooth",
"top",
"topaz",
"topic",
"topple",
"topsy",
"tor",
"torah",
"torch",
"tore",
"tori",
"torn",
"torr",
"torso",
"tort",
"torus",
"tory",
"toss",
"tot",
"total",
"tote",
"totem",
"touch",
"tough",
"tour",
"tout",
"tow",
"towel",
"tower",
"town",
"toxic",
"toxin",
"toy",
"trace",
"track",
"tract",
"tracy",
"trade",
"trag",
"trail",
"train",
"trait",
"tram",
"tramp",
"trap",
"trash",
"trawl",
"tread",
"treat",
"treble",
"tree",
"trek",
"trench",
"trend",
"tress",
"triad",
"trial",
"tribe",
"trick",
"tried",
"trig",
"trill",
"trim",
"trio",
"trip",
"tripe",
"trite",
"triton",
"trod",
"troll",
"troop",
"trot",
"trout",
"troy",
"truce",
"truck",
"trudge",
"trudy",
"true",
"truly",
"trump",
"trunk",
"truss",
"trust",
"truth",
"trw",
"try",
"tsar",
"ttl",
"ttt",
"tttt",
"tty",
"tub",
"tuba",
"tube",
"tuck",
"tudor",
"tuff",
"tuft",
"tug",
"tulane",
"tulip",
"tulle",
"tulsa",
"tum",
"tun",
"tuna",
"tune",
"tung",
"tunic",
"tunis",
"tunnel",
"tuple",
"turf",
"turin",
"turk",
"turn",
"turvy",
"tusk",
"tussle",
"tutor",
"tutu",
"tuv",
"tva",
"twa",
"twain",
"tweak",
"tweed",
"twice",
"twig",
"twill",
"twin",
"twine",
"twirl",
"twist",
"twisty",
"twit",
"two",
"twx",
"tyburn",
"tying",
"tyler",
"type",
"typic",
"typo",
"tyson",
"ucla",
"ugh",
"ugly",
"ulan",
"ulcer",
"ultra",
"umber",
"umbra",
"umpire",
"unary",
"uncle",
"under",
"unify",
"union",
"unit",
"unite",
"unity",
"unix",
"until",
"upend",
"uphold",
"upon",
"upper",
"uproar",
"upset",
"uptake",
"upton",
"urban",
"urbane",
"urea",
"urge",
"uri",
"urine",
"uris",
"urn",
"ursa",
"usa",
"usaf",
"usage",
"usc",
"usda",
"use",
"useful",
"usgs",
"usher",
"usia",
"usn",
"usps",
"ussr",
"usual",
"usurp",
"usury",
"utah",
"utica",
"utile",
"utmost",
"utter",
"uuu",
"uuuu",
"uvw",
"vacua",
"vacuo",
"vade",
"vaduz",
"vague",
"vail",
"vain",
"vale",
"valet",
"valeur",
"valid",
"value",
"valve",
"vamp",
"van",
"vance",
"vane",
"vary",
"vase",
"vast",
"vat",
"vault",
"veal",
"veda",
"vee",
"veer",
"veery",
"vega",
"veil",
"vein",
"velar",
"veldt",
"vella",
"vellum",
"venal",
"vend",
"venial",
"venom",
"vent",
"venus",
"vera",
"verb",
"verde",
"verdi",
"verge",
"verity",
"verna",
"verne",
"versa",
"verse",
"verve",
"very",
"vessel",
"vest",
"vet",
"vetch",
"veto",
"vex",
"via",
"vial",
"vicar",
"vice",
"vichy",
"vicky",
"vida",
"video",
"vie",
"viet",
"view",
"vigil",
"vii",
"viii",
"vile",
"villa",
"vine",
"vinyl",
"viola",
"violet",
"virgil",
"virgo",
"virus",
"vis",
"visa",
"vise",
"visit",
"visor",
"vista",
"vita",
"vitae",
"vital",
"vito",
"vitro",
"viva",
"vivian",
"vivid",
"vivo",
"vixen",
"viz",
"vocal",
"vogel",
"vogue",
"voice",
"void",
"volt",
"volta",
"volvo",
"vomit",
"von",
"voss",
"vote",
"vouch",
"vow",
"vowel",
"vulcan",
"vvv",
"vvvv",
"vying",
"waals",
"wac",
"wack",
"wacke",
"wacky",
"waco",
"wad",
"wade",
"wadi",
"wafer",
"wag",
"wage",
"waggle",
"wah",
"wahl",
"wail",
"waist",
"wait",
"waite",
"waive",
"wake",
"waken",
"waldo",
"wale",
"walk",
"walkie",
"wall",
"walls",
"wally",
"walsh",
"walt",
"walton",
"waltz",
"wan",
"wand",
"wane",
"wang",
"want",
"war",
"ward",
"ware",
"warm",
"warmth",
"warn",
"warp",
"warren",
"wart",
"warty",
"wary",
"was",
"wash",
"washy",
"wasp",
"wast",
"waste",
"watch",
"water",
"watt",
"watts",
"wave",
"wavy",
"wax",
"waxen",
"waxy",
"way",
"wayne",
"weak",
"weal",
"wealth",
"wean",
"wear",
"weary",
"weave",
"web",
"webb",
"weber",
"weco",
"wed",
"wedge",
"wee",
"weed",
"weedy",
"week",
"weeks",
"weep",
"wehr",
"wei",
"weigh",
"weir",
"weird",
"weiss",
"welch",
"weld",
"well",
"wells",
"welsh",
"welt",
"wendy",
"went",
"wept",
"were",
"wert",
"west",
"wet",
"whack",
"whale",
"wham",
"wharf",
"what",
"wheat",
"whee",
"wheel",
"whelk",
"whelm",
"whelp",
"when",
"where",
"whet",
"which",
"whiff",
"whig",
"while",
"whim",
"whine",
"whinny",
"whip",
"whir",
"whirl",
"whisk",
"whit",
"white",
"whiz",
"who",
"whoa",
"whole",
"whom",
"whoop",
"whoosh",
"whop",
"whose",
"whup",
"why",
"wick",
"wide",
"widen",
"widow",
"width",
"wield",
"wier",
"wife",
"wig",
"wild",
"wile",
"wiley",
"wilkes",
"will",
"willa",
"wills",
"wilma",
"wilt",
"wily",
"win",
"wince",
"winch",
"wind",
"windy",
"wine",
"wing",
"wink",
"winnie",
"wino",
"winter",
"winy",
"wipe",
"wire",
"wiry",
"wise",
"wish",
"wishy",
"wisp",
"wispy",
"wit",
"witch",
"with",
"withe",
"withy",
"witt",
"witty",
"wive",
"woe",
"wok",
"woke",
"wold",
"wolf",
"wolfe",
"wolff",
"wolve",
"woman",
"womb",
"women",
"won",
"wonder",
"wong",
"wont",
"woo",
"wood",
"woods",
"woody",
"wool",
"woozy",
"word",
"wordy",
"wore",
"work",
"world",
"worm",
"wormy",
"worn",
"worry",
"worse",
"worst",
"worth",
"wotan",
"would",
"wound",
"wove",
"woven",
"wow",
"wrack",
"wrap",
"wrath",
"wreak",
"wreck",
"wrest",
"wring",
"wrist",
"writ",
"write",
"writhe",
"wrong",
"wrote",
"wry",
"wuhan",
"www",
"wwww",
"wxy",
"wyatt",
"wyeth",
"wylie",
"wyman",
"wyner",
"wynn",
"xenon",
"xerox",
"xxx",
"xxxx",
"xylem",
"xyz",
"yacht",
"yah",
"yak",
"yale",
"yalta",
"yam",
"yamaha",
"yang",
"yank",
"yap",
"yaqui",
"yard",
"yarn",
"yates",
"yaw",
"yawl",
"yawn",
"yea",
"yeah",
"year",
"yearn",
"yeast",
"yeasty",
"yeats",
"yell",
"yelp",
"yemen",
"yen",
"yet",
"yield",
"yin",
"yip",
"ymca",
"yodel",
"yoder",
"yoga",
"yogi",
"yoke",
"yokel",
"yolk",
"yon",
"yond",
"yore",
"york",
"yost",
"you",
"young",
"your",
"youth",
"yow",
"yucca",
"yuck",
"yuh",
"yuki",
"yukon",
"yule",
"yves",
"ywca",
"yyy",
"yyyy",
"zag",
"zaire",
"zan",
"zap",
"zazen",
"zeal",
"zealot",
"zebra",
"zeiss",
"zen",
"zero",
"zest",
"zesty",
"zeta",
"zeus",
"zig",
"zilch",
"zinc",
"zing",
"zion",
"zip",
"zloty",
"zoe",
"zomba",
"zone",
"zoo",
"zoom",
"zorn",
"zurich",
"zzz",
"zzzz"]
|
"sap",
"sappy",
"sara",
|
func1.rs
|
pub fn func1()
|
{}
|
|
state.rs
|
use super::{
comments_buffer::{BufferedComment, BufferedCommentKind},
Context, Input, Lexer,
};
use crate::{error::Error, input::Tokens, lexer::util::CharExt, token::*, EsVersion, Syntax};
use enum_kind::Kind;
#[cfg(not(debug_assertions))]
use smallvec::SmallVec;
use std::mem::take;
use swc_common::BytePos;
use tracing::trace;
/// State of lexer.
///
/// Ported from babylon.
#[derive(Clone)]
pub(super) struct State {
pub is_expr_allowed: bool,
/// Whether a line break exists between the previous token and the new token.
pub had_line_break: bool,
/// TODO: Remove this field.
is_first: bool,
pub start: BytePos,
pub cur_line: usize,
pub line_start: BytePos,
pub prev_hi: BytePos,
context: TokenContexts,
syntax: Syntax,
token_type: Option<TokenType>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum TokenType {
Template,
Dot,
Colon,
LBrace,
RParen,
Semi,
BinOp(BinOpToken),
Keyword(Keyword),
JSXName,
JSXText,
JSXTagStart,
JSXTagEnd,
Arrow,
Other {
before_expr: bool,
can_have_trailing_comment: bool,
},
}
impl TokenType {
fn before_expr(self) -> bool {
match self {
TokenType::JSXName
| TokenType::JSXTagStart
| TokenType::JSXTagEnd
| TokenType::Template
| TokenType::Dot
| TokenType::RParen => false,
TokenType::JSXText
| TokenType::Colon
| TokenType::LBrace
| TokenType::Semi
| TokenType::Arrow => true,
TokenType::BinOp(b) => b.before_expr(),
TokenType::Keyword(k) => k.before_expr(),
TokenType::Other { before_expr, .. } => before_expr,
}
}
}
impl<'a> From<&'a Token> for TokenType {
fn from(t: &Token) -> Self {
match *t {
Token::Template { .. } => TokenType::Template,
Token::Dot => TokenType::Dot,
Token::Colon => TokenType::Colon,
Token::LBrace => TokenType::LBrace,
Token::RParen => TokenType::RParen,
Token::Semi => TokenType::Semi,
Token::JSXTagEnd => TokenType::JSXTagEnd,
Token::JSXTagStart => TokenType::JSXTagStart,
Token::JSXText { .. } => TokenType::JSXText,
Token::JSXName { .. } => TokenType::JSXName,
Token::BinOp(op) => TokenType::BinOp(op),
Token::Arrow => TokenType::Arrow,
Token::Word(Word::Keyword(k)) => TokenType::Keyword(k),
_ => TokenType::Other {
before_expr: t.before_expr(),
can_have_trailing_comment: matches!(
*t,
Token::Num(..)
| Token::Str { .. }
| Token::Word(Word::Ident(..))
| Token::DollarLBrace
| Token::Regex(..)
| Token::BigInt(..)
| Token::JSXText { .. }
| Token::RBrace
),
},
}
}
}
impl<I: Input> Tokens for Lexer<'_, I> {
fn set_ctx(&mut self, ctx: Context) {
if ctx.module && !self.module_errors.borrow().is_empty() {
let mut module_errors = self.module_errors.borrow_mut();
self.errors.borrow_mut().append(&mut *module_errors);
}
self.ctx = ctx
}
fn ctx(&self) -> Context {
self.ctx
}
fn syntax(&self) -> Syntax {
self.syntax
}
fn target(&self) -> EsVersion {
self.target
}
fn start_pos(&self) -> BytePos {
self.start_pos
}
fn set_expr_allowed(&mut self, allow: bool) {
self.set_expr_allowed(allow)
}
fn token_context(&self) -> &TokenContexts {
&self.state.context
}
fn token_context_mut(&mut self) -> &mut TokenContexts {
&mut self.state.context
}
fn set_token_context(&mut self, c: TokenContexts) {
self.state.context = c;
}
fn add_error(&self, error: Error) {
self.errors.borrow_mut().push(error);
}
fn add_module_mode_error(&self, error: Error) {
if self.ctx.module {
self.add_error(error);
return;
}
self.module_errors.borrow_mut().push(error);
}
fn take_errors(&mut self) -> Vec<Error> {
take(&mut self.errors.borrow_mut())
}
}
impl<'a, I: Input> Iterator for Lexer<'a, I> {
type Item = TokenAndSpan;
fn next(&mut self) -> Option<Self::Item> {
let mut start = self.cur_pos();
let res = (|| -> Result<Option<_>, _> {
if self.state.is_first {
if let Some(shebang) = self.read_shebang()? {
return Ok(Some(Token::Shebang(shebang)));
}
}
self.state.had_line_break = self.state.is_first;
self.state.is_first = false;
// skip spaces before getting next character, if we are allowed to.
if self.state.can_skip_space() {
self.skip_space()?;
start = self.input.cur_pos();
};
let c = match self.input.cur() {
Some(c) => c,
// End of input.
None => {
if let Some(comments) = self.comments.as_mut() {
let comments_buffer = self.comments_buffer.as_mut().unwrap();
let last = self.state.prev_hi;
// move the pending to the leading or trailing
for c in comments_buffer.take_pending_leading() {
// if the file had no tokens and no shebang, then treat any
// comments in the leading comments buffer as leading.
// Otherwise treat them as trailing.
if last == self.start_pos {
comments_buffer.push(BufferedComment {
kind: BufferedCommentKind::Leading,
pos: last,
comment: c,
});
} else {
comments_buffer.push(BufferedComment {
kind: BufferedCommentKind::Trailing,
pos: last,
comment: c,
});
}
}
// now fill the user's passed in comments
for comment in comments_buffer.take_comments() {
match comment.kind {
BufferedCommentKind::Leading => {
comments.add_leading(comment.pos, comment.comment);
}
BufferedCommentKind::Trailing => {
comments.add_trailing(comment.pos, comment.comment);
}
}
}
}
return Ok(None);
}
};
// println!(
// "\tContext: ({:?}) {:?}",
// self.input.cur().unwrap(),
// self.state.context.0
// );
self.state.start = start;
if self.syntax.jsx() && !self.ctx.in_property_name && !self.ctx.in_type {
//jsx
if self.state.context.current() == Some(TokenContext::JSXExpr) {
return self.read_jsx_token();
}
let c = self.cur();
if let Some(c) = c {
if self.state.context.current() == Some(TokenContext::JSXOpeningTag)
|| self.state.context.current() == Some(TokenContext::JSXClosingTag)
{
if c.is_ident_start() {
return self.read_jsx_word().map(Some);
}
if c == '>' {
self.input.bump();
return Ok(Some(Token::JSXTagEnd));
}
if (c == '\'' || c == '"')
&& self.state.context.current() == Some(TokenContext::JSXOpeningTag)
{
return self.read_jsx_str(c).map(Some);
}
}
if c == '<' && self.state.is_expr_allowed && self.input.peek() != Some('!') {
self.input.bump();
return Ok(Some(Token::JSXTagStart));
}
}
}
if let Some(TokenContext::Tpl {
start: start_pos_of_tpl,
}) = self.state.context.current()
{
return self.read_tmpl_token(start_pos_of_tpl).map(Some);
}
if self.syntax.typescript() && self.ctx.in_type {
if c == '<' {
self.input.bump();
return Ok(Some(tok!('<')));
} else if c == '>' {
self.input.bump();
return Ok(Some(tok!('>')));
}
}
self.read_token()
})();
let token = match res.map_err(Token::Error).map_err(Some) {
Ok(t) => t,
Err(e) => e,
};
let span = self.span(start);
if let Some(ref token) = token {
if let Some(comments) = self.comments_buffer.as_mut() {
for comment in comments.take_pending_leading() {
comments.push(BufferedComment {
kind: BufferedCommentKind::Leading,
pos: start,
comment,
});
}
}
self.state.update(start, &token);
self.state.prev_hi = self.last_pos();
}
token.map(|token| {
// Attach span to token.
TokenAndSpan {
token,
had_line_break: self.had_line_break_before_last(),
span,
}
})
}
}
impl State {
pub fn new(syntax: Syntax, start_pos: BytePos) -> Self {
#[cfg(debug_assertions)]
let context = TokenContexts(vec![TokenContext::BraceStmt]);
#[cfg(not(debug_assertions))]
let context = TokenContexts(SmallVec::from_slice(&[TokenContext::BraceStmt]));
State {
is_expr_allowed: true,
is_first: true,
had_line_break: false,
prev_hi: start_pos,
context,
token_type: None,
start: BytePos(0),
line_start: BytePos(0),
cur_line: 1,
syntax,
}
}
}
impl State {
pub fn can_skip_space(&self) -> bool {
!self
.context
.current()
.map(|t| t.preserve_space())
.unwrap_or(false)
}
pub fn can_have_trailing_comment(&self) -> bool {
match self.token_type {
Some(TokenType::Keyword(..)) => false,
Some(TokenType::Semi) | Some(TokenType::LBrace) => true,
Some(TokenType::Other {
can_have_trailing_comment,
..
}) => can_have_trailing_comment,
_ => false,
}
}
pub fn last_was_tpl_element(&self) -> bool {
match self.token_type {
Some(TokenType::Template) => true,
_ => false,
}
}
fn update(&mut self, start: BytePos, next: &Token) {
if cfg!(feature = "debug") {
trace!(
"updating state: next={:?}, had_line_break={} ",
next,
self.had_line_break
);
}
let prev = self.token_type.take();
self.token_type = Some(TokenType::from(next));
self.is_expr_allowed = Self::is_expr_allowed_on_next(
&mut self.context,
self.syntax,
prev,
start,
next,
self.had_line_break,
self.is_expr_allowed,
);
}
/// `is_expr_allowed`: previous value.
/// `start`: start of newly produced token.
fn is_expr_allowed_on_next(
context: &mut TokenContexts,
syntax: Syntax,
prev: Option<TokenType>,
start: BytePos,
next: &Token,
had_line_break: bool,
is_expr_allowed: bool,
) -> bool {
let is_next_keyword = match *next {
Word(Word::Keyword(..)) => true,
_ => false,
};
if is_next_keyword && prev == Some(TokenType::Dot) {
false
} else {
// ported updateContext
match *next {
tok!(')') | tok!('}') => {
// TODO: Verify
if context.len() == 1 {
return true;
}
let out = context.pop().unwrap();
// let a = function(){}
if out == TokenContext::BraceStmt
&& context.current() == Some(TokenContext::FnExpr)
{
context.pop();
return false;
}
// ${} in template
if out == TokenContext::TplQuasi {
match context.current() {
Some(TokenContext::Tpl { .. }) => return false,
_ => return true,
}
}
// expression cannot follow expression
!out.is_expr()
}
tok!("function") => {
// This is required to lex
// `x = function(){}/42/i`
if is_expr_allowed
&& !context.is_brace_block(prev, had_line_break, is_expr_allowed)
{
context.push(TokenContext::FnExpr);
}
false
}
// for (a of b) {}
tok!("of")
if Some(TokenContext::ParenStmt { is_for_loop: true }) == context.current() =>
{
// e.g. for (a of _) => true
!prev
.expect("context.current() if ParenStmt, so prev token cannot be None")
.before_expr()
}
Word(Word::Ident(..)) => {
// variable declaration
match prev {
Some(prev) => match prev {
// handle automatic semicolon insertion.
TokenType::Keyword(Let)
| TokenType::Keyword(Const)
| TokenType::Keyword(Var)
if had_line_break =>
{
true
}
_ => false,
},
_ => false,
}
}
tok!('{') => {
let cur = context.current();
if syntax.jsx() && cur == Some(TokenContext::JSXOpeningTag) {
context.push(TokenContext::BraceExpr)
} else if syntax.jsx() && cur == Some(TokenContext::JSXExpr) {
context.push(TokenContext::TplQuasi);
} else {
let next_ctxt =
if context.is_brace_block(prev, had_line_break, is_expr_allowed) {
TokenContext::BraceStmt
} else {
TokenContext::BraceExpr
};
context.push(next_ctxt);
}
true
}
tok!('/') if syntax.jsx() && prev == Some(TokenType::JSXTagStart) => {
context.pop();
context.pop(); // do not consider JSX expr -> JSX open tag -> ... anymore
context.push(TokenContext::JSXClosingTag); // reconsider as closing tag context
false
}
tok!("${") => {
context.push(TokenContext::TplQuasi);
true
}
tok!('(') => {
// if, for, with, while is statement
context.push(match prev {
Some(TokenType::Keyword(k)) => match k {
If | With | While => TokenContext::ParenStmt { is_for_loop: false },
For => TokenContext::ParenStmt { is_for_loop: true },
_ => TokenContext::ParenExpr,
},
_ => TokenContext::ParenExpr,
});
true
}
// remains unchanged.
tok!("++") | tok!("--") => is_expr_allowed,
tok!('`') => {
// If we are in template, ` terminates template.
if let Some(TokenContext::Tpl { .. }) = context.current() {
context.pop();
} else {
context.push(TokenContext::Tpl { start });
}
false
}
// tt.jsxTagStart.updateContext
Token::JSXTagStart => {
context.push(TokenContext::JSXExpr); // treat as beginning of JSX expression
context.push(TokenContext::JSXOpeningTag); // start opening tag context
false
}
// tt.jsxTagEnd.updateContext
Token::JSXTagEnd => {
let out = context.pop();
if (out == Some(TokenContext::JSXOpeningTag)
&& prev == Some(TokenType::BinOp(BinOpToken::Div)))
|| out == Some(TokenContext::JSXClosingTag)
{
context.pop();
context.current() == Some(TokenContext::JSXExpr)
} else {
true
}
}
_ => next.before_expr(),
}
}
}
}
#[derive(Clone, Default)]
#[cfg(debug_assertions)]
pub struct TokenContexts(pub(crate) Vec<TokenContext>);
#[derive(Clone, Default)]
#[cfg(not(debug_assertions))]
pub struct TokenContexts(pub(crate) SmallVec<[TokenContext; 32]>);
impl TokenContexts {
/// Returns true if the following `LBrace` token starts a block statement,
/// according to `ctx`, `prev`, and `is_expr_allowed`.
fn is_brace_block(
&self,
prev: Option<TokenType>,
had_line_break: bool,
is_expr_allowed: bool,
) -> bool {
if let Some(TokenType::Colon) = prev {
match self.current() {
Some(TokenContext::BraceStmt) => return true,
// `{ a: {} }`
// ^ ^
Some(TokenContext::BraceExpr) => return false,
_ => {}
};
}
match prev {
// function a() {
// return { a: "" };
// }
// function a() {
// return
// {
// function b(){}
// };
// }
Some(TokenType::Keyword(Return)) | Some(TokenType::Keyword(Yield)) => {
return had_line_break;
}
Some(TokenType::Keyword(Else))
| Some(TokenType::Semi)
| None
| Some(TokenType::RParen) => {
return true;
}
// If previous token was `{`
Some(TokenType::LBrace) => return self.current() == Some(TokenContext::BraceStmt),
// `class C<T> { ... }`
Some(TokenType::BinOp(Lt)) | Some(TokenType::BinOp(Gt)) => return true,
// () => {}
Some(TokenType::Arrow) => return true,
_ => {}
}
!is_expr_allowed
}
pub fn len(&self) -> usize {
self.0.len()
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn pop(&mut self) -> Option<TokenContext> {
let opt = self.0.pop();
if cfg!(feature = "debug") {
trace!("context.pop({:?}): {:?}", opt, self.0);
}
opt
}
pub fn current(&self) -> Option<TokenContext> {
self.0.last().cloned()
}
fn push(&mut self, t: TokenContext)
|
}
/// The algorithm used to determine whether a regexp can appear at a
/// given point in the program is loosely based on sweet.js' approach.
/// See https://github.com/mozilla/sweet.js/wiki/design
#[derive(Debug, Clone, Copy, PartialEq, Eq, Kind)]
#[kind(function(is_expr = "bool", preserve_space = "bool"))]
pub enum TokenContext {
BraceStmt,
#[kind(is_expr)]
BraceExpr,
#[kind(is_expr)]
TplQuasi,
ParenStmt {
/// Is this `for` loop?
is_for_loop: bool,
},
#[kind(is_expr)]
ParenExpr,
#[kind(is_expr, preserve_space)]
Tpl {
/// Start of a template literal.
start: BytePos,
},
#[kind(is_expr)]
FnExpr,
JSXOpeningTag,
JSXClosingTag,
#[kind(is_expr, preserve_space)]
JSXExpr,
}
#[cfg(test)]
pub(crate) fn with_lexer<F, Ret>(
syntax: Syntax,
target: EsVersion,
s: &str,
f: F,
) -> Result<Ret, ::testing::StdErr>
where
F: FnOnce(&mut Lexer<'_, crate::lexer::input::StringInput<'_>>) -> Result<Ret, ()>,
{
crate::with_test_sess(s, |_, fm| {
let mut l = Lexer::new(syntax, target, fm, None);
let res = f(&mut l);
#[cfg(debug_assertions)]
let c = vec![TokenContext::BraceStmt];
#[cfg(debug_assertions)]
debug_assert_eq!(l.state.context.0, c);
res
})
}
#[cfg(test)]
pub(crate) fn lex(syntax: Syntax, s: &'static str) -> Vec<TokenAndSpan> {
with_lexer(syntax, Default::default(), s, |l| Ok(l.collect())).unwrap()
}
/// lex `s` within module context.
#[cfg(test)]
pub(crate) fn lex_module_errors(syntax: Syntax, s: &'static str) -> Vec<Error> {
with_lexer(syntax, Default::default(), s, |l| {
l.ctx.strict = true;
l.ctx.module = true;
let _: Vec<_> = l.collect();
Ok(l.take_errors())
})
.unwrap()
}
#[cfg(test)]
pub(crate) fn lex_tokens(syntax: Syntax, s: &'static str) -> Vec<Token> {
with_lexer(syntax, Default::default(), s, |l| {
Ok(l.map(|ts| ts.token).collect())
})
.unwrap()
}
/// Returns `(tokens, recovered_errors)`. `(tokens)` may contain an error token
/// if the lexer fails to recover from it.
#[cfg(test)]
pub(crate) fn lex_errors(syntax: Syntax, s: &'static str) -> (Vec<Token>, Vec<Error>) {
with_lexer(syntax, EsVersion::Es2020, s, |l| {
let tokens = l.map(|ts| ts.token).collect();
let errors = l.take_errors();
Ok((tokens, errors))
})
.unwrap()
}
|
{
self.0.push(t);
if cfg!(feature = "debug") {
trace!("context.push({:?}): {:?}", t, self.0);
}
}
|
type_conversions64.go
|
package modbus
import (
"encoding/binary"
"fmt"
"math"
)
type convert64 func([]byte) uint64
func binaryMSWLEU64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:]))
}
func binaryLSWBEU64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:]))
}
func endianessConverter64(byteOrder string) (convert64, error) {
switch byteOrder {
case "ABCD": // Big endian (Motorola)
return binary.BigEndian.Uint64, nil
case "BADC": // Big endian with bytes swapped
return binaryMSWLEU64, nil
case "CDAB": // Little endian with bytes swapped
return binaryLSWBEU64, nil
case "DCBA": // Little endian (Intel)
return binary.LittleEndian.Uint64, nil
}
return nil, fmt.Errorf("invalid byte-order: %s", byteOrder)
}
// I64 - no scale
func determineConverterI64(outType, byteOrder string) (fieldConverterFunc, error) {
tohost, err := endianessConverter64(byteOrder)
if err != nil {
return nil, err
}
switch outType {
case "native", "INT64":
return func(b []byte) interface{} {
return int64(tohost(b))
}, nil
case "UINT64":
return func(b []byte) interface{} {
in := int64(tohost(b))
return uint64(in)
}, nil
case "FLOAT64":
return func(b []byte) interface{} {
in := int64(tohost(b))
return float64(in)
}, nil
}
return nil, fmt.Errorf("invalid output data-type: %s", outType)
}
// U64 - no scale
func
|
(outType, byteOrder string) (fieldConverterFunc, error) {
tohost, err := endianessConverter64(byteOrder)
if err != nil {
return nil, err
}
switch outType {
case "INT64":
return func(b []byte) interface{} {
return int64(tohost(b))
}, nil
case "native", "UINT64":
return func(b []byte) interface{} {
return tohost(b)
}, nil
case "FLOAT64":
return func(b []byte) interface{} {
return float64(tohost(b))
}, nil
}
return nil, fmt.Errorf("invalid output data-type: %s", outType)
}
// F64 - no scale
func determineConverterF64(outType, byteOrder string) (fieldConverterFunc, error) {
tohost, err := endianessConverter64(byteOrder)
if err != nil {
return nil, err
}
switch outType {
case "native", "FLOAT64":
return func(b []byte) interface{} {
raw := tohost(b)
return math.Float64frombits(raw)
}, nil
}
return nil, fmt.Errorf("invalid output data-type: %s", outType)
}
// I64 - scale
func determineConverterI64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) {
tohost, err := endianessConverter64(byteOrder)
if err != nil {
return nil, err
}
switch outType {
case "native":
return func(b []byte) interface{} {
in := int64(tohost(b))
return int64(float64(in) * scale)
}, nil
case "INT64":
return func(b []byte) interface{} {
in := int64(tohost(b))
return int64(float64(in) * scale)
}, nil
case "UINT64":
return func(b []byte) interface{} {
in := int64(tohost(b))
return uint64(float64(in) * scale)
}, nil
case "FLOAT64":
return func(b []byte) interface{} {
in := int64(tohost(b))
return float64(in) * scale
}, nil
}
return nil, fmt.Errorf("invalid output data-type: %s", outType)
}
// U64 - scale
func determineConverterU64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) {
tohost, err := endianessConverter64(byteOrder)
if err != nil {
return nil, err
}
switch outType {
case "native":
return func(b []byte) interface{} {
in := tohost(b)
return uint64(float64(in) * scale)
}, nil
case "INT64":
return func(b []byte) interface{} {
in := tohost(b)
return int64(float64(in) * scale)
}, nil
case "UINT64":
return func(b []byte) interface{} {
in := tohost(b)
return uint64(float64(in) * scale)
}, nil
case "FLOAT64":
return func(b []byte) interface{} {
in := tohost(b)
return float64(in) * scale
}, nil
}
return nil, fmt.Errorf("invalid output data-type: %s", outType)
}
// F64 - scale
func determineConverterF64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) {
tohost, err := endianessConverter64(byteOrder)
if err != nil {
return nil, err
}
switch outType {
case "native", "FLOAT64":
return func(b []byte) interface{} {
raw := tohost(b)
in := math.Float64frombits(raw)
return in * scale
}, nil
}
return nil, fmt.Errorf("invalid output data-type: %s", outType)
}
|
determineConverterU64
|
draw.rs
|
use crate::color::ARGB;
use crate::pixel::PixelSquare;
#[inline]
fn
|
(x: isize, y: isize, r: isize) -> bool {
(x - r).pow(2) + (y - r).pow(2) < r.pow(2)
}
#[inline]
fn border_color(color: ARGB) -> u32 {
if color.is_dark() {
ARGB::WHITE.into()
} else {
ARGB::BLACK.into()
}
}
pub fn draw_magnifying_glass(
cursor: &mut PixelSquare<&mut [u32]>,
screenshot: &PixelSquare<&[ARGB]>,
pixel_size: usize,
) {
assert!(pixel_size % 2 != 0, "pixel_size must be odd");
assert!(cursor.width() % 2 != 0, "cursor.width must be odd");
assert!(screenshot.width() % 2 != 0, "screenshot.width must be odd");
let transparent: u32 = ARGB::TRANSPARENT.into();
let pixel_size = pixel_size as isize;
let cursor_width = cursor.width() as isize;
let screenshot_width = screenshot.width() as isize;
let border_width = 1;
let border_radius = cursor_width / 2;
let content_radius = border_radius - border_width;
let cursor_center = cursor_width / 2;
let cursor_center_pixel = cursor_center - pixel_size / 2;
let screenshot_center = screenshot_width / 2;
let offset = screenshot_center * pixel_size - cursor_center_pixel;
for cx in 0..cursor_width {
for cy in 0..cursor_width {
// screenshot coordinates
let sx = ((cx + offset) / pixel_size) as usize;
let sy = ((cy + offset) / pixel_size) as usize;
let screenshot_color = screenshot[(sx, sy)];
// set cursor pixel
cursor[(cx as usize, cy as usize)] = if is_inside_circle(cx, cy, content_radius) {
let is_grid_line =
(cx + offset) % pixel_size == 0 || (cy + offset) % pixel_size == 0;
if is_grid_line {
let is_center_x =
cx >= cursor_center_pixel && cx <= cursor_center_pixel + pixel_size;
let is_center_y =
cy >= cursor_center_pixel && cy <= cursor_center_pixel + pixel_size;
// center pixel's border color
if is_center_x && is_center_y {
border_color(screenshot_color)
} else {
// grid color
if screenshot_color.is_dark() {
screenshot_color.lighten(0.2).into()
} else {
screenshot_color.darken(0.2).into()
}
}
} else {
screenshot_color.into()
}
} else if is_inside_circle(cx + border_width, cy + border_width, border_radius) {
border_color(screenshot_color)
} else {
transparent
};
}
}
}
|
is_inside_circle
|
operator-remove.go
|
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"fmt"
"strings"
"time"
"github.com/spf13/cobra"
"istio.io/istio/operator/pkg/kubectlcmd"
"istio.io/istio/operator/pkg/manifest"
"istio.io/istio/operator/pkg/object"
"istio.io/pkg/log"
)
type operatorRemoveArgs struct {
operatorInitArgs
// force proceeds even if there are validation errors
force bool
}
type manifestDeleter func(manifestStr, componentName string, opts *kubectlcmd.Options, l *Logger) bool
var (
defaultManifestDeleter = deleteManifest
)
func addOperatorRemoveFlags(cmd *cobra.Command, oiArgs *operatorRemoveArgs) {
addOperatorInitFlags(cmd, &oiArgs.operatorInitArgs)
cmd.PersistentFlags().BoolVar(&oiArgs.force, "force", false, "Proceed even with errors")
}
func operatorRemoveCmd(rootArgs *rootArgs, orArgs *operatorRemoveArgs) *cobra.Command {
return &cobra.Command{
Use: "remove",
Short: "Removes the Istio operator controller from the cluster.",
Long: "The remove subcommand removes the Istio operator controller from the cluster.",
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
l := NewLogger(rootArgs.logToStdErr, cmd.OutOrStdout(), cmd.OutOrStderr())
operatorRemove(rootArgs, orArgs, l, defaultManifestDeleter)
}}
}
// operatorRemove removes the Istio operator controller from the cluster.
func operatorRemove(args *rootArgs, orArgs *operatorRemoveArgs, l *Logger, deleteManifestFunc manifestDeleter) {
initLogsOrExit(args)
installed, err := isControllerInstalled(orArgs.kubeConfigPath, orArgs.context, orArgs.operatorNamespace)
if installed && err != nil {
l.logAndFatal(err)
}
if !installed {
l.logAndPrintf("Operator controller is not installed in %s namespace (no Deployment detected).", orArgs.operatorNamespace)
if !orArgs.force {
l.logAndFatal("Aborting, use --force to override.")
}
}
l.logAndPrintf("Using operator Deployment image: %s/operator:%s", orArgs.hub, orArgs.tag)
mstr, err := renderOperatorManifest(args, &orArgs.operatorInitArgs, l)
if err != nil {
l.logAndFatal(err)
}
log.Infof("Using the following manifest to install operator:\n%s\n", mstr)
opts := &kubectlcmd.Options{
DryRun: args.dryRun,
Verbose: args.verbose,
WaitTimeout: 1 * time.Minute,
Kubeconfig: orArgs.kubeConfigPath,
Context: orArgs.context,
}
if _, err := manifest.InitK8SRestClient(opts.Kubeconfig, opts.Context); err != nil {
l.logAndFatal(err)
}
success := deleteManifestFunc(mstr, "Operator", opts, l)
if !success {
l.logAndPrint("\n*** Errors were logged during deleteManifestFunc operation. Please check logs above. ***\n")
return
}
l.logAndPrint("\n*** Success. ***\n")
}
func
|
(manifestStr, componentName string, opts *kubectlcmd.Options, l *Logger) bool {
l.logAndPrintf("Deleting manifest for component %s...", componentName)
objs, err := object.ParseK8sObjectsFromYAMLManifest(manifestStr)
if err != nil {
l.logAndPrint("Parse error: ", err, "\n")
return false
}
stdout, stderr, err := kubectlcmd.New().Delete(manifestStr, opts)
success := true
if err != nil {
cs := fmt.Sprintf("Component %s delete returned the following errors:", componentName)
l.logAndPrintf("\n%s\n%s", cs, strings.Repeat("=", len(cs)))
l.logAndPrint("Error: ", err, "\n")
success = false
} else {
l.logAndPrintf("Component %s deleted successfully.", componentName)
if opts.Verbose {
l.logAndPrintf("The following objects were deleted:\n%s", k8sObjectsString(objs))
}
}
if !ignoreError(stderr) {
l.logAndPrint("Error detail:\n", stderr, "\n")
l.logAndPrint(stdout, "\n")
success = false
}
return success
}
|
deleteManifest
|
dataset.py
|
import tensorflow as tf
# tf.enable_eager_execution()
class Dataset(object):
def
|
(self, params, mode):
if mode == tf.estimator.ModeKeys.TRAIN:
features_path = params["train_features_file"]
labels_path = params["train_labels_file"]
elif mode == tf.estimator.ModeKeys.EVAL:
features_path = params["eval_features_file"]
labels_path = params["eval_labels_file"]
elif mode == tf.estimator.ModeKeys.PREDICT:
features_path = params["test_features_file"]
labels_path = params["test_labels_file"]
else:
raise ValueError("wrong mode!!!")
features_dataset, labels_dataset = self._load_dataset(features_path, labels_path, mode)
if mode == tf.estimator.ModeKeys.PREDICT:
dataset = features_dataset.map(lambda x: tf.string_split([x]).values)
dataset = dataset.shuffle(buffer_size=params["buffer_size"],
reshuffle_each_iteration=params["reshuffle_each_iteration"])
dataset = dataset.prefetch(buffer_size=params["buffer_size"])
dataset = dataset.map(lambda src: (src, tf.size(src)))
dataset = dataset.padded_batch(batch_size=params["batch_size"],
padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
padding_values=(tf.constant("<blank>"), 0))
iterator = dataset.make_one_shot_iterator()
src, src_len = iterator.get_next()
features = {
"input": src,
"input_length": src_len
}
labels = None
else:
dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
dataset = dataset.map(lambda x, y: (tf.string_split([x]).values, tf.string_split([y]).values))
dataset = dataset.repeat(params["repeat"]).shuffle(buffer_size=params["buffer_size"],
reshuffle_each_iteration=params[
"reshuffle_each_iteration"])
dataset = dataset.prefetch(buffer_size=params["buffer_size"])
if params["src_max_len"] > 0:
dataset = dataset.map(
lambda src, tgt: (src[:params["src_max_len"]], tgt))
if params["tgt_max_len"] > 0:
dataset = dataset.map(
lambda src, tgt: (src, tgt[:params["tgt_max_len"]]))
dataset = dataset.map(
lambda src, tgt: (src,
tf.concat((["<s>"], tgt), 0),
tf.concat((tgt, ["</s>"]), 0)),
num_parallel_calls=params["num_parallel_calls"])
dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_out)))
dataset = dataset.padded_batch(batch_size=params["batch_size"],
padded_shapes=(
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([]),
tf.TensorShape([])),
padding_values=(
tf.constant("<blank>", dtype=tf.string),
tf.constant("<s>", dtype=tf.string),
tf.constant("</s>", dtype=tf.string),
0,
0))
iterator = dataset.make_one_shot_iterator()
src, tgt_in, tgt_out, input_length, output_length = iterator.get_next()
features = {
"input": src,
"input_length": input_length
}
labels = {
"output_in": tgt_in,
"output_out": tgt_out,
"output_length": output_length
}
return features, labels
@staticmethod
def _load_dataset(features_path, labels_path, mode):
''' Load the dataset from text files.
:param mode:
:return:
'''
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
features_dataset = tf.data.TextLineDataset(filenames=features_path)
labels_dataset = tf.data.TextLineDataset(filenames=labels_path)
return features_dataset, labels_dataset
elif mode == tf.estimator.ModeKeys.PREDICT:
features_dataset = tf.data.TextLineDataset(filenames=features_path)
return features_dataset, None
data_util = Dataset()
|
get_dataset
|
TrimExtension.ts
|
import { GenerateTypescriptPathProcessor } from "../GenerateTypescriptPathProcessor";
import { GenerateTypescriptPathArguments } from "../GenerateTypescriptPathArguments";
import upath = require("upath");
import { GenerateTypescriptPathMessages } from "../GenerateTypescriptPathMessages";
import S from "string";
|
export class TrimExtension extends GenerateTypescriptPathProcessor {
public static readonly Instance = new TrimExtension();
public async SafeExecute(args: GenerateTypescriptPathArguments): Promise<void> {
let p = args.GetResult();
let result = upath.trimExt(p);
args.SetResultWithInformation(
result,
S(GenerateTypescriptPathMessages.TrimmedExtension)
.template({ path: p, result: result }).s
);
}
public SafeCondition(args: GenerateTypescriptPathArguments): boolean {
return super.SafeCondition(args) && this.CustomCondition(args);
}
public CustomCondition(args: GenerateTypescriptPathArguments): boolean {
let safeCondition = true;
return safeCondition;
}
}
| |
scripting.py
|
import requests
from types import FunctionType
import plotly.io as pio
import json
from .core import EndaqCloud, ENV_PRODUCTION, ENV_STAGING, ENV_DEVELOP
__all__ = [
'create_cloud_dashboard_output',
'produce_dashboard_plots',
]
def create_cloud_dashboard_output(name_to_fig: dict) -> str:
"""
A function which makes producing the json based string used to produce custom enDAQ Cloud report
dashboards easy.
:param name_to_fig: A dictionary mapping the desired names/titles of plots to a Plotly figure.
The dictionary must have 4 elements, and the ordering DOES matter as it dictates the order
the plots will be added to the dashboard. This means the ordering of Python dictionaries which
appears in Python 3.7 is crucial for using this!
:return: The json based string which is to be given as the variable 'output' in enDAQ cloud
custom dashboards
"""
if not isinstance(name_to_fig, dict):
raise TypeError(f"'name_to_fig' parameter must be a dictionary, but was given {type(name_to_fig)} instead")
if len(name_to_fig) != 4:
raise ValueError("The number of (key, value) pairs in 'name_to_fig' must be exactly 4, "
f"but {len(name_to_fig)} are given")
return "[" + ", ".join([v.to_json()[:-1] + ', "title": "' + k + '"}' for k, v in name_to_fig.items()]) + "]"
def
|
(dashboard_script_fn: FunctionType, api_key: str, max_num_files: int = 100,
environment: str = 'production', display_plots: bool = True) -> list:
"""
A function used to simulate a run of a desired enDAQ Cloud custom report script without needing to use
``cloud.endaq.com``.
:param dashboard_script_fn: A function accepting the parameters `files` and `file_download_url`, which has all
the exact code that would be put into a enDAQ Cloud custom report script, followed by one final line:
`return output`
:param api_key: The enDAQ Cloud API key
:param max_num_files: The maximum number of files to get data about. Specifically, this is used to
specify how many of the most recently uploaded IDE files in the cloud will have their info passed to
your custom report script (through a list of json blobs, as parameter 'files')
:param environment: The version of the enDAQ Cloud to communicate with, the options are 'production', 'staging',
or 'develop'. This should only be used internally at Mide
:param display_plots: If the plots being produced should be displayed
:return: A list of the 4 plotly figures produced
"""
if environment == 'production':
api_access_url = ENV_PRODUCTION
elif environment == 'staging':
api_access_url = ENV_STAGING
elif environment == 'develop':
api_access_url = ENV_DEVELOP
else:
raise ValueError("Only 'production', 'staging', and 'develop' may be given for the 'environment' parameter, "
f" but {environment} was given instead.")
parameters = {"x-api-key": api_key}
cloud_obj = EndaqCloud(api_key, env=api_access_url)
files = cloud_obj._get_files_json_response(limit=max_num_files)
most_recent_file_id = files[-1]['id']
file_download_url = requests.get(
api_access_url + '/api/v1/files/download/' + most_recent_file_id,
headers=parameters
).json()['url']
output = dashboard_script_fn(files=files, file_download_url=file_download_url)
figures = [pio.from_json(json.dumps(blob)) for blob in json.loads(output)]
if display_plots:
for fig in figures:
fig.show()
return figures
|
produce_dashboard_plots
|