file_name | prefix | suffix | middle | fim_type
---|---|---|---|---
kubernetes.go | // Copyright (c) 2017 Pulcy.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"sync"
"github.com/juju/errgo"
"github.com/op/go-logging"
"github.com/spf13/pflag"
k8s "github.com/YakLabs/k8s-client"
k8s_http "github.com/YakLabs/k8s-client/http"
api "github.com/pulcy/prometheus-conf-api"
"github.com/pulcy/prometheus-conf/service"
"github.com/pulcy/prometheus-conf/util"
)
var (
maskAny = errgo.MaskFunc(errgo.Any)
)
const (
logName = "kubernetes"
metricsAnnotation = "j2.pulcy.com/metrics"
maxRecentErrors = 30
)
type k8sPlugin struct {
LogLevel string
ETCDTLSConfig service.TLSConfig
KubeletTLSConfig service.TLSConfig
log *logging.Logger
client k8s.Client
lastUpdate *k8sUpdate
recentErrors int
nodeExporterPort int
}
type k8sUpdate struct {
log *logging.Logger
nodeExporterPort int
nodes []k8s.Node
services []k8s.Service
etcdTLSConfig service.TLSConfig
kubeletTLSConfig service.TLSConfig
}
func init() {
service.RegisterPlugin("kubernetes", &k8sPlugin{
log: logging.MustGetLogger(logName),
})
}
// Configure the command line flags needed by the plugin.
func (p *k8sPlugin) Setup(flagSet *pflag.FlagSet) {
flagSet.StringVar(&p.ETCDTLSConfig.CAFile, "kubernetes-etcd-ca-file", "", "CA certificate used by ETCD")
flagSet.StringVar(&p.ETCDTLSConfig.CertFile, "kubernetes-etcd-cert-file", "", "Public key file used by ETCD")
flagSet.StringVar(&p.ETCDTLSConfig.KeyFile, "kubernetes-etcd-key-file", "", "Private key file used by ETCD")
flagSet.StringVar(&p.KubeletTLSConfig.CAFile, "kubelet-ca-file", "", "CA certificate used by Kubelet")
flagSet.StringVar(&p.KubeletTLSConfig.CertFile, "kubelet-cert-file", "", "Public key file used by Kubelet")
flagSet.StringVar(&p.KubeletTLSConfig.KeyFile, "kubelet-key-file", "", "Private key file used by Kubelet")
flagSet.StringVar(&p.LogLevel, "kubernetes-log-level", "", "Log level of kubernetes plugin")
}
// Start the plugin. Send a value on the given channel to trigger an update of the configuration.
func (p *k8sPlugin) Start(config service.ServiceConfig, trigger chan string) error {
if err := util.SetLogLevel(p.LogLevel, config.LogLevel, logName); err != nil {
return maskAny(err)
}
// Setup kubernetes client
p.nodeExporterPort = config.NodeExporterPort
c, err := k8s_http.NewInCluster()
if err != nil {
p.log.Infof("No kubernetes available: %v", err)
return nil
}
p.client = c
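// Each watch below runs in its own goroutine: the inner goroutine forwards watch events to the
// trigger channel, while the outer loop re-establishes the watch whenever WatchNodes/WatchServices returns.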
// Watch nodes for changes
go func() {
for {
nodeEvents := make(chan k8s.NodeWatchEvent)
go func() {
for evt := range nodeEvents {
if evt.Type() == k8s.WatchEventTypeAdded || evt.Type() == k8s.WatchEventTypeDeleted {
p.log.Debugf("got node event of type %s", evt.Type())
trigger <- fmt.Sprintf("node-%s", evt.Type())
}
}
}()
if err := p.client.WatchNodes(nil, nodeEvents); err != nil {
p.log.Errorf("failed to watch nodes: %#v", err)
}
}
}()
// Watch services for changes
go func() {
for {
serviceEvents := make(chan k8s.ServiceWatchEvent)
go func() {
for evt := range serviceEvents {
p.log.Debugf("got service event of type %s", evt.Type())
trigger <- fmt.Sprintf("service-%s", evt.Type())
}
}()
if err := p.client.WatchServices("", nil, serviceEvents); err != nil {
p.log.Errorf("failed to watch services: %#v", err)
}
}
}()
// No custom triggers here, just update once in a while.
return nil
}
func (p *k8sPlugin) | () (service.PluginUpdate, error) {
if p.client == nil {
return nil, nil
}
// Get nodes
p.log.Debugf("fetching kubernetes nodes")
nodes, nodesErr := p.client.ListNodes(nil)
// Get services
p.log.Debugf("fetching kubernetes services")
services, servicesErr := p.client.ListServices("", nil)
if nodesErr != nil || servicesErr != nil {
if nodesErr != nil {
p.log.Warningf("Failed to fetch kubernetes nodes: %#v (using previous ones)", nodesErr)
}
if servicesErr != nil {
p.log.Warningf("Failed to fetch kubernetes services: %#v (using previous ones)", servicesErr)
}
p.recentErrors++
if p.recentErrors > maxRecentErrors {
p.log.Warningf("Too many recent kubernetes errors, restarting")
os.Exit(1)
}
return p.lastUpdate, nil
} else {
p.recentErrors = 0
update := &k8sUpdate{
log: p.log,
nodeExporterPort: p.nodeExporterPort,
nodes: nodes.Items,
services: services.Items,
etcdTLSConfig: p.ETCDTLSConfig,
kubeletTLSConfig: p.KubeletTLSConfig,
}
p.lastUpdate = update
return update, nil
}
}
// Extract data from Kubernetes to create node_exporter targets
func (p *k8sUpdate) CreateNodes() ([]service.ScrapeConfig, error) {
// Build scrape config list
scNode := service.StaticConfig{}
scNode.Label("source", "node")
scEtcd := service.StaticConfig{}
scEtcd.Label("source", "etcd")
for _, node := range p.nodes {
for _, addr := range node.Status.Addresses {
if addr.Type == "InternalIP" {
ip := addr.Address
p.log.Debugf("found kubernetes node %s", ip)
scNode.Targets = append(scNode.Targets, fmt.Sprintf("%s:%d", ip, p.nodeExporterPort))
if node.Labels["core"] == "true" {
scEtcd.Targets = append(scEtcd.Targets, fmt.Sprintf("%s:2379", ip))
}
}
}
}
scrapeConfigNode := service.ScrapeConfig{
JobName: "node",
StaticConfigs: []service.StaticConfig{scNode},
RelabelConfigs: []service.RelabelConfig{
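// Split the scrape address ("host:port") into an "instance" label holding the host and a "port" label holding the port number.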
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
scrapeConfigETCD := service.ScrapeConfig{
JobName: "etcd",
StaticConfigs: []service.StaticConfig{scEtcd},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labeldrop",
Regex: "etcd_debugging.*",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
if p.etcdTLSConfig.IsConfigured() {
scrapeConfigETCD.Scheme = "https"
scrapeConfigETCD.TLSConfig = &service.TLSConfig{
CAFile: p.etcdTLSConfig.CAFile,
CertFile: p.etcdTLSConfig.CertFile,
KeyFile: p.etcdTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sNodes := service.ScrapeConfig{
JobName: "kubernetes-nodes",
ScrapeInterval: "5m",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "node",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_node_label_(.+)",
},
},
}
if p.kubeletTLSConfig.IsConfigured() {
scrapeConfigK8sNodes.Scheme = "https"
scrapeConfigK8sNodes.TLSConfig = &service.TLSConfig{
CAFile: p.kubeletTLSConfig.CAFile,
CertFile: p.kubeletTLSConfig.CertFile,
KeyFile: p.kubeletTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sEndpoinds := service.ScrapeConfig{
JobName: "kubernetes-endpoints",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "endpoints",
},
},
RelabelConfigs: []service.RelabelConfig{
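// Follow the conventional prometheus.io/* service annotations: keep only services annotated with
// prometheus.io/scrape=true and let the scheme, path and port annotations override the scrape defaults.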
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scrape"},
Action: "keep",
Regex: "true",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scheme"},
Action: "replace",
TargetLabel: "__scheme__",
Regex: "(https?)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_path"},
Action: "replace",
TargetLabel: "__metrics_path__",
Regex: "(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__address__", "__meta_kubernetes_service_annotation_prometheus_io_port"},
Action: "replace",
TargetLabel: "__address__",
Regex: `(.+)(?::\d+);(\d+)`,
Replacement: "$1:$2",
},
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_service_label_(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_namespace"},
Action: "replace",
TargetLabel: "kubernetes_namespace",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_pod_name"},
Action: "replace",
TargetLabel: "kubernetes_pod_name",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
service.RelabelConfig{
SourceLabels: []string{"j2_job_name"},
Action: "replace",
TargetLabel: "job",
},
service.RelabelConfig{
SourceLabels: []string{"j2_taskgroup_name"},
Action: "replace",
TargetLabel: "taskgroup",
},
},
}
return []service.ScrapeConfig{scrapeConfigNode, scrapeConfigETCD, scrapeConfigK8sNodes, scrapeConfigK8sEndpoinds}, nil
}
// CreateRules creates all rules this plugin is aware of.
// The returned string list should contain the content of the various rules.
func (p *k8sUpdate) CreateRules() ([]string, error) {
// Build URL list
var urls []string
for _, svc := range p.services {
ann, ok := svc.Annotations[metricsAnnotation]
if !ok || ann == "" {
continue
}
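// The annotation value is expected to be a JSON-encoded list of MetricsServiceRecord entries
// (from github.com/pulcy/prometheus-conf-api), each optionally pointing at a rules path to fetch.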
var metricsRecords []api.MetricsServiceRecord
if err := json.Unmarshal([]byte(ann), &metricsRecords); err != nil {
p.log.Errorf("Failed to unmarshal metrics annotation in service '%s.%s': %#v", svc.Namespace, svc.Name, err)
continue
}
// Get service IP
if svc.Spec.Type != k8s.ServiceTypeClusterIP {
p.log.Errorf("Cannot put metrics rules in services of type other than ClusterIP ('%s.%s')", svc.Namespace, svc.Name)
continue
}
clusterIP := svc.Spec.ClusterIP
// Collect URLs
for _, m := range metricsRecords {
if m.RulesPath == "" {
continue
}
u := url.URL{
Scheme: "http",
Host: net.JoinHostPort(clusterIP, strconv.Itoa(m.ServicePort)),
Path: m.RulesPath,
}
urls = append(urls, u.String())
}
}
if len(urls) == 0 {
return nil, nil
}
// Fetch rules from URLs
rulesChan := make(chan string, len(urls))
wg := sync.WaitGroup{}
for _, url := range urls {
wg.Add(1)
go func(url string) {
defer wg.Done()
p.log.Debugf("fetching rules from %s", url)
resp, err := http.Get(url)
if err != nil {
p.log.Errorf("Failed to fetch rule from '%s': %#v", url, err)
return
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
p.log.Errorf("Failed to fetch rule from '%s': Status %d", url, resp.StatusCode)
return
}
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
p.log.Errorf("Failed to read rule from '%s': %#v", url, err)
return
}
rulesChan <- string(raw)
p.log.Debugf("done fetching rules from %s", url)
}(url)
}
wg.Wait()
close(rulesChan)
var result []string
for rule := range rulesChan {
result = append(result, rule)
}
p.log.Debugf("Found %d rules", len(result))
return result, nil
}
| Update | identifier_name |
kubernetes.go | // Copyright (c) 2017 Pulcy.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"sync"
"github.com/juju/errgo"
"github.com/op/go-logging"
"github.com/spf13/pflag"
k8s "github.com/YakLabs/k8s-client"
k8s_http "github.com/YakLabs/k8s-client/http"
api "github.com/pulcy/prometheus-conf-api"
"github.com/pulcy/prometheus-conf/service"
"github.com/pulcy/prometheus-conf/util"
)
var (
maskAny = errgo.MaskFunc(errgo.Any)
)
const (
logName = "kubernetes"
metricsAnnotation = "j2.pulcy.com/metrics"
maxRecentErrors = 30
)
type k8sPlugin struct {
LogLevel string
ETCDTLSConfig service.TLSConfig
KubeletTLSConfig service.TLSConfig
log *logging.Logger
client k8s.Client
lastUpdate *k8sUpdate
recentErrors int
nodeExporterPort int
}
type k8sUpdate struct {
log *logging.Logger
nodeExporterPort int
nodes []k8s.Node
services []k8s.Service
etcdTLSConfig service.TLSConfig
kubeletTLSConfig service.TLSConfig
}
func init() {
service.RegisterPlugin("kubernetes", &k8sPlugin{
log: logging.MustGetLogger(logName),
})
}
| flagSet.StringVar(&p.ETCDTLSConfig.KeyFile, "kubernetes-etcd-key-file", "", "Private key file used by ETCD")
flagSet.StringVar(&p.KubeletTLSConfig.CAFile, "kubelet-ca-file", "", "CA certificate used by Kubelet")
flagSet.StringVar(&p.KubeletTLSConfig.CertFile, "kubelet-cert-file", "", "Public key file used by Kubelet")
flagSet.StringVar(&p.KubeletTLSConfig.KeyFile, "kubelet-key-file", "", "Private key file used by Kubelet")
flagSet.StringVar(&p.LogLevel, "kubernetes-log-level", "", "Log level of kubernetes plugin")
}
// Start the plugin. Send a value on the given channel to trigger an update of the configuration.
func (p *k8sPlugin) Start(config service.ServiceConfig, trigger chan string) error {
if err := util.SetLogLevel(p.LogLevel, config.LogLevel, logName); err != nil {
return maskAny(err)
}
// Setup kubernetes client
p.nodeExporterPort = config.NodeExporterPort
c, err := k8s_http.NewInCluster()
if err != nil {
p.log.Infof("No kubernetes available: %v", err)
return nil
}
p.client = c
// Watch nodes for changes
go func() {
for {
nodeEvents := make(chan k8s.NodeWatchEvent)
go func() {
for evt := range nodeEvents {
if evt.Type() == k8s.WatchEventTypeAdded || evt.Type() == k8s.WatchEventTypeDeleted {
p.log.Debugf("got node event of type %s", evt.Type())
trigger <- fmt.Sprintf("node-%s", evt.Type())
}
}
}()
if err := p.client.WatchNodes(nil, nodeEvents); err != nil {
p.log.Errorf("failed to watch nodes: %#v", err)
}
}
}()
// Watch services for changes
go func() {
for {
serviceEvents := make(chan k8s.ServiceWatchEvent)
go func() {
for evt := range serviceEvents {
p.log.Debugf("got service event of type %s", evt.Type())
trigger <- fmt.Sprintf("service-%s", evt.Type())
}
}()
if err := p.client.WatchServices("", nil, serviceEvents); err != nil {
p.log.Errorf("failed to watch services: %#v", err)
}
}
}()
// No custom triggers here, just update once in a while.
return nil
}
func (p *k8sPlugin) Update() (service.PluginUpdate, error) {
if p.client == nil {
return nil, nil
}
// Get nodes
p.log.Debugf("fetching kubernetes nodes")
nodes, nodesErr := p.client.ListNodes(nil)
// Get services
p.log.Debugf("fetching kubernetes services")
services, servicesErr := p.client.ListServices("", nil)
if nodesErr != nil || servicesErr != nil {
if nodesErr != nil {
p.log.Warningf("Failed to fetch kubernetes nodes: %#v (using previous ones)", nodesErr)
}
if servicesErr != nil {
p.log.Warningf("Failed to fetch kubernetes services: %#v (using previous ones)", servicesErr)
}
p.recentErrors++
if p.recentErrors > maxRecentErrors {
p.log.Warningf("Too many recent kubernetes errors, restarting")
os.Exit(1)
}
return p.lastUpdate, nil
} else {
p.recentErrors = 0
update := &k8sUpdate{
log: p.log,
nodeExporterPort: p.nodeExporterPort,
nodes: nodes.Items,
services: services.Items,
etcdTLSConfig: p.ETCDTLSConfig,
kubeletTLSConfig: p.KubeletTLSConfig,
}
p.lastUpdate = update
return update, nil
}
}
// Extract data from Kubernetes to create node_exporter targets
func (p *k8sUpdate) CreateNodes() ([]service.ScrapeConfig, error) {
// Build scrape config list
scNode := service.StaticConfig{}
scNode.Label("source", "node")
scEtcd := service.StaticConfig{}
scEtcd.Label("source", "etcd")
for _, node := range p.nodes {
for _, addr := range node.Status.Addresses {
if addr.Type == "InternalIP" {
ip := addr.Address
p.log.Debugf("found kubernetes node %s", ip)
scNode.Targets = append(scNode.Targets, fmt.Sprintf("%s:%d", ip, p.nodeExporterPort))
if node.Labels["core"] == "true" {
scEtcd.Targets = append(scEtcd.Targets, fmt.Sprintf("%s:2379", ip))
}
}
}
}
scrapeConfigNode := service.ScrapeConfig{
JobName: "node",
StaticConfigs: []service.StaticConfig{scNode},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
scrapeConfigETCD := service.ScrapeConfig{
JobName: "etcd",
StaticConfigs: []service.StaticConfig{scEtcd},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labeldrop",
Regex: "etcd_debugging.*",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
if p.etcdTLSConfig.IsConfigured() {
scrapeConfigETCD.Scheme = "https"
scrapeConfigETCD.TLSConfig = &service.TLSConfig{
CAFile: p.etcdTLSConfig.CAFile,
CertFile: p.etcdTLSConfig.CertFile,
KeyFile: p.etcdTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sNodes := service.ScrapeConfig{
JobName: "kubernetes-nodes",
ScrapeInterval: "5m",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "node",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_node_label_(.+)",
},
},
}
if p.kubeletTLSConfig.IsConfigured() {
scrapeConfigK8sNodes.Scheme = "https"
scrapeConfigK8sNodes.TLSConfig = &service.TLSConfig{
CAFile: p.kubeletTLSConfig.CAFile,
CertFile: p.kubeletTLSConfig.CertFile,
KeyFile: p.kubeletTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sEndpoinds := service.ScrapeConfig{
JobName: "kubernetes-endpoints",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "endpoints",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scrape"},
Action: "keep",
Regex: "true",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scheme"},
Action: "replace",
TargetLabel: "__scheme__",
Regex: "(https?)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_path"},
Action: "replace",
TargetLabel: "__metrics_path__",
Regex: "(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__address__", "__meta_kubernetes_service_annotation_prometheus_io_port"},
Action: "replace",
TargetLabel: "__address__",
Regex: `(.+)(?::\d+);(\d+)`,
Replacement: "$1:$2",
},
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_service_label_(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_namespace"},
Action: "replace",
TargetLabel: "kubernetes_namespace",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_pod_name"},
Action: "replace",
TargetLabel: "kubernetes_pod_name",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
service.RelabelConfig{
SourceLabels: []string{"j2_job_name"},
Action: "replace",
TargetLabel: "job",
},
service.RelabelConfig{
SourceLabels: []string{"j2_taskgroup_name"},
Action: "replace",
TargetLabel: "taskgroup",
},
},
}
return []service.ScrapeConfig{scrapeConfigNode, scrapeConfigETCD, scrapeConfigK8sNodes, scrapeConfigK8sEndpoinds}, nil
}
// CreateRules creates all rules this plugin is aware of.
// The returned string list should contain the content of the various rules.
func (p *k8sUpdate) CreateRules() ([]string, error) {
// Build URL list
var urls []string
for _, svc := range p.services {
ann, ok := svc.Annotations[metricsAnnotation]
if !ok || ann == "" {
continue
}
var metricsRecords []api.MetricsServiceRecord
if err := json.Unmarshal([]byte(ann), &metricsRecords); err != nil {
p.log.Errorf("Failed to unmarshal metrics annotation in service '%s.%s': %#v", svc.Namespace, svc.Name, err)
continue
}
// Get service IP
if svc.Spec.Type != k8s.ServiceTypeClusterIP {
p.log.Errorf("Cannot put metrics rules in services of type other than ClusterIP ('%s.%s')", svc.Namespace, svc.Name)
continue
}
clusterIP := svc.Spec.ClusterIP
// Collect URLs
for _, m := range metricsRecords {
if m.RulesPath == "" {
continue
}
u := url.URL{
Scheme: "http",
Host: net.JoinHostPort(clusterIP, strconv.Itoa(m.ServicePort)),
Path: m.RulesPath,
}
urls = append(urls, u.String())
}
}
if len(urls) == 0 {
return nil, nil
}
// Fetch rules from URLs
rulesChan := make(chan string, len(urls))
wg := sync.WaitGroup{}
for _, url := range urls {
wg.Add(1)
go func(url string) {
defer wg.Done()
p.log.Debugf("fetching rules from %s", url)
resp, err := http.Get(url)
if err != nil {
p.log.Errorf("Failed to fetch rule from '%s': %#v", url, err)
return
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
p.log.Errorf("Failed to fetch rule from '%s': Status %d", url, resp.StatusCode)
return
}
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
p.log.Errorf("Failed to read rule from '%s': %#v", url, err)
return
}
rulesChan <- string(raw)
p.log.Debugf("done fetching rules from %s", url)
}(url)
}
wg.Wait()
close(rulesChan)
var result []string
for rule := range rulesChan {
result = append(result, rule)
}
p.log.Debugf("Found %d rules", len(result))
return result, nil
} | // Configure the command line flags needed by the plugin.
func (p *k8sPlugin) Setup(flagSet *pflag.FlagSet) {
flagSet.StringVar(&p.ETCDTLSConfig.CAFile, "kubernetes-etcd-ca-file", "", "CA certificate used by ETCD")
flagSet.StringVar(&p.ETCDTLSConfig.CertFile, "kubernetes-etcd-cert-file", "", "Public key file used by ETCD") | random_line_split |
kubernetes.go | // Copyright (c) 2017 Pulcy.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"sync"
"github.com/juju/errgo"
"github.com/op/go-logging"
"github.com/spf13/pflag"
k8s "github.com/YakLabs/k8s-client"
k8s_http "github.com/YakLabs/k8s-client/http"
api "github.com/pulcy/prometheus-conf-api"
"github.com/pulcy/prometheus-conf/service"
"github.com/pulcy/prometheus-conf/util"
)
var (
maskAny = errgo.MaskFunc(errgo.Any)
)
const (
logName = "kubernetes"
metricsAnnotation = "j2.pulcy.com/metrics"
maxRecentErrors = 30
)
type k8sPlugin struct {
LogLevel string
ETCDTLSConfig service.TLSConfig
KubeletTLSConfig service.TLSConfig
log *logging.Logger
client k8s.Client
lastUpdate *k8sUpdate
recentErrors int
nodeExporterPort int
}
type k8sUpdate struct {
log *logging.Logger
nodeExporterPort int
nodes []k8s.Node
services []k8s.Service
etcdTLSConfig service.TLSConfig
kubeletTLSConfig service.TLSConfig
}
func init() {
service.RegisterPlugin("kubernetes", &k8sPlugin{
log: logging.MustGetLogger(logName),
})
}
// Configure the command line flags needed by the plugin.
func (p *k8sPlugin) Setup(flagSet *pflag.FlagSet) |
// Start the plugin. Send a value on the given channel to trigger an update of the configuration.
func (p *k8sPlugin) Start(config service.ServiceConfig, trigger chan string) error {
if err := util.SetLogLevel(p.LogLevel, config.LogLevel, logName); err != nil {
return maskAny(err)
}
// Setup kubernetes client
p.nodeExporterPort = config.NodeExporterPort
c, err := k8s_http.NewInCluster()
if err != nil {
p.log.Infof("No kubernetes available: %v", err)
return nil
}
p.client = c
// Watch nodes for changes
go func() {
for {
nodeEvents := make(chan k8s.NodeWatchEvent)
go func() {
for evt := range nodeEvents {
if evt.Type() == k8s.WatchEventTypeAdded || evt.Type() == k8s.WatchEventTypeDeleted {
p.log.Debugf("got node event of type %s", evt.Type())
trigger <- fmt.Sprintf("node-%s", evt.Type())
}
}
}()
if err := p.client.WatchNodes(nil, nodeEvents); err != nil {
p.log.Errorf("failed to watch nodes: %#v", err)
}
}
}()
// Watch services for changes
go func() {
for {
serviceEvents := make(chan k8s.ServiceWatchEvent)
go func() {
for evt := range serviceEvents {
p.log.Debugf("got service event of type %s", evt.Type())
trigger <- fmt.Sprintf("service-%s", evt.Type())
}
}()
if err := p.client.WatchServices("", nil, serviceEvents); err != nil {
p.log.Errorf("failed to watch services: %#v", err)
}
}
}()
// No custom triggers here, just update once in a while.
return nil
}
func (p *k8sPlugin) Update() (service.PluginUpdate, error) {
if p.client == nil {
return nil, nil
}
// Get nodes
p.log.Debugf("fetching kubernetes nodes")
nodes, nodesErr := p.client.ListNodes(nil)
// Get services
p.log.Debugf("fetching kubernetes services")
services, servicesErr := p.client.ListServices("", nil)
if nodesErr != nil || servicesErr != nil {
if nodesErr != nil {
p.log.Warningf("Failed to fetch kubernetes nodes: %#v (using previous ones)", nodesErr)
}
if servicesErr != nil {
p.log.Warningf("Failed to fetch kubernetes services: %#v (using previous ones)", servicesErr)
}
p.recentErrors++
if p.recentErrors > maxRecentErrors {
p.log.Warningf("Too many recent kubernetes errors, restarting")
os.Exit(1)
}
return p.lastUpdate, nil
} else {
p.recentErrors = 0
update := &k8sUpdate{
log: p.log,
nodeExporterPort: p.nodeExporterPort,
nodes: nodes.Items,
services: services.Items,
etcdTLSConfig: p.ETCDTLSConfig,
kubeletTLSConfig: p.KubeletTLSConfig,
}
p.lastUpdate = update
return update, nil
}
}
// Extract data from Kubernetes to create node_exporter targets
func (p *k8sUpdate) CreateNodes() ([]service.ScrapeConfig, error) {
// Build scrape config list
scNode := service.StaticConfig{}
scNode.Label("source", "node")
scEtcd := service.StaticConfig{}
scEtcd.Label("source", "etcd")
for _, node := range p.nodes {
for _, addr := range node.Status.Addresses {
if addr.Type == "InternalIP" {
ip := addr.Address
p.log.Debugf("found kubernetes node %s", ip)
scNode.Targets = append(scNode.Targets, fmt.Sprintf("%s:%d", ip, p.nodeExporterPort))
if node.Labels["core"] == "true" {
scEtcd.Targets = append(scEtcd.Targets, fmt.Sprintf("%s:2379", ip))
}
}
}
}
scrapeConfigNode := service.ScrapeConfig{
JobName: "node",
StaticConfigs: []service.StaticConfig{scNode},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
scrapeConfigETCD := service.ScrapeConfig{
JobName: "etcd",
StaticConfigs: []service.StaticConfig{scEtcd},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labeldrop",
Regex: "etcd_debugging.*",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
},
}
if p.etcdTLSConfig.IsConfigured() {
scrapeConfigETCD.Scheme = "https"
scrapeConfigETCD.TLSConfig = &service.TLSConfig{
CAFile: p.etcdTLSConfig.CAFile,
CertFile: p.etcdTLSConfig.CertFile,
KeyFile: p.etcdTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sNodes := service.ScrapeConfig{
JobName: "kubernetes-nodes",
ScrapeInterval: "5m",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "node",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_node_label_(.+)",
},
},
}
if p.kubeletTLSConfig.IsConfigured() {
scrapeConfigK8sNodes.Scheme = "https"
scrapeConfigK8sNodes.TLSConfig = &service.TLSConfig{
CAFile: p.kubeletTLSConfig.CAFile,
CertFile: p.kubeletTLSConfig.CertFile,
KeyFile: p.kubeletTLSConfig.KeyFile,
InsecureSkipVerify: true,
}
}
scrapeConfigK8sEndpoinds := service.ScrapeConfig{
JobName: "kubernetes-endpoints",
KubernetesConfigs: []service.KubernetesSDConfig{
service.KubernetesSDConfig{
Role: "endpoints",
},
},
RelabelConfigs: []service.RelabelConfig{
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scrape"},
Action: "keep",
Regex: "true",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scheme"},
Action: "replace",
TargetLabel: "__scheme__",
Regex: "(https?)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_path"},
Action: "replace",
TargetLabel: "__metrics_path__",
Regex: "(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__address__", "__meta_kubernetes_service_annotation_prometheus_io_port"},
Action: "replace",
TargetLabel: "__address__",
Regex: `(.+)(?::\d+);(\d+)`,
Replacement: "$1:$2",
},
service.RelabelConfig{
Action: "labelmap",
Regex: "__meta_kubernetes_service_label_(.+)",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_namespace"},
Action: "replace",
TargetLabel: "kubernetes_namespace",
},
service.RelabelConfig{
SourceLabels: []string{"__meta_kubernetes_pod_name"},
Action: "replace",
TargetLabel: "kubernetes_pod_name",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "instance",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$1",
},
service.RelabelConfig{
SourceLabels: []string{"__address__"},
Action: "replace",
TargetLabel: "port",
Regex: `(.+)(?::)(\d+)`,
Replacement: "$2",
},
service.RelabelConfig{
SourceLabels: []string{"j2_job_name"},
Action: "replace",
TargetLabel: "job",
},
service.RelabelConfig{
SourceLabels: []string{"j2_taskgroup_name"},
Action: "replace",
TargetLabel: "taskgroup",
},
},
}
return []service.ScrapeConfig{scrapeConfigNode, scrapeConfigETCD, scrapeConfigK8sNodes, scrapeConfigK8sEndpoinds}, nil
}
// CreateRules creates all rules this plugin is aware of.
// The returned string list should contain the content of the various rules.
func (p *k8sUpdate) CreateRules() ([]string, error) {
// Build URL list
var urls []string
for _, svc := range p.services {
ann, ok := svc.Annotations[metricsAnnotation]
if !ok || ann == "" {
continue
}
var metricsRecords []api.MetricsServiceRecord
if err := json.Unmarshal([]byte(ann), &metricsRecords); err != nil {
p.log.Errorf("Failed to unmarshal metrics annotation in service '%s.%s': %#v", svc.Namespace, svc.Name, err)
continue
}
// Get service IP
if svc.Spec.Type != k8s.ServiceTypeClusterIP {
p.log.Errorf("Cannot put metrics rules in services of type other than ClusterIP ('%s.%s')", svc.Namespace, svc.Name)
continue
}
clusterIP := svc.Spec.ClusterIP
// Collect URLs
for _, m := range metricsRecords {
if m.RulesPath == "" {
continue
}
u := url.URL{
Scheme: "http",
Host: net.JoinHostPort(clusterIP, strconv.Itoa(m.ServicePort)),
Path: m.RulesPath,
}
urls = append(urls, u.String())
}
}
if len(urls) == 0 {
return nil, nil
}
// Fetch rules from URLs
rulesChan := make(chan string, len(urls))
wg := sync.WaitGroup{}
for _, url := range urls {
wg.Add(1)
go func(url string) {
defer wg.Done()
p.log.Debugf("fetching rules from %s", url)
resp, err := http.Get(url)
if err != nil {
p.log.Errorf("Failed to fetch rule from '%s': %#v", url, err)
return
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
p.log.Errorf("Failed to fetch rule from '%s': Status %d", url, resp.StatusCode)
return
}
defer resp.Body.Close()
raw, err := ioutil.ReadAll(resp.Body)
if err != nil {
p.log.Errorf("Failed to read rule from '%s': %#v", url, err)
return
}
rulesChan <- string(raw)
p.log.Debugf("done fetching rules from %s", url)
}(url)
}
wg.Wait()
close(rulesChan)
var result []string
for rule := range rulesChan {
result = append(result, rule)
}
p.log.Debugf("Found %d rules", len(result))
return result, nil
}
| {
flagSet.StringVar(&p.ETCDTLSConfig.CAFile, "kubernetes-etcd-ca-file", "", "CA certificate used by ETCD")
flagSet.StringVar(&p.ETCDTLSConfig.CertFile, "kubernetes-etcd-cert-file", "", "Public key file used by ETCD")
flagSet.StringVar(&p.ETCDTLSConfig.KeyFile, "kubernetes-etcd-key-file", "", "Private key file used by ETCD")
flagSet.StringVar(&p.KubeletTLSConfig.CAFile, "kubelet-ca-file", "", "CA certificate used by Kubelet")
flagSet.StringVar(&p.KubeletTLSConfig.CertFile, "kubelet-cert-file", "", "Public key file used by Kubelet")
flagSet.StringVar(&p.KubeletTLSConfig.KeyFile, "kubelet-key-file", "", "Private key file used by Kubelet")
flagSet.StringVar(&p.LogLevel, "kubernetes-log-level", "", "Log level of kubernetes plugin")
} | identifier_body |
tracing.py | import re
import uuid
import contextlib
from datetime import datetime
import sentry_sdk
from sentry_sdk.utils import capture_internal_exceptions, logger, to_string
from sentry_sdk._compat import PY2
from sentry_sdk._types import MYPY
if PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if MYPY:
import typing
from typing import Generator
from typing import Optional
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
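# The "sentry-trace" header carries "<trace_id>-<span_id>-<sampled>": a 32-char hex trace id,
# a 16-char hex span id and a "0"/"1" sampling flag, with every part optional.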
_traceparent_header_format_re = re.compile(
"^[ \t]*" # whitespace
"([0-9a-f]{32})?" # trace_id
"-?([0-9a-f]{16})?" # span_id
"-?([01])?" # sampled
"[ \t]*$" # whitespace
)
class EnvironHeaders(Mapping): # type: ignore
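# A read-only Mapping over a WSGI-style environ dict that exposes incoming HTTP headers,
# e.g. "sentry-trace" is looked up as environ["HTTP_SENTRY_TRACE"].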
def __init__(
self,
environ, # type: typing.Mapping[str, str]
prefix="HTTP_", # type: str
):
# type: (...) -> None
self.environ = environ
self.prefix = prefix
def __getitem__(self, key):
# type: (str) -> Optional[Any]
return self.environ[self.prefix + key.replace("-", "_").upper()]
def __len__(self):
# type: () -> int
return sum(1 for _ in iter(self))
def __iter__(self):
# type: () -> Generator[str, None, None]
for k in self.environ:
if not isinstance(k, str):
continue
k = k.replace("-", "_").upper()
if not k.startswith(self.prefix):
continue
yield k[len(self.prefix) :]
class _SpanRecorder(object):
__slots__ = ("maxlen", "finished_spans", "open_span_count")
def __init__(self, maxlen):
# type: (int) -> None
self.maxlen = maxlen
self.open_span_count = 0 # type: int
self.finished_spans = [] # type: List[Span]
def start_span(self, span):
# type: (Span) -> None
# This is just so that we don't run out of memory while recording a lot
# of spans. At some point we just stop and flush out the start of the
# trace tree (i.e. the first n spans with the smallest
# start_timestamp).
self.open_span_count += 1
if self.open_span_count > self.maxlen:
span._span_recorder = None
def finish_span(self, span):
# type: (Span) -> None
self.finished_spans.append(span)
class Span(object):
__slots__ = (
"trace_id",
"span_id",
"parent_span_id",
"same_process_as_parent",
"sampled",
"transaction",
"op",
"description",
"start_timestamp",
"timestamp",
"_tags",
"_data",
"_span_recorder",
"hub",
"_context_manager_state",
)
def __init__(
self,
trace_id=None, # type: Optional[str]
span_id=None, # type: Optional[str]
parent_span_id=None, # type: Optional[str]
same_process_as_parent=True, # type: bool
sampled=None, # type: Optional[bool]
transaction=None, # type: Optional[str]
op=None, # type: Optional[str]
description=None, # type: Optional[str]
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> None
self.trace_id = trace_id or uuid.uuid4().hex
self.span_id = span_id or uuid.uuid4().hex[16:]
self.parent_span_id = parent_span_id
self.same_process_as_parent = same_process_as_parent
self.sampled = sampled
self.transaction = transaction
self.op = op
self.description = description
self.hub = hub
self._tags = {} # type: Dict[str, str]
self._data = {} # type: Dict[str, Any]
self.start_timestamp = datetime.utcnow()
#: End timestamp of span
self.timestamp = None # type: Optional[datetime]
self._span_recorder = None # type: Optional[_SpanRecorder]
def init_finished_spans(self, maxlen):
# type: (int) -> None
if self._span_recorder is None:
self._span_recorder = _SpanRecorder(maxlen)
self._span_recorder.start_span(self)
def __repr__(self):
# type: () -> str
return (
"<%s(transaction=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r)>"
% (
self.__class__.__name__,
self.transaction,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
)
)
def __enter__(self):
# type: () -> Span
hub = self.hub or sentry_sdk.Hub.current
_, scope = hub._stack[-1]
old_span = scope.span
scope.span = self
self._context_manager_state = (hub, scope, old_span)
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
if value is not None:
self._tags.setdefault("status", "internal_error")
hub, scope, old_span = self._context_manager_state
del self._context_manager_state
self.finish(hub)
scope.span = old_span
def new_span(self, **kwargs):
# type: (**Any) -> Span
rv = type(self)(
trace_id=self.trace_id,
span_id=None,
parent_span_id=self.span_id,
sampled=self.sampled,
**kwargs
)
rv._span_recorder = self._span_recorder
return rv
@classmethod
def continue_from_environ(cls, environ):
# type: (typing.Mapping[str, str]) -> Span
return cls.continue_from_headers(EnvironHeaders(environ))
@classmethod
def continue_from_headers(cls, headers):
# type: (typing.Mapping[str, str]) -> Span
parent = cls.from_traceparent(headers.get("sentry-trace"))
if parent is None:
return cls()
parent.same_process_as_parent = False
return parent
def iter_headers(self):
# type: () -> Generator[Tuple[str, str], None, None]
yield "sentry-trace", self.to_traceparent()
@classmethod
def from_traceparent(cls, traceparent):
# type: (Optional[str]) -> Optional[Span]
if not traceparent:
return None
if traceparent.startswith("00-") and traceparent.endswith("-00"):
traceparent = traceparent[3:-3]
match = _traceparent_header_format_re.match(str(traceparent))
if match is None:
return None
trace_id, span_id, sampled_str = match.groups()
if trace_id is not None:
trace_id = "{:032x}".format(int(trace_id, 16))
if span_id is not None:
span_id = "{:016x}".format(int(span_id, 16))
if sampled_str:
sampled = sampled_str != "0" # type: Optional[bool]
else:
sampled = None
return cls(trace_id=trace_id, parent_span_id=span_id, sampled=sampled)
def to_traceparent(self):
# type: () -> str
sampled = ""
if self.sampled is True:
sampled = "1"
if self.sampled is False:
sampled = "0"
return "%s-%s-%s" % (self.trace_id, self.span_id, sampled)
def to_legacy_traceparent(self):
# type: () -> str
return "00-%s-%s-00" % (self.trace_id, self.span_id)
def set_tag(self, key, value):
# type: (str, Any) -> None
|
def set_data(self, key, value):
# type: (str, Any) -> None
self._data[key] = value
def set_status(self, value):
# type: (str) -> None
self.set_tag("status", value)
def set_http_status(self, http_status):
# type: (int) -> None
self.set_tag("http.status_code", http_status)
if http_status < 400:
self.set_status("ok")
elif 400 <= http_status < 500:
if http_status == 403:
self.set_status("permission_denied")
elif http_status == 404:
self.set_status("not_found")
elif http_status == 429:
self.set_status("resource_exhausted")
elif http_status == 413:
self.set_status("failed_precondition")
elif http_status == 401:
self.set_status("unauthenticated")
elif http_status == 409:
self.set_status("already_exists")
else:
self.set_status("invalid_argument")
elif 500 <= http_status < 600:
if http_status == 504:
self.set_status("deadline_exceeded")
elif http_status == 501:
self.set_status("unimplemented")
elif http_status == 503:
self.set_status("unavailable")
else:
self.set_status("internal_error")
else:
self.set_status("unknown_error")
def is_success(self):
# type: () -> bool
return self._tags.get("status") == "ok"
def finish(self, hub=None):
# type: (Optional[sentry_sdk.Hub]) -> Optional[str]
hub = hub or self.hub or sentry_sdk.Hub.current
if self.timestamp is not None:
# This transaction is already finished, so we should not flush it again.
return None
self.timestamp = datetime.utcnow()
_maybe_create_breadcrumbs_from_span(hub, self)
if self._span_recorder is None:
return None
self._span_recorder.finish_span(self)
if self.transaction is None:
# If this has no transaction set we assume there's a parent
# transaction for this span that would be flushed out eventually.
return None
client = hub.client
if client is None:
# We have no client and therefore nowhere to send this transaction
# event.
return None
if not self.sampled:
# At this point a `sampled = None` should have already been
# resolved to a concrete decision. If `sampled` is `None`, it's
# likely that somebody used `with sentry_sdk.Hub.start_span(..)` on a
# non-transaction span and later decided to make it a transaction.
if self.sampled is None:
logger.warning("Discarding transaction Span without sampling decision")
return None
return hub.capture_event(
{
"type": "transaction",
"transaction": self.transaction,
"contexts": {"trace": self.get_trace_context()},
"tags": self._tags,
"timestamp": self.timestamp,
"start_timestamp": self.start_timestamp,
"spans": [
s.to_json(client)
for s in self._span_recorder.finished_spans
if s is not self
],
}
)
def to_json(self, client):
# type: (Optional[sentry_sdk.Client]) -> Dict[str, Any]
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"same_process_as_parent": self.same_process_as_parent,
"op": self.op,
"description": self.description,
"start_timestamp": self.start_timestamp,
"timestamp": self.timestamp,
} # type: Dict[str, Any]
transaction = self.transaction
if transaction:
rv["transaction"] = transaction
tags = self._tags
if tags:
rv["tags"] = tags
data = self._data
if data:
rv["data"] = data
return rv
def get_trace_context(self):
# type: () -> Any
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"op": self.op,
"description": self.description,
}
if "status" in self._tags:
rv["status"] = self._tags["status"]
return rv
def _format_sql(cursor, sql):
# type: (Any, str) -> Optional[str]
real_sql = None
# If we're using psycopg2, it could be that we're
# looking at a query that uses Composed objects. Use psycopg2's mogrify
# function to format the query. We lose per-parameter trimming but gain
# accuracy in formatting.
try:
if hasattr(cursor, "mogrify"):
real_sql = cursor.mogrify(sql)
if isinstance(real_sql, bytes):
real_sql = real_sql.decode(cursor.connection.encoding)
except Exception:
real_sql = None
return real_sql or to_string(sql)
@contextlib.contextmanager
def record_sql_queries(
hub, # type: sentry_sdk.Hub
cursor, # type: Any
query, # type: Any
params_list, # type: Any
paramstyle, # type: Optional[str]
executemany, # type: bool
):
# type: (...) -> Generator[Span, None, None]
# TODO: Bring back capturing of params by default
if hub.client and hub.client.options["_experiments"].get(
"record_sql_params", False
):
if not params_list or params_list == [None]:
params_list = None
if paramstyle == "pyformat":
paramstyle = "format"
else:
params_list = None
paramstyle = None
query = _format_sql(cursor, query)
data = {}
if params_list is not None:
data["db.params"] = params_list
if paramstyle is not None:
data["db.paramstyle"] = paramstyle
if executemany:
data["db.executemany"] = True
with capture_internal_exceptions():
hub.add_breadcrumb(message=query, category="query", data=data)
with hub.start_span(op="db", description=query) as span:
for k, v in data.items():
span.set_data(k, v)
yield span
def _maybe_create_breadcrumbs_from_span(hub, span):
# type: (sentry_sdk.Hub, Span) -> None
if span.op == "redis":
hub.add_breadcrumb(
message=span.description, type="redis", category="redis", data=span._tags
)
elif span.op == "http":
hub.add_breadcrumb(type="http", category="httplib", data=span._data)
elif span.op == "subprocess":
hub.add_breadcrumb(
type="subprocess",
category="subprocess",
message=span.description,
data=span._data,
)
| self._tags[key] = value | identifier_body |
tracing.py | import re
import uuid
import contextlib
from datetime import datetime
import sentry_sdk
from sentry_sdk.utils import capture_internal_exceptions, logger, to_string
from sentry_sdk._compat import PY2
from sentry_sdk._types import MYPY
if PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if MYPY:
import typing
from typing import Generator
from typing import Optional
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
_traceparent_header_format_re = re.compile(
"^[ \t]*" # whitespace
"([0-9a-f]{32})?" # trace_id
"-?([0-9a-f]{16})?" # span_id
"-?([01])?" # sampled
"[ \t]*$" # whitespace
)
class EnvironHeaders(Mapping): # type: ignore
def __init__(
self,
environ, # type: typing.Mapping[str, str]
prefix="HTTP_", # type: str
):
# type: (...) -> None
self.environ = environ
self.prefix = prefix
def __getitem__(self, key):
# type: (str) -> Optional[Any]
return self.environ[self.prefix + key.replace("-", "_").upper()]
def __len__(self):
# type: () -> int
return sum(1 for _ in iter(self))
def __iter__(self):
# type: () -> Generator[str, None, None]
for k in self.environ:
if not isinstance(k, str):
continue
k = k.replace("-", "_").upper()
if not k.startswith(self.prefix):
continue
yield k[len(self.prefix) :]
class _SpanRecorder(object):
__slots__ = ("maxlen", "finished_spans", "open_span_count")
def __init__(self, maxlen):
# type: (int) -> None
self.maxlen = maxlen
self.open_span_count = 0 # type: int
self.finished_spans = [] # type: List[Span]
def start_span(self, span):
# type: (Span) -> None
# This is just so that we don't run out of memory while recording a lot
# of spans. At some point we just stop and flush out the start of the
# trace tree (i.e. the first n spans with the smallest
# start_timestamp).
self.open_span_count += 1
if self.open_span_count > self.maxlen:
span._span_recorder = None
def finish_span(self, span):
# type: (Span) -> None
self.finished_spans.append(span)
class Span(object):
__slots__ = (
"trace_id",
"span_id",
"parent_span_id",
"same_process_as_parent",
"sampled",
"transaction",
"op",
"description",
"start_timestamp",
"timestamp",
"_tags",
"_data",
"_span_recorder",
"hub",
"_context_manager_state",
)
def __init__(
self,
trace_id=None, # type: Optional[str]
span_id=None, # type: Optional[str]
parent_span_id=None, # type: Optional[str]
same_process_as_parent=True, # type: bool
sampled=None, # type: Optional[bool]
transaction=None, # type: Optional[str]
op=None, # type: Optional[str]
description=None, # type: Optional[str]
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> None
self.trace_id = trace_id or uuid.uuid4().hex
self.span_id = span_id or uuid.uuid4().hex[16:]
self.parent_span_id = parent_span_id
self.same_process_as_parent = same_process_as_parent
self.sampled = sampled
self.transaction = transaction
self.op = op
self.description = description
self.hub = hub
self._tags = {} # type: Dict[str, str]
self._data = {} # type: Dict[str, Any]
self.start_timestamp = datetime.utcnow()
#: End timestamp of span
self.timestamp = None # type: Optional[datetime]
self._span_recorder = None # type: Optional[_SpanRecorder]
def init_finished_spans(self, maxlen):
# type: (int) -> None
if self._span_recorder is None:
self._span_recorder = _SpanRecorder(maxlen)
self._span_recorder.start_span(self)
def __repr__(self):
# type: () -> str
return (
"<%s(transaction=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r)>"
% (
self.__class__.__name__,
self.transaction,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
)
)
def __enter__(self):
# type: () -> Span
hub = self.hub or sentry_sdk.Hub.current
_, scope = hub._stack[-1]
old_span = scope.span
scope.span = self
self._context_manager_state = (hub, scope, old_span)
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
if value is not None:
self._tags.setdefault("status", "internal_error")
hub, scope, old_span = self._context_manager_state
del self._context_manager_state
self.finish(hub)
scope.span = old_span
def new_span(self, **kwargs):
# type: (**Any) -> Span
rv = type(self)(
trace_id=self.trace_id,
span_id=None,
parent_span_id=self.span_id,
sampled=self.sampled,
**kwargs
)
rv._span_recorder = self._span_recorder
return rv
@classmethod
def continue_from_environ(cls, environ):
# type: (typing.Mapping[str, str]) -> Span
return cls.continue_from_headers(EnvironHeaders(environ))
@classmethod
def continue_from_headers(cls, headers):
# type: (typing.Mapping[str, str]) -> Span
parent = cls.from_traceparent(headers.get("sentry-trace"))
if parent is None:
return cls()
parent.same_process_as_parent = False
return parent
def iter_headers(self):
# type: () -> Generator[Tuple[str, str], None, None]
yield "sentry-trace", self.to_traceparent()
@classmethod
def from_traceparent(cls, traceparent):
# type: (Optional[str]) -> Optional[Span]
if not traceparent:
return None
if traceparent.startswith("00-") and traceparent.endswith("-00"):
traceparent = traceparent[3:-3]
match = _traceparent_header_format_re.match(str(traceparent))
if match is None:
return None
trace_id, span_id, sampled_str = match.groups()
if trace_id is not None:
trace_id = "{:032x}".format(int(trace_id, 16))
if span_id is not None:
span_id = "{:016x}".format(int(span_id, 16))
if sampled_str:
sampled = sampled_str != "0" # type: Optional[bool]
else:
sampled = None
return cls(trace_id=trace_id, parent_span_id=span_id, sampled=sampled)
def to_traceparent(self):
# type: () -> str
sampled = ""
if self.sampled is True:
sampled = "1"
if self.sampled is False:
sampled = "0"
return "%s-%s-%s" % (self.trace_id, self.span_id, sampled)
def to_legacy_traceparent(self):
# type: () -> str
return "00-%s-%s-00" % (self.trace_id, self.span_id)
def set_tag(self, key, value):
# type: (str, Any) -> None
self._tags[key] = value
def set_data(self, key, value):
# type: (str, Any) -> None
self._data[key] = value
def set_status(self, value):
# type: (str) -> None
self.set_tag("status", value)
def set_http_status(self, http_status):
# type: (int) -> None
self.set_tag("http.status_code", http_status)
if http_status < 400:
self.set_status("ok")
elif 400 <= http_status < 500:
if http_status == 403:
self.set_status("permission_denied")
elif http_status == 404:
self.set_status("not_found")
elif http_status == 429:
self.set_status("resource_exhausted")
elif http_status == 413:
self.set_status("failed_precondition")
elif http_status == 401:
self.set_status("unauthenticated")
elif http_status == 409:
self.set_status("already_exists")
else:
self.set_status("invalid_argument")
elif 500 <= http_status < 600:
if http_status == 504:
self.set_status("deadline_exceeded")
elif http_status == 501:
self.set_status("unimplemented")
elif http_status == 503:
self.set_status("unavailable")
else:
self.set_status("internal_error")
else:
self.set_status("unknown_error")
def is_success(self):
# type: () -> bool
return self._tags.get("status") == "ok"
def finish(self, hub=None):
# type: (Optional[sentry_sdk.Hub]) -> Optional[str]
hub = hub or self.hub or sentry_sdk.Hub.current
if self.timestamp is not None:
# This transaction is already finished, so we should not flush it again.
return None
self.timestamp = datetime.utcnow()
_maybe_create_breadcrumbs_from_span(hub, self)
if self._span_recorder is None:
return None
self._span_recorder.finish_span(self)
if self.transaction is None:
# If this has no transaction set we assume there's a parent
# transaction for this span that would be flushed out eventually.
return None
client = hub.client
if client is None:
# We have no client and therefore nowhere to send this transaction
# event.
return None
if not self.sampled:
# At this point a `sampled = None` should have already been
# resolved to a concrete decision. If `sampled` is `None`, it's
# likely that somebody used `with sentry_sdk.Hub.start_span(..)` on a
# non-transaction span and later decided to make it a transaction.
if self.sampled is None:
logger.warning("Discarding transaction Span without sampling decision")
return None
return hub.capture_event(
{
"type": "transaction",
"transaction": self.transaction,
"contexts": {"trace": self.get_trace_context()},
"tags": self._tags,
"timestamp": self.timestamp,
"start_timestamp": self.start_timestamp,
"spans": [
s.to_json(client)
for s in self._span_recorder.finished_spans
if s is not self
],
}
)
def to_json(self, client):
# type: (Optional[sentry_sdk.Client]) -> Dict[str, Any]
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"same_process_as_parent": self.same_process_as_parent,
"op": self.op,
"description": self.description,
"start_timestamp": self.start_timestamp,
"timestamp": self.timestamp,
} # type: Dict[str, Any]
transaction = self.transaction
if transaction:
rv["transaction"] = transaction
tags = self._tags
if tags:
rv["tags"] = tags
data = self._data
if data:
rv["data"] = data
return rv
def get_trace_context(self):
# type: () -> Any
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"op": self.op,
"description": self.description,
}
if "status" in self._tags:
rv["status"] = self._tags["status"]
return rv
def _format_sql(cursor, sql):
# type: (Any, str) -> Optional[str]
real_sql = None
# If we're using psycopg2, it could be that we're
# looking at a query that uses Composed objects. Use psycopg2's mogrify
# function to format the query. We lose per-parameter trimming but gain
# accuracy in formatting.
try:
if hasattr(cursor, "mogrify"):
real_sql = cursor.mogrify(sql)
if isinstance(real_sql, bytes):
real_sql = real_sql.decode(cursor.connection.encoding)
except Exception:
real_sql = None
return real_sql or to_string(sql)
@contextlib.contextmanager
def record_sql_queries(
hub, # type: sentry_sdk.Hub
cursor, # type: Any
query, # type: Any
params_list, # type: Any
paramstyle, # type: Optional[str]
executemany, # type: bool
):
# type: (...) -> Generator[Span, None, None]
# TODO: Bring back capturing of params by default
if hub.client and hub.client.options["_experiments"].get(
"record_sql_params", False
):
if not params_list or params_list == [None]:
params_list = None
if paramstyle == "pyformat":
paramstyle = "format"
else:
|
query = _format_sql(cursor, query)
data = {}
if params_list is not None:
data["db.params"] = params_list
if paramstyle is not None:
data["db.paramstyle"] = paramstyle
if executemany:
data["db.executemany"] = True
with capture_internal_exceptions():
hub.add_breadcrumb(message=query, category="query", data=data)
with hub.start_span(op="db", description=query) as span:
for k, v in data.items():
span.set_data(k, v)
yield span
def _maybe_create_breadcrumbs_from_span(hub, span):
# type: (sentry_sdk.Hub, Span) -> None
if span.op == "redis":
hub.add_breadcrumb(
message=span.description, type="redis", category="redis", data=span._tags
)
elif span.op == "http":
hub.add_breadcrumb(type="http", category="httplib", data=span._data)
elif span.op == "subprocess":
hub.add_breadcrumb(
type="subprocess",
category="subprocess",
message=span.description,
data=span._data,
)
| params_list = None
paramstyle = None | conditional_block |
tracing.py | import re
import uuid
import contextlib
from datetime import datetime
import sentry_sdk
from sentry_sdk.utils import capture_internal_exceptions, logger, to_string
from sentry_sdk._compat import PY2
from sentry_sdk._types import MYPY
if PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if MYPY:
import typing
from typing import Generator
from typing import Optional
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
_traceparent_header_format_re = re.compile(
"^[ \t]*" # whitespace
"([0-9a-f]{32})?" # trace_id
"-?([0-9a-f]{16})?" # span_id
"-?([01])?" # sampled
"[ \t]*$" # whitespace
)
class EnvironHeaders(Mapping): # type: ignore
def __init__(
self,
environ, # type: typing.Mapping[str, str]
prefix="HTTP_", # type: str | self.prefix = prefix
def __getitem__(self, key):
# type: (str) -> Optional[Any]
return self.environ[self.prefix + key.replace("-", "_").upper()]
def __len__(self):
# type: () -> int
return sum(1 for _ in iter(self))
def __iter__(self):
# type: () -> Generator[str, None, None]
for k in self.environ:
if not isinstance(k, str):
continue
k = k.replace("-", "_").upper()
if not k.startswith(self.prefix):
continue
yield k[len(self.prefix) :]
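# Bounded recorder for finished spans; spans opened beyond maxlen are detached and not recorded.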
class _SpanRecorder(object):
__slots__ = ("maxlen", "finished_spans", "open_span_count")
def __init__(self, maxlen):
# type: (int) -> None
self.maxlen = maxlen
self.open_span_count = 0 # type: int
self.finished_spans = [] # type: List[Span]
def start_span(self, span):
# type: (Span) -> None
# This is just so that we don't run out of memory while recording a lot
# of spans. At some point we just stop and flush out the start of the
# trace tree (i.e. the first n spans with the smallest
# start_timestamp).
self.open_span_count += 1
if self.open_span_count > self.maxlen:
span._span_recorder = None
def finish_span(self, span):
# type: (Span) -> None
self.finished_spans.append(span)
class Span(object):
__slots__ = (
"trace_id",
"span_id",
"parent_span_id",
"same_process_as_parent",
"sampled",
"transaction",
"op",
"description",
"start_timestamp",
"timestamp",
"_tags",
"_data",
"_span_recorder",
"hub",
"_context_manager_state",
)
def __init__(
self,
trace_id=None, # type: Optional[str]
span_id=None, # type: Optional[str]
parent_span_id=None, # type: Optional[str]
same_process_as_parent=True, # type: bool
sampled=None, # type: Optional[bool]
transaction=None, # type: Optional[str]
op=None, # type: Optional[str]
description=None, # type: Optional[str]
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> None
self.trace_id = trace_id or uuid.uuid4().hex
self.span_id = span_id or uuid.uuid4().hex[16:]
self.parent_span_id = parent_span_id
self.same_process_as_parent = same_process_as_parent
self.sampled = sampled
self.transaction = transaction
self.op = op
self.description = description
self.hub = hub
self._tags = {} # type: Dict[str, str]
self._data = {} # type: Dict[str, Any]
self.start_timestamp = datetime.utcnow()
#: End timestamp of span
self.timestamp = None # type: Optional[datetime]
self._span_recorder = None # type: Optional[_SpanRecorder]
def init_finished_spans(self, maxlen):
# type: (int) -> None
if self._span_recorder is None:
self._span_recorder = _SpanRecorder(maxlen)
self._span_recorder.start_span(self)
def __repr__(self):
# type: () -> str
return (
"<%s(transaction=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r)>"
% (
self.__class__.__name__,
self.transaction,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
)
)
def __enter__(self):
# type: () -> Span
hub = self.hub or sentry_sdk.Hub.current
_, scope = hub._stack[-1]
old_span = scope.span
scope.span = self
self._context_manager_state = (hub, scope, old_span)
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
if value is not None:
self._tags.setdefault("status", "internal_error")
hub, scope, old_span = self._context_manager_state
del self._context_manager_state
self.finish(hub)
scope.span = old_span
def new_span(self, **kwargs):
# type: (**Any) -> Span
rv = type(self)(
trace_id=self.trace_id,
span_id=None,
parent_span_id=self.span_id,
sampled=self.sampled,
**kwargs
)
rv._span_recorder = self._span_recorder
return rv
@classmethod
def continue_from_environ(cls, environ):
# type: (typing.Mapping[str, str]) -> Span
return cls.continue_from_headers(EnvironHeaders(environ))
@classmethod
def continue_from_headers(cls, headers):
# type: (typing.Mapping[str, str]) -> Span
parent = cls.from_traceparent(headers.get("sentry-trace"))
if parent is None:
return cls()
parent.same_process_as_parent = False
return parent
def iter_headers(self):
# type: () -> Generator[Tuple[str, str], None, None]
yield "sentry-trace", self.to_traceparent()
@classmethod
def from_traceparent(cls, traceparent):
# type: (Optional[str]) -> Optional[Span]
if not traceparent:
return None
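# Tolerate W3C-traceparent-style values by stripping the "00-" version prefix and trailing "-00" flags.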
if traceparent.startswith("00-") and traceparent.endswith("-00"):
traceparent = traceparent[3:-3]
match = _traceparent_header_format_re.match(str(traceparent))
if match is None:
return None
trace_id, span_id, sampled_str = match.groups()
if trace_id is not None:
trace_id = "{:032x}".format(int(trace_id, 16))
if span_id is not None:
span_id = "{:016x}".format(int(span_id, 16))
if sampled_str:
sampled = sampled_str != "0" # type: Optional[bool]
else:
sampled = None
return cls(trace_id=trace_id, parent_span_id=span_id, sampled=sampled)
def to_traceparent(self):
# type: () -> str
sampled = ""
if self.sampled is True:
sampled = "1"
if self.sampled is False:
sampled = "0"
return "%s-%s-%s" % (self.trace_id, self.span_id, sampled)
def to_legacy_traceparent(self):
# type: () -> str
return "00-%s-%s-00" % (self.trace_id, self.span_id)
def set_tag(self, key, value):
# type: (str, Any) -> None
self._tags[key] = value
def set_data(self, key, value):
# type: (str, Any) -> None
self._data[key] = value
def set_status(self, value):
# type: (str) -> None
self.set_tag("status", value)
def set_http_status(self, http_status):
# type: (int) -> None
self.set_tag("http.status_code", http_status)
if http_status < 400:
self.set_status("ok")
elif 400 <= http_status < 500:
if http_status == 403:
self.set_status("permission_denied")
elif http_status == 404:
self.set_status("not_found")
elif http_status == 429:
self.set_status("resource_exhausted")
elif http_status == 413:
self.set_status("failed_precondition")
elif http_status == 401:
self.set_status("unauthenticated")
elif http_status == 409:
self.set_status("already_exists")
else:
self.set_status("invalid_argument")
elif 500 <= http_status < 600:
if http_status == 504:
self.set_status("deadline_exceeded")
elif http_status == 501:
self.set_status("unimplemented")
elif http_status == 503:
self.set_status("unavailable")
else:
self.set_status("internal_error")
else:
self.set_status("unknown_error")
def is_success(self):
# type: () -> bool
return self._tags.get("status") == "ok"
def finish(self, hub=None):
# type: (Optional[sentry_sdk.Hub]) -> Optional[str]
hub = hub or self.hub or sentry_sdk.Hub.current
if self.timestamp is not None:
# This transaction is already finished, so we should not flush it again.
return None
self.timestamp = datetime.utcnow()
_maybe_create_breadcrumbs_from_span(hub, self)
if self._span_recorder is None:
return None
self._span_recorder.finish_span(self)
if self.transaction is None:
# If this has no transaction set we assume there's a parent
# transaction for this span that would be flushed out eventually.
return None
client = hub.client
if client is None:
# We have no client and therefore nowhere to send this transaction
# event.
return None
if not self.sampled:
# At this point a `sampled = None` should have already been
# resolved to a concrete decision. If `sampled` is `None`, it's
# likely that somebody used `with sentry_sdk.Hub.start_span(..)` on a
# non-transaction span and later decided to make it a transaction.
if self.sampled is None:
logger.warning("Discarding transaction Span without sampling decision")
return None
return hub.capture_event(
{
"type": "transaction",
"transaction": self.transaction,
"contexts": {"trace": self.get_trace_context()},
"tags": self._tags,
"timestamp": self.timestamp,
"start_timestamp": self.start_timestamp,
"spans": [
s.to_json(client)
for s in self._span_recorder.finished_spans
if s is not self
],
}
)
def to_json(self, client):
# type: (Optional[sentry_sdk.Client]) -> Dict[str, Any]
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"same_process_as_parent": self.same_process_as_parent,
"op": self.op,
"description": self.description,
"start_timestamp": self.start_timestamp,
"timestamp": self.timestamp,
} # type: Dict[str, Any]
transaction = self.transaction
if transaction:
rv["transaction"] = transaction
tags = self._tags
if tags:
rv["tags"] = tags
data = self._data
if data:
rv["data"] = data
return rv
def get_trace_context(self):
# type: () -> Any
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"op": self.op,
"description": self.description,
}
if "status" in self._tags:
rv["status"] = self._tags["status"]
return rv
def _format_sql(cursor, sql):
# type: (Any, str) -> Optional[str]
real_sql = None
# If we're using psycopg2, it could be that we're
# looking at a query that uses Composed objects. Use psycopg2's mogrify
# function to format the query. We lose per-parameter trimming but gain
# accuracy in formatting.
try:
if hasattr(cursor, "mogrify"):
real_sql = cursor.mogrify(sql)
if isinstance(real_sql, bytes):
real_sql = real_sql.decode(cursor.connection.encoding)
except Exception:
real_sql = None
return real_sql or to_string(sql)
@contextlib.contextmanager
def record_sql_queries(
hub, # type: sentry_sdk.Hub
cursor, # type: Any
query, # type: Any
params_list, # type: Any
paramstyle, # type: Optional[str]
executemany, # type: bool
):
# type: (...) -> Generator[Span, None, None]
# TODO: Bring back capturing of params by default
if hub.client and hub.client.options["_experiments"].get(
"record_sql_params", False
):
if not params_list or params_list == [None]:
params_list = None
if paramstyle == "pyformat":
paramstyle = "format"
else:
params_list = None
paramstyle = None
query = _format_sql(cursor, query)
data = {}
if params_list is not None:
data["db.params"] = params_list
if paramstyle is not None:
data["db.paramstyle"] = paramstyle
if executemany:
data["db.executemany"] = True
with capture_internal_exceptions():
hub.add_breadcrumb(message=query, category="query", data=data)
with hub.start_span(op="db", description=query) as span:
for k, v in data.items():
span.set_data(k, v)
yield span
def _maybe_create_breadcrumbs_from_span(hub, span):
# type: (sentry_sdk.Hub, Span) -> None
if span.op == "redis":
hub.add_breadcrumb(
message=span.description, type="redis", category="redis", data=span._tags
)
elif span.op == "http":
hub.add_breadcrumb(type="http", category="httplib", data=span._data)
elif span.op == "subprocess":
hub.add_breadcrumb(
type="subprocess",
category="subprocess",
message=span.description,
data=span._data,
) | ):
# type: (...) -> None
self.environ = environ | random_line_split |
tracing.py | import re
import uuid
import contextlib
from datetime import datetime
import sentry_sdk
from sentry_sdk.utils import capture_internal_exceptions, logger, to_string
from sentry_sdk._compat import PY2
from sentry_sdk._types import MYPY
if PY2:
from collections import Mapping
else:
from collections.abc import Mapping
if MYPY:
import typing
from typing import Generator
from typing import Optional
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
_traceparent_header_format_re = re.compile(
"^[ \t]*" # whitespace
"([0-9a-f]{32})?" # trace_id
"-?([0-9a-f]{16})?" # span_id
"-?([01])?" # sampled
"[ \t]*$" # whitespace
)
class EnvironHeaders(Mapping): # type: ignore
def __init__(
self,
environ, # type: typing.Mapping[str, str]
prefix="HTTP_", # type: str
):
# type: (...) -> None
self.environ = environ
self.prefix = prefix
def __getitem__(self, key):
# type: (str) -> Optional[Any]
return self.environ[self.prefix + key.replace("-", "_").upper()]
def __len__(self):
# type: () -> int
return sum(1 for _ in iter(self))
def __iter__(self):
# type: () -> Generator[str, None, None]
for k in self.environ:
if not isinstance(k, str):
continue
k = k.replace("-", "_").upper()
if not k.startswith(self.prefix):
continue
yield k[len(self.prefix) :]
class _SpanRecorder(object):
__slots__ = ("maxlen", "finished_spans", "open_span_count")
def __init__(self, maxlen):
# type: (int) -> None
self.maxlen = maxlen
self.open_span_count = 0 # type: int
self.finished_spans = [] # type: List[Span]
def start_span(self, span):
# type: (Span) -> None
# This is just so that we don't run out of memory while recording a lot
# of spans. At some point we just stop and flush out the start of the
# trace tree (i.e. the first n spans with the smallest
# start_timestamp).
self.open_span_count += 1
if self.open_span_count > self.maxlen:
span._span_recorder = None
def finish_span(self, span):
# type: (Span) -> None
self.finished_spans.append(span)
class Span(object):
__slots__ = (
"trace_id",
"span_id",
"parent_span_id",
"same_process_as_parent",
"sampled",
"transaction",
"op",
"description",
"start_timestamp",
"timestamp",
"_tags",
"_data",
"_span_recorder",
"hub",
"_context_manager_state",
)
def __init__(
self,
trace_id=None, # type: Optional[str]
span_id=None, # type: Optional[str]
parent_span_id=None, # type: Optional[str]
same_process_as_parent=True, # type: bool
sampled=None, # type: Optional[bool]
transaction=None, # type: Optional[str]
op=None, # type: Optional[str]
description=None, # type: Optional[str]
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> None
self.trace_id = trace_id or uuid.uuid4().hex
self.span_id = span_id or uuid.uuid4().hex[16:]
self.parent_span_id = parent_span_id
self.same_process_as_parent = same_process_as_parent
self.sampled = sampled
self.transaction = transaction
self.op = op
self.description = description
self.hub = hub
self._tags = {} # type: Dict[str, str]
self._data = {} # type: Dict[str, Any]
self.start_timestamp = datetime.utcnow()
#: End timestamp of span
self.timestamp = None # type: Optional[datetime]
self._span_recorder = None # type: Optional[_SpanRecorder]
def init_finished_spans(self, maxlen):
# type: (int) -> None
if self._span_recorder is None:
self._span_recorder = _SpanRecorder(maxlen)
self._span_recorder.start_span(self)
def __repr__(self):
# type: () -> str
return (
"<%s(transaction=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r)>"
% (
self.__class__.__name__,
self.transaction,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
)
)
def | (self):
# type: () -> Span
hub = self.hub or sentry_sdk.Hub.current
_, scope = hub._stack[-1]
old_span = scope.span
scope.span = self
self._context_manager_state = (hub, scope, old_span)
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
if value is not None:
self._tags.setdefault("status", "internal_error")
hub, scope, old_span = self._context_manager_state
del self._context_manager_state
self.finish(hub)
scope.span = old_span
def new_span(self, **kwargs):
# type: (**Any) -> Span
rv = type(self)(
trace_id=self.trace_id,
span_id=None,
parent_span_id=self.span_id,
sampled=self.sampled,
**kwargs
)
rv._span_recorder = self._span_recorder
return rv
@classmethod
def continue_from_environ(cls, environ):
# type: (typing.Mapping[str, str]) -> Span
return cls.continue_from_headers(EnvironHeaders(environ))
@classmethod
def continue_from_headers(cls, headers):
# type: (typing.Mapping[str, str]) -> Span
parent = cls.from_traceparent(headers.get("sentry-trace"))
if parent is None:
return cls()
parent.same_process_as_parent = False
return parent
def iter_headers(self):
# type: () -> Generator[Tuple[str, str], None, None]
yield "sentry-trace", self.to_traceparent()
@classmethod
def from_traceparent(cls, traceparent):
# type: (Optional[str]) -> Optional[Span]
if not traceparent:
return None
if traceparent.startswith("00-") and traceparent.endswith("-00"):
traceparent = traceparent[3:-3]
match = _traceparent_header_format_re.match(str(traceparent))
if match is None:
return None
trace_id, span_id, sampled_str = match.groups()
if trace_id is not None:
trace_id = "{:032x}".format(int(trace_id, 16))
if span_id is not None:
span_id = "{:016x}".format(int(span_id, 16))
if sampled_str:
sampled = sampled_str != "0" # type: Optional[bool]
else:
sampled = None
return cls(trace_id=trace_id, parent_span_id=span_id, sampled=sampled)
def to_traceparent(self):
# type: () -> str
sampled = ""
if self.sampled is True:
sampled = "1"
if self.sampled is False:
sampled = "0"
return "%s-%s-%s" % (self.trace_id, self.span_id, sampled)
def to_legacy_traceparent(self):
# type: () -> str
return "00-%s-%s-00" % (self.trace_id, self.span_id)
def set_tag(self, key, value):
# type: (str, Any) -> None
self._tags[key] = value
def set_data(self, key, value):
# type: (str, Any) -> None
self._data[key] = value
def set_status(self, value):
# type: (str) -> None
self.set_tag("status", value)
def set_http_status(self, http_status):
# type: (int) -> None
self.set_tag("http.status_code", http_status)
if http_status < 400:
self.set_status("ok")
elif 400 <= http_status < 500:
if http_status == 403:
self.set_status("permission_denied")
elif http_status == 404:
self.set_status("not_found")
elif http_status == 429:
self.set_status("resource_exhausted")
elif http_status == 413:
self.set_status("failed_precondition")
elif http_status == 401:
self.set_status("unauthenticated")
elif http_status == 409:
self.set_status("already_exists")
else:
self.set_status("invalid_argument")
elif 500 <= http_status < 600:
if http_status == 504:
self.set_status("deadline_exceeded")
elif http_status == 501:
self.set_status("unimplemented")
elif http_status == 503:
self.set_status("unavailable")
else:
self.set_status("internal_error")
else:
self.set_status("unknown_error")
def is_success(self):
# type: () -> bool
return self._tags.get("status") == "ok"
def finish(self, hub=None):
# type: (Optional[sentry_sdk.Hub]) -> Optional[str]
hub = hub or self.hub or sentry_sdk.Hub.current
if self.timestamp is not None:
# This transaction is already finished, so we should not flush it again.
return None
self.timestamp = datetime.utcnow()
_maybe_create_breadcrumbs_from_span(hub, self)
if self._span_recorder is None:
return None
self._span_recorder.finish_span(self)
if self.transaction is None:
# If this has no transaction set we assume there's a parent
# transaction for this span that would be flushed out eventually.
return None
client = hub.client
if client is None:
# We have no client and therefore nowhere to send this transaction
# event.
return None
if not self.sampled:
# At this point a `sampled = None` should have already been
# resolved to a concrete decision. If `sampled` is `None`, it's
# likely that somebody used `with sentry_sdk.Hub.start_span(..)` on a
# non-transaction span and later decided to make it a transaction.
if self.sampled is None:
logger.warning("Discarding transaction Span without sampling decision")
return None
return hub.capture_event(
{
"type": "transaction",
"transaction": self.transaction,
"contexts": {"trace": self.get_trace_context()},
"tags": self._tags,
"timestamp": self.timestamp,
"start_timestamp": self.start_timestamp,
"spans": [
s.to_json(client)
for s in self._span_recorder.finished_spans
if s is not self
],
}
)
def to_json(self, client):
# type: (Optional[sentry_sdk.Client]) -> Dict[str, Any]
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"same_process_as_parent": self.same_process_as_parent,
"op": self.op,
"description": self.description,
"start_timestamp": self.start_timestamp,
"timestamp": self.timestamp,
} # type: Dict[str, Any]
transaction = self.transaction
if transaction:
rv["transaction"] = transaction
tags = self._tags
if tags:
rv["tags"] = tags
data = self._data
if data:
rv["data"] = data
return rv
def get_trace_context(self):
# type: () -> Any
rv = {
"trace_id": self.trace_id,
"span_id": self.span_id,
"parent_span_id": self.parent_span_id,
"op": self.op,
"description": self.description,
}
if "status" in self._tags:
rv["status"] = self._tags["status"]
return rv
def _format_sql(cursor, sql):
# type: (Any, str) -> Optional[str]
real_sql = None
# If we're using psycopg2, it could be that we're
# looking at a query that uses Composed objects. Use psycopg2's mogrify
# function to format the query. We lose per-parameter trimming but gain
# accuracy in formatting.
try:
if hasattr(cursor, "mogrify"):
real_sql = cursor.mogrify(sql)
if isinstance(real_sql, bytes):
real_sql = real_sql.decode(cursor.connection.encoding)
except Exception:
real_sql = None
return real_sql or to_string(sql)
@contextlib.contextmanager
def record_sql_queries(
hub, # type: sentry_sdk.Hub
cursor, # type: Any
query, # type: Any
params_list, # type: Any
paramstyle, # type: Optional[str]
executemany, # type: bool
):
# type: (...) -> Generator[Span, None, None]
# TODO: Bring back capturing of params by default
if hub.client and hub.client.options["_experiments"].get(
"record_sql_params", False
):
if not params_list or params_list == [None]:
params_list = None
if paramstyle == "pyformat":
paramstyle = "format"
else:
params_list = None
paramstyle = None
query = _format_sql(cursor, query)
data = {}
if params_list is not None:
data["db.params"] = params_list
if paramstyle is not None:
data["db.paramstyle"] = paramstyle
if executemany:
data["db.executemany"] = True
with capture_internal_exceptions():
hub.add_breadcrumb(message=query, category="query", data=data)
with hub.start_span(op="db", description=query) as span:
for k, v in data.items():
span.set_data(k, v)
yield span
def _maybe_create_breadcrumbs_from_span(hub, span):
# type: (sentry_sdk.Hub, Span) -> None
if span.op == "redis":
hub.add_breadcrumb(
message=span.description, type="redis", category="redis", data=span._tags
)
elif span.op == "http":
hub.add_breadcrumb(type="http", category="httplib", data=span._data)
elif span.op == "subprocess":
hub.add_breadcrumb(
type="subprocess",
category="subprocess",
message=span.description,
data=span._data,
)
| __enter__ | identifier_name |
main.rs | #![feature(plugin, decl_macro, custom_derive, type_ascription)] // Compiler plugins
#![plugin(rocket_codegen)] // rocket code generator
extern crate rocket;
extern crate rabe;
extern crate serde;
extern crate serde_json;
extern crate rustc_serialize;
extern crate blake2_rfc;
extern crate rocket_simpleauth;
extern crate rand;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use std::error::*;
use std::fs::*;
use std::sync::{Once, ONCE_INIT};
use rand::Rng;
use rand::os::OsRng;
use rocket_contrib::{Json};
use rocket::response::status::BadRequest;
use rocket::http::*;
use rocket::request::FromRequest;
use rocket::request::Request;
use rocket::outcome::Outcome;
use diesel::*;
use std::str;
use std::str::FromStr;
use std::env;
use rabe::schemes::bsw;
use blake2_rfc::blake2b::*;
pub mod schema;
// Change the alias to `Box<error::Error>`.
type BoxedResult<T> = std::result::Result<T, Box<Error>>;
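// Attribute-based encryption schemes supported by this service; currently only bsw (CP-ABE, cf. rabe::schemes::bsw).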
enum SCHEMES {
bsw
}
impl FromStr for SCHEMES {
type Err = ();
fn from_str(s: &str) -> Result<SCHEMES, ()> {
match s {
"bsw" => Ok(SCHEMES::bsw),
_ => Err(()),
}
}
}
// ----------------------------------------------------
// Internal structs follow
// ----------------------------------------------------
struct ApiKey(String);
impl<'t, 'r> FromRequest<'t, 'r> for ApiKey {
type Error = ();
fn from_request(request: &'t Request<'r>) -> Outcome<ApiKey, (Status,()), ()> {
let keys: Vec<_> = request.headers().get("Authorization").collect();
if keys.len() != 1 {
return Outcome::Failure((Status::BadRequest, ()));
}
println!("Got API key {}", keys[0]);
let key = keys[0];
if !is_valid(keys[0].to_string()) {
// return Outcome::Forward(());
return Outcome::Failure((Status::Unauthorized, ()));
}
return Outcome::Success(ApiKey(key.to_string()));
}
}
// -----------------------------------------------------
// Message formats follow
// -----------------------------------------------------
#[derive(Serialize, Deserialize)]
struct Message {
contents: String
}
#[derive(Serialize, Deserialize)]
struct SetupMsg {
scheme: String,
attributes: Vec<String>
}
#[derive(Serialize, Deserialize)]
struct KeyGenMsg {
attributes: Vec<String>,
scheme: String,
}
#[derive(Serialize, Deserialize)]
struct EncMessage {
plaintext :String,
policy : String, // A json serialized policy that is understood by the scheme assigned to the session
session_id : String // Session ID unique per (user,scheme)
}
#[derive(Serialize, Deserialize)]
struct DecMessage {
ct: String,
session_id: String, // Session ID unique per (user,scheme)
username: String,
password: String
}
#[derive(Serialize, Deserialize)]
struct ListAttrMsg {
username : String,
password : String
}
#[derive(Serialize, Deserialize)]
struct User {
username: String,
password: String,
attributes: Vec<String>,
random_session_id: String // Reference to session
}
// -----------------------------------------------------
// REST APIs follow
// -----------------------------------------------------
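// POST /encrypt: encrypts the plaintext under the given policy using the session's public key and returns the ciphertext as JSON.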
#[post(path="/encrypt", format="application/json", data="<d>")]
fn encrypt(d:Json<EncMessage>) -> Result<Json<String>, BadRequest<String>> {
// Get active session (panics if not available)
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
// Get key material needed for encryption
let key_material: Vec<String> = serde_json::from_str(&session.key_material.as_str()).unwrap();
let pk_string : &String = &key_material[0];
let plaintext: &Vec<u8> = &d.plaintext.as_bytes().to_vec();
let pk : bsw::CpAbePublicKey = serde_json::from_str(pk_string.as_str()).unwrap(); // TODO NotNice: need to convert to scheme-specific type here. Should be generic trait w/ function "KeyMaterial.get_public_key()"
println!("plaintext {:?}", plaintext);
println!("policy {:?}", &d.policy);
let res = bsw::encrypt(&pk, &d.policy, plaintext).unwrap();
Ok(Json(serde_json::to_string_pretty(&res).unwrap()))
}
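// POST /decrypt: decrypts a ciphertext using the secret key stored for the given user of the session.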
#[post(path="/decrypt", format="application/json", data="<d>")]
fn decrypt(d:Json<DecMessage>) -> Result<Json<String>, BadRequest<String>> {
println!("Decryption demanded with ciphertext {}", &d.ct);
// Get session from DB and extract key material needed for decryption
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
let users: Vec<schema::User> = _db_get_users_by_apikey(&conn, &session.random_session_id);
println!("Users {:?}", users.len());
// Find the requesting user within this session; take_while would stop at the first non-matching row.
let user: schema::User = users.into_iter().find(|u| u.username.eq(&d.username)).unwrap();
let key_material: String = user.key_material;
match session.scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let ct: bsw::CpAbeCiphertext = serde_json::from_str(&d.ct).unwrap();
let sk: bsw::CpAbeSecretKey = serde_json::from_str(&key_material).unwrap();
// Decrypt ciphertext
let res = bsw::decrypt(&sk, &ct).unwrap();
let s = match str::from_utf8(&res) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
Ok(Json(s.to_string()))
},
Err(_) => Err(BadRequest(Some(format!("Unsupported scheme {} of session {}", session.scheme, session.random_session_id))))
}
}
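// POST /add_user: derives a CP-ABE secret key for the user's attributes from the session's master keys and stores the new user.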
#[post(path="/add_user", format="application/json", data="<d>")]
fn | (d:Json<User>) -> Result<(), BadRequest<String>> {
let ref username: String = d.username;
let ref passwd: String = d.password;
let ref random_session_id = d.random_session_id;
let salt: i32 = 1234; // TODO use random salt when storing hashed user passwords
println!("Adding user {} {} {} {}", &username, &passwd, salt, random_session_id);
// Create keys for the user
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &random_session_id).unwrap();
let scheme: String = session.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let master_key_material: Vec<String> = serde_json::from_str(&session.key_material).unwrap();
let master_pk: bsw::CpAbePublicKey = serde_json::from_str(&master_key_material[0]).unwrap();
let master_mk: bsw::CpAbeMasterKey = serde_json::from_str(&master_key_material[1]).unwrap();
let user_sk: bsw::CpAbeSecretKey = bsw::keygen(&master_pk, &master_mk, &d.attributes).unwrap();
match db_add_user(&conn, &username, &passwd, salt, &d.attributes, &serde_json::to_string(&user_sk).unwrap(), random_session_id) {
Err(e) => {println!("Nope! {}", e); return Err(BadRequest(Some(format!("Failure adding userpk failure: {}", e))))},
Ok(_r) => return Ok(())
}
},
Err(_) => { return Err(BadRequest(Some(format!("Scheme {} not supported", scheme)))); }
}
}
#[post(path="/list_attrs", format="application/json", data="<d>")]
fn list_attrs(d:Json<ListAttrMsg>, key: ApiKey) -> Result<(String), BadRequest<String>> {
let conn: MysqlConnection = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &key.0).unwrap();
return Ok(session.key_material);
}
#[post(path="/setup", format="application/json", data="<d>")]
fn setup(d:Json<SetupMsg>) -> Result<(String), BadRequest<String>> {
let param: SetupMsg = d.into_inner();
let conn: MysqlConnection = db_connect();
let attributes: String = serde_json::to_string(¶m.attributes).unwrap();
// Setup of a new session. Create keys first
let key_gen_params = KeyGenMsg {
attributes: param.attributes,
scheme: "bsw".to_string()
};
println!("Creating key for {} attributes", key_gen_params.attributes.len());
let key_material: Vec<String> = match _keygen(key_gen_params) { // TODO NotNice: keygen returns a vector of strings. Instead it should return some Box<KeyMaterial> with functions like get_public_key() etc.
Ok(material) => material,
Err(e) => { return Err(BadRequest(Some(format!("Failure to create keys {}",e)))); }
};
// Write new session to database and return its id
let session = db_create_session(&conn, &String::from("bsw"), &key_material, &attributes);
return Ok(session.unwrap());
}
fn _keygen(param: KeyGenMsg) -> Result<Vec<String>, String> {
let scheme: String = param.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
// Generating mk
let (pk, mk): (bsw::CpAbePublicKey,bsw::CpAbeMasterKey) = bsw::setup();
let mut _attributes = param.attributes;
//Generating attribute keys
let res:bsw::CpAbeSecretKey = bsw::keygen(&pk, &mk, &_attributes).unwrap();
Ok(vec![serde_json::to_string(&pk).unwrap(),
serde_json::to_string(&mk).unwrap(),
serde_json::to_string(&res).unwrap()])
},
Err(e) => Err("Unsupported scheme".to_string())
}
}
// ------------------------------------------------------------
// Internal methods follow
// ------------------------------------------------------------
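// Establishes a MySQL connection using the DATABASE_URL environment variable; panics if it is unset or unreachable.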
fn db_connect() -> MysqlConnection {
let database_url : String = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
MysqlConnection::establish(&database_url).expect(&format!("Error connecting to {}", database_url)) // TODO Replace MysqlConnection with more generic "Connection"?
}
/// Adds a user to the database.
fn db_add_user(conn: &MysqlConnection, username: &String, passwd: &String, salt: i32, attributes: &Vec<String>, key_material: &String, random_session_id: &String) -> Result<usize, String> {
use schema::users;
use schema::sessions;
// Get primary key from sessions table
let session: schema::Session = match sessions::table.filter(sessions::random_session_id.eq(random_session_id)).first::<schema::Session>(conn) {
Ok(s) => s,
Err(_e) => {return Err(format!("No session with random id {} present. Cannot add user.", random_session_id)); }
};
match users::table
.filter(users::username.eq(username.to_string()))
.filter(users::session_id.eq(session.id))
.first::<schema::User>(conn) {
Ok(_u) => return Err("User already exists for this session".to_string()),
Err(_e) => {}
};
let user = schema::NewUser {
username: username.to_string(),
password: passwd.to_string(), // TODO store salted hash of pwd.
attributes: serde_json::to_string(attributes).unwrap(),
key_material: key_material.to_string(),
salt: salt,
session_id: session.id
};
match diesel::insert_into(users::table)
.values(&user)
.execute(conn) {
Ok(id) => Ok(id),
Err(_e) => Err("Could not insert user".to_string())
}
}
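/// Creates a new session for the given scheme, stores its key material and attributes, and returns the generated random session id.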
fn db_create_session(conn: &MysqlConnection, scheme: &String, key_material: &Vec<String>, attributes: &String) -> Result<String, String> {
use schema::sessions;
println!("Got scheme {}", scheme);
match scheme.parse::<SCHEMES>() {
Ok(_scheme) => {
let session_id: String = OsRng::new().unwrap().next_u64().to_string();
let session = schema::NewSession {
is_initialized: false,
scheme: scheme.to_string(),
random_session_id: session_id.clone(),
key_material: serde_json::to_string(key_material).unwrap(),
attributes: attributes.to_string()
};
println!("Key material is {}", session.key_material);
// Return auto-gen'd session id
match diesel::insert_into(sessions::table)
.values(&session)
.execute(conn) {
Ok(_usize) => Ok(session_id),
Err(_e) => Err("Could not insert into sessions".to_string())
}
}
Err(_) => Err("Invalid scheme".to_string())
}
}
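/// Looks up a session by its random session id, which doubles as the API key.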
fn db_get_session_by_api_key(conn: &MysqlConnection, api_key: &String) -> Result<schema::Session, diesel::result::Error> {
use schema::sessions;
sessions::table.filter(sessions::random_session_id.eq(api_key))
.first::<schema::Session>(conn)
}
fn _db_get_user_by_username<'a>(conn: &MysqlConnection, user: &'a String) -> Option<schema::User> {
use schema::users;
match users::table.filter(users::username.eq(user))
.first::<schema::User>(conn) {
Ok(u) => Some(u),
Err(_) => None
}
}
fn _db_get_users_by_apikey<'a>(conn: &MysqlConnection, api_key: &String) -> Vec<schema::User> {
use schema::sessions;
use schema::users;
users::table
.inner_join(sessions::table)
.filter(sessions::random_session_id.eq(api_key))
.get_results::<(schema::User, schema::Session)>(conn)
.expect("Could not load users by API key {}")
.into_iter().map(|(user, _session)| user).collect()
}
/// TODO Use to create salted hashed passwords
fn _to_db_passwd(plain_password: String, salt: i32) -> Blake2bResult {
let salted_pwd = plain_password + &salt.to_string();
let res = blake2b(64, &[], salted_pwd.as_bytes());
return res;
}
fn rocket() -> rocket::Rocket {
rocket::ignite().mount("/", routes![setup, list_attrs, encrypt, decrypt, add_user])
}
fn main() {
rocket().launch();
}
/// Returns true if `key` is a valid API key string.
fn is_valid(api_key: String) -> bool {
use schema::users;
use schema::sessions;
let k : String = match api_key.starts_with("Bearer ") {
true => api_key.replace("Bearer ", ""),
false => api_key
};
let conn = db_connect();
match users::table
.inner_join(sessions::table)
.filter(users::session_id.eq(sessions::id))
.filter(sessions::random_session_id.eq(k))
.count()
.get_result::<i64>(&conn) {
Ok(_) => return true,
Err(_e) => return false
}
}
// -----------------------------------------------
// Tests follow
// -----------------------------------------------
#[cfg(test)]
mod tests {
use super::rocket;
use rocket::local::Client;
use rocket::http::Status;
use super::*;
#[test]
fn test_db_user() {
let con = db_connect();
// make sure we have a session to test with
let session_id: String = db_create_session(&con, &String::from("bsw"), &vec!["".to_string()], &"".to_string()).unwrap();
// Write user into db
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let key_material: String = "".to_string();
let attributes: Vec<String>= vec!("".to_string());
let salt: i32 = 1234;
let result: usize = db_add_user(&con, &user, &passwd, salt, &attributes, &key_material, &session_id).unwrap();
assert!(result > 0);
// Check that it is there
let u: schema::User = _db_get_user_by_username(&con, &user).unwrap();
assert_eq!(u.username, user);
}
#[test]
fn test_db_session() {
let con = db_connect();
// Create a user
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let salt: i32 = 1234;
// Setup of a new session. Create keys
let key_gen_parms = KeyGenMsg {
attributes: vec!["attribute_1".to_string(), "attribute_2".to_string()],
scheme: "bsw".to_string()
};
let key_material: Vec<String> = _keygen(key_gen_parms).unwrap();
let attributes: Vec<String> = vec!(String::from(""));
let scheme: String = "bsw".to_string();
let session_id: String = db_create_session(&con, &scheme, &key_material, &serde_json::to_string(&attributes).unwrap()).expect("Could not create session");
println!("Got session id {}", session_id);
db_add_user(&con, &user, &passwd, salt, &attributes, &serde_json::to_string(&key_material).unwrap(), &session_id).expect("Failure adding user");
}
#[test]
fn test_setup() {
let client = Client::new(rocket()).expect("valid rocket instance");
println!("Have rocket");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id: String = response.body_string().unwrap();
println!("SETUP RETURNED {}", &session_id);
// Create user
let user = User {
random_session_id: session_id,
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let response_add: rocket::local::LocalResponse = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
//assert_eq!(response_add.status(), Status::Ok);
}
#[test]
fn test_encrypt_decrypt() {
let client = Client::new(rocket()).expect("valid rocket instance");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id : &String = &response.body_string().unwrap();
println!("Setup returned SessionID {}",session_id);
// Create user
let user = User {
random_session_id: session_id.to_string(),
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let _response_add = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
// Encrypt some text for a policy
let policy:String = String::from(r#"{"OR": [{"ATT": "attribute_1"}, {"ATT": "attribute_2"}]}"#);
let msg : EncMessage = EncMessage {
plaintext : "Encrypt me".into(),
policy : policy,
session_id : session_id.clone()
};
let mut resp_enc = client.post("/encrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&msg).expect("Encryption"))
.dispatch();
assert_eq!(resp_enc.status(), Status::Ok);
let ct:String = resp_enc.body_string().unwrap();
let ct_json = serde_json::from_str(&ct).unwrap();
// Decrypt again
let c : DecMessage = DecMessage {
ct: ct_json,
session_id: session_id.clone(),
username: String::from("admin"),
password: String::from("admin")
};
let mut resp_dec = client.post("/decrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&c).unwrap())
.dispatch();
let pt_hex: String = resp_dec.body_string().unwrap();
println!("HEX: {}", pt_hex);
let mut pt: String = serde_json::from_str(&pt_hex).expect("From json");
pt = pt.trim().to_string();
println!("RESULT: {}", pt);
assert_eq!(pt, "Encrypt me");
}
} | add_user | identifier_name |
main.rs | #![feature(plugin, decl_macro, custom_derive, type_ascription)] // Compiler plugins
#![plugin(rocket_codegen)] // rocket code generator
extern crate rocket;
extern crate rabe;
extern crate serde;
extern crate serde_json;
extern crate rustc_serialize;
extern crate blake2_rfc;
extern crate rocket_simpleauth;
extern crate rand;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use std::error::*;
use std::fs::*;
use std::sync::{Once, ONCE_INIT};
use rand::Rng;
use rand::os::OsRng;
use rocket_contrib::{Json};
use rocket::response::status::BadRequest;
use rocket::http::*;
use rocket::request::FromRequest;
use rocket::request::Request;
use rocket::outcome::Outcome;
use diesel::*;
use std::str;
use std::str::FromStr;
use std::env;
use rabe::schemes::bsw;
use blake2_rfc::blake2b::*;
pub mod schema;
// Change the alias to `Box<error::Error>`.
type BoxedResult<T> = std::result::Result<T, Box<Error>>;
enum SCHEMES {
bsw
}
impl FromStr for SCHEMES {
type Err = ();
fn from_str(s: &str) -> Result<SCHEMES, ()> {
match s {
"bsw" => Ok(SCHEMES::bsw),
_ => Err(()),
}
}
}
// ----------------------------------------------------
// Internal structs follow
// ----------------------------------------------------
struct ApiKey(String);
impl<'t, 'r> FromRequest<'t, 'r> for ApiKey {
type Error = ();
fn from_request(request: &'t Request<'r>) -> Outcome<ApiKey, (Status,()), ()> {
let keys: Vec<_> = request.headers().get("Authorization").collect();
if keys.len() != 1 {
return Outcome::Failure((Status::BadRequest, ()));
}
println!("Got API key {}", keys[0]);
let key = keys[0];
if !is_valid(keys[0].to_string()) {
// return Outcome::Forward(());
return Outcome::Failure((Status::Unauthorized, ()));
}
return Outcome::Success(ApiKey(key.to_string()));
}
}
// -----------------------------------------------------
// Message formats follow
// -----------------------------------------------------
#[derive(Serialize, Deserialize)]
struct Message {
contents: String
}
#[derive(Serialize, Deserialize)]
struct SetupMsg {
scheme: String,
attributes: Vec<String>
}
#[derive(Serialize, Deserialize)]
struct KeyGenMsg {
attributes: Vec<String>,
scheme: String,
}
#[derive(Serialize, Deserialize)]
struct EncMessage {
plaintext :String,
policy : String, // A json serialized policy that is understood by the scheme assigned to the session
session_id : String // Session ID unique per (user,scheme)
}
#[derive(Serialize, Deserialize)]
struct DecMessage {
ct: String,
session_id: String, // Session ID unique per (user,scheme)
username: String,
password: String
}
#[derive(Serialize, Deserialize)]
struct ListAttrMsg {
username : String,
password : String
}
#[derive(Serialize, Deserialize)]
struct User {
username: String,
password: String,
attributes: Vec<String>,
random_session_id: String // Reference to session
}
// -----------------------------------------------------
// REST APIs follow
// -----------------------------------------------------
#[post(path="/encrypt", format="application/json", data="<d>")]
fn encrypt(d:Json<EncMessage>) -> Result<Json<String>, BadRequest<String>> {
// Get active session (panics if not available)
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
// Get key material needed for encryption
let key_material: Vec<String> = serde_json::from_str(&session.key_material.as_str()).unwrap();
let pk_string : &String = &key_material[0];
let plaintext: &Vec<u8> = &d.plaintext.as_bytes().to_vec();
let pk : bsw::CpAbePublicKey = serde_json::from_str(pk_string.as_str()).unwrap(); // TODO NotNice: need to convert to scheme-specific type here. Should be generic trait w/ function "KeyMaterial.get_public_key()"
println!("plaintext {:?}", plaintext);
println!("policy {:?}", &d.policy);
let res = bsw::encrypt(&pk, &d.policy, plaintext).unwrap();
Ok(Json(serde_json::to_string_pretty(&res).unwrap()))
}
#[post(path="/decrypt", format="application/json", data="<d>")]
fn decrypt(d:Json<DecMessage>) -> Result<Json<String>, BadRequest<String>> {
println!("Decryption demanded with ciphertext {}", &d.ct);
// Get session from DB and extract key material needed for decryption
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
let users: Vec<schema::User> = _db_get_users_by_apikey(&conn, &session.random_session_id);
println!("Users {:?}", users.len());
// Find the requesting user within this session; take_while would stop at the first non-matching row.
let user: schema::User = users.into_iter().find(|u| u.username.eq(&d.username)).unwrap();
let key_material: String = user.key_material;
match session.scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let ct: bsw::CpAbeCiphertext = serde_json::from_str(&d.ct).unwrap();
let sk: bsw::CpAbeSecretKey = serde_json::from_str(&key_material).unwrap();
// Decrypt ciphertext
let res = bsw::decrypt(&sk, &ct).unwrap();
let s = match str::from_utf8(&res) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
Ok(Json(s.to_string()))
},
Err(_) => Err(BadRequest(Some(format!("Unsupported scheme {} of session {}", session.scheme, session.random_session_id))))
}
}
#[post(path="/add_user", format="application/json", data="<d>")]
fn add_user(d:Json<User>) -> Result<(), BadRequest<String>> {
let ref username: String = d.username;
let ref passwd: String = d.password;
let ref random_session_id = d.random_session_id;
let salt: i32 = 1234; // TODO use random salt when storing hashed user passwords
println!("Adding user {} {} {} {}", &username, &passwd, salt, random_session_id);
// Create keys for the user
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &random_session_id).unwrap();
let scheme: String = session.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let master_key_material: Vec<String> = serde_json::from_str(&session.key_material).unwrap();
let master_pk: bsw::CpAbePublicKey = serde_json::from_str(&master_key_material[0]).unwrap();
let master_mk: bsw::CpAbeMasterKey = serde_json::from_str(&master_key_material[1]).unwrap();
let user_sk: bsw::CpAbeSecretKey = bsw::keygen(&master_pk, &master_mk, &d.attributes).unwrap();
match db_add_user(&conn, &username, &passwd, salt, &d.attributes, &serde_json::to_string(&user_sk).unwrap(), random_session_id) {
Err(e) => {println!("Nope! {}", e); return Err(BadRequest(Some(format!("Failure adding userpk failure: {}", e))))},
Ok(_r) => return Ok(())
}
},
Err(_) => { return Err(BadRequest(Some(format!("Scheme {} not supported", scheme)))); }
}
}
#[post(path="/list_attrs", format="application/json", data="<d>")]
fn list_attrs(d:Json<ListAttrMsg>, key: ApiKey) -> Result<(String), BadRequest<String>> {
let conn: MysqlConnection = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &key.0).unwrap();
return Ok(session.key_material);
}
#[post(path="/setup", format="application/json", data="<d>")]
fn setup(d:Json<SetupMsg>) -> Result<(String), BadRequest<String>> {
let param: SetupMsg = d.into_inner();
let conn: MysqlConnection = db_connect();
let attributes: String = serde_json::to_string(¶m.attributes).unwrap();
// Setup of a new session. Create keys first
let key_gen_params = KeyGenMsg {
attributes: param.attributes,
scheme: "bsw".to_string()
};
println!("Creating key for {} attributes", key_gen_params.attributes.len());
let key_material: Vec<String> = match _keygen(key_gen_params) { // TODO NotNice: keygen returns a vector of strings. Instead it should return some Box<KeyMaterial> with functions like get_public_key() etc.
Ok(material) => material,
Err(e) => { return Err(BadRequest(Some(format!("Failure to create keys {}",e)))); }
};
// Write new session to database and return its id
let session = db_create_session(&conn, &String::from("bsw"), &key_material, &attributes);
return Ok(session.unwrap());
}
fn _keygen(param: KeyGenMsg) -> Result<Vec<String>, String> {
let scheme: String = param.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
// Generating mk
let (pk, mk): (bsw::CpAbePublicKey,bsw::CpAbeMasterKey) = bsw::setup();
let mut _attributes = param.attributes;
//Generating attribute keys
let res:bsw::CpAbeSecretKey = bsw::keygen(&pk, &mk, &_attributes).unwrap();
Ok(vec![serde_json::to_string(&pk).unwrap(),
serde_json::to_string(&mk).unwrap(),
serde_json::to_string(&res).unwrap()])
},
Err(e) => Err("Unsupported scheme".to_string())
}
}
// ------------------------------------------------------------
// Internal methods follow
// ------------------------------------------------------------
fn db_connect() -> MysqlConnection {
let database_url : String = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
MysqlConnection::establish(&database_url).expect(&format!("Error connecting to {}", database_url)) // TODO Replace MysqlConnection with more generic "Connection"?
}
/// Adds a user to the database.
fn db_add_user(conn: &MysqlConnection, username: &String, passwd: &String, salt: i32, attributes: &Vec<String>, key_material: &String, random_session_id: &String) -> Result<usize, String> {
use schema::users;
use schema::sessions;
// Get primary key from sessions table
let session: schema::Session = match sessions::table.filter(sessions::random_session_id.eq(random_session_id)).first::<schema::Session>(conn) {
Ok(s) => s,
Err(_e) => {return Err(format!("No session with random id {} present. Cannot add user.", random_session_id)); }
};
match users::table
.filter(users::username.eq(username.to_string()))
.filter(users::session_id.eq(session.id))
.first::<schema::User>(conn) {
Ok(_u) => return Err("User already exists for this session".to_string()),
Err(_e) => {}
};
let user = schema::NewUser {
username: username.to_string(),
password: passwd.to_string(), // TODO store salted hash of pwd.
attributes: serde_json::to_string(attributes).unwrap(),
key_material: key_material.to_string(),
salt: salt,
session_id: session.id
};
match diesel::insert_into(users::table)
.values(&user)
.execute(conn) {
Ok(id) => Ok(id),
Err(_e) => Err("Could not insert user".to_string())
}
}
fn db_create_session(conn: &MysqlConnection, scheme: &String, key_material: &Vec<String>, attributes: &String) -> Result<String, String> |
fn db_get_session_by_api_key(conn: &MysqlConnection, api_key: &String) -> Result<schema::Session, diesel::result::Error> {
use schema::sessions;
sessions::table.filter(sessions::random_session_id.eq(api_key))
.first::<schema::Session>(conn)
}
fn _db_get_user_by_username<'a>(conn: &MysqlConnection, user: &'a String) -> Option<schema::User> {
use schema::users;
match users::table.filter(users::username.eq(user))
.first::<schema::User>(conn) {
Ok(u) => Some(u),
Err(_) => None
}
}
fn _db_get_users_by_apikey<'a>(conn: &MysqlConnection, api_key: &String) -> Vec<schema::User> {
use schema::sessions;
use schema::users;
users::table
.inner_join(sessions::table)
.filter(sessions::random_session_id.eq(api_key))
.get_results::<(schema::User, schema::Session)>(conn)
.expect("Could not load users by API key {}")
.into_iter().map(|(user, _session)| user).collect()
}
/// TODO Use to create salted hashed passwords
fn _to_db_passwd(plain_password: String, salt: i32) -> Blake2bResult {
let salted_pwd = plain_password + &salt.to_string();
let res = blake2b(64, &[], salted_pwd.as_bytes());
return res;
}
fn rocket() -> rocket::Rocket {
rocket::ignite().mount("/", routes![setup, list_attrs, encrypt, decrypt, add_user])
}
fn main() {
rocket().launch();
}
/// Returns true if `key` is a valid API key string.
fn is_valid(api_key: String) -> bool {
use schema::users;
use schema::sessions;
let k : String = match api_key.starts_with("Bearer ") {
true => api_key.replace("Bearer ", ""),
false => api_key
};
let conn = db_connect();
match users::table
.inner_join(sessions::table)
.filter(users::session_id.eq(sessions::id))
.filter(sessions::random_session_id.eq(k))
.count()
.get_result::<i64>(&conn) {
Ok(_) => return true,
Err(_e) => return false
}
}
// -----------------------------------------------
// Tests follow
// -----------------------------------------------
#[cfg(test)]
mod tests {
use super::rocket;
use rocket::local::Client;
use rocket::http::Status;
use super::*;
#[test]
fn test_db_user() {
let con = db_connect();
// make sure we have a session to test with
let session_id: String = db_create_session(&con, &String::from("bsw"), &vec!["".to_string()], &"".to_string()).unwrap();
// Write user into db
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let key_material: String = "".to_string();
let attributes: Vec<String>= vec!("".to_string());
let salt: i32 = 1234;
let result: usize = db_add_user(&con, &user, &passwd, salt, &attributes, &key_material, &session_id).unwrap();
assert!(result > 0);
// Check that it is there
let u: schema::User = _db_get_user_by_username(&con, &user).unwrap();
assert_eq!(u.username, user);
}
#[test]
fn test_db_session() {
let con = db_connect();
// Create a user
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let salt: i32 = 1234;
// Setup of a new session. Create keys
let key_gen_parms = KeyGenMsg {
attributes: vec!["attribute_1".to_string(), "attribute_2".to_string()],
scheme: "bsw".to_string()
};
let key_material: Vec<String> = _keygen(key_gen_parms).unwrap();
let attributes: Vec<String> = vec!(String::from(""));
let scheme: String = "bsw".to_string();
let session_id: String = db_create_session(&con, &scheme, &key_material, &serde_json::to_string(&attributes).unwrap()).expect("Could not create session");
println!("Got session id {}", session_id);
db_add_user(&con, &user, &passwd, salt, &attributes, &serde_json::to_string(&key_material).unwrap(), &session_id).expect("Failure adding user");
}
#[test]
fn test_setup() {
let client = Client::new(rocket()).expect("valid rocket instance");
println!("Have rocket");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id: String = response.body_string().unwrap();
println!("SETUP RETURNED {}", &session_id);
// Create user
let user = User {
random_session_id: session_id,
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let response_add: rocket::local::LocalResponse = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
//assert_eq!(response_add.status(), Status::Ok);
}
#[test]
fn test_encrypt_decrypt() {
let client = Client::new(rocket()).expect("valid rocket instance");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id : &String = &response.body_string().unwrap();
println!("Setup returned SessionID {}",session_id);
// Create user
let user = User {
random_session_id: session_id.to_string(),
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let _response_add = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
// Encrypt some text for a policy
let policy:String = String::from(r#"{"OR": [{"ATT": "attribute_1"}, {"ATT": "attribute_2"}]}"#);
let msg : EncMessage = EncMessage {
plaintext : "Encrypt me".into(),
policy : policy,
session_id : session_id.clone()
};
let mut resp_enc = client.post("/encrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&msg).expect("Encryption"))
.dispatch();
assert_eq!(resp_enc.status(), Status::Ok);
let ct:String = resp_enc.body_string().unwrap();
let ct_json = serde_json::from_str(&ct).unwrap();
// Decrypt again
let c : DecMessage = DecMessage {
ct: ct_json,
session_id: session_id.clone(),
username: String::from("admin"),
password: String::from("admin")
};
let mut resp_dec = client.post("/decrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&c).unwrap())
.dispatch();
let pt_hex: String = resp_dec.body_string().unwrap();
println!("HEX: {}", pt_hex);
let mut pt: String = serde_json::from_str(&pt_hex).expect("From json");
pt = pt.trim().to_string();
println!("RESULT: {}", pt);
assert_eq!(pt, "Encrypt me");
}
} | {
use schema::sessions;
println!("Got scheme {}", scheme);
match scheme.parse::<SCHEMES>() {
Ok(_scheme) => {
let session_id: String = OsRng::new().unwrap().next_u64().to_string();
let session = schema::NewSession {
is_initialized: false,
scheme: scheme.to_string(),
random_session_id: session_id.clone(),
key_material: serde_json::to_string(key_material).unwrap(),
attributes: attributes.to_string()
};
println!("Key material is {}", session.key_material);
// Return auto-gen'd session id
match diesel::insert_into(sessions::table)
.values(&session)
.execute(conn) {
Ok(_usize) => Ok(session_id),
Err(_e) => Err("Could not insert into sessions".to_string())
}
}
Err(_) => Err("Invalid scheme".to_string())
}
} | identifier_body |
main.rs | #![feature(plugin, decl_macro, custom_derive, type_ascription)] // Compiler plugins
#![plugin(rocket_codegen)] // rocket code generator
extern crate rocket;
extern crate rabe;
extern crate serde;
extern crate serde_json;
extern crate rustc_serialize;
extern crate blake2_rfc;
extern crate rocket_simpleauth;
extern crate rand;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use std::error::*;
use std::fs::*;
use std::sync::{Once, ONCE_INIT};
use rand::Rng;
use rand::os::OsRng;
use rocket_contrib::{Json};
use rocket::response::status::BadRequest;
use rocket::http::*;
use rocket::request::FromRequest;
use rocket::request::Request;
use rocket::outcome::Outcome;
use diesel::*;
use std::str;
use std::str::FromStr;
use std::env;
use rabe::schemes::bsw;
use blake2_rfc::blake2b::*;
pub mod schema;
// Change the alias to `Box<error::Error>`.
type BoxedResult<T> = std::result::Result<T, Box<Error>>;
enum SCHEMES {
bsw
}
impl FromStr for SCHEMES {
type Err = ();
fn from_str(s: &str) -> Result<SCHEMES, ()> {
match s {
"bsw" => Ok(SCHEMES::bsw),
_ => Err(()),
}
}
}
// ----------------------------------------------------
// Internal structs follow
// ----------------------------------------------------
struct ApiKey(String);
impl<'t, 'r> FromRequest<'t, 'r> for ApiKey {
type Error = ();
fn from_request(request: &'t Request<'r>) -> Outcome<ApiKey, (Status,()), ()> {
let keys: Vec<_> = request.headers().get("Authorization").collect();
if keys.len() != 1 {
return Outcome::Failure((Status::BadRequest, ()));
}
println!("Got API key {}", keys[0]);
let key = keys[0];
if !is_valid(keys[0].to_string()) {
// return Outcome::Forward(()); | }
// -----------------------------------------------------
// Message formats follow
// -----------------------------------------------------
#[derive(Serialize, Deserialize)]
struct Message {
contents: String
}
#[derive(Serialize, Deserialize)]
struct SetupMsg {
scheme: String,
attributes: Vec<String>
}
#[derive(Serialize, Deserialize)]
struct KeyGenMsg {
attributes: Vec<String>,
scheme: String,
}
#[derive(Serialize, Deserialize)]
struct EncMessage {
plaintext :String,
policy : String, // A json serialized policy that is understood by the scheme assigned to the session
session_id : String // Session ID unique per (user,scheme)
}
#[derive(Serialize, Deserialize)]
struct DecMessage {
ct: String,
session_id: String, // Session ID unique per (user,scheme)
username: String,
password: String
}
#[derive(Serialize, Deserialize)]
struct ListAttrMsg {
username : String,
password : String
}
#[derive(Serialize, Deserialize)]
struct User {
username: String,
password: String,
attributes: Vec<String>,
random_session_id: String // Reference to session
}
// -----------------------------------------------------
// REST APIs follow
// -----------------------------------------------------
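/// Encrypts a plaintext under the given policy, using the public key stored for the session.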
#[post(path="/encrypt", format="application/json", data="<d>")]
fn encrypt(d:Json<EncMessage>) -> Result<Json<String>, BadRequest<String>> {
// Get active session (panics if not available)
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
// Get key material needed for encryption
let key_material: Vec<String> = serde_json::from_str(&session.key_material.as_str()).unwrap();
let pk_string : &String = &key_material[0];
let plaintext: &Vec<u8> = &d.plaintext.as_bytes().to_vec();
let pk : bsw::CpAbePublicKey = serde_json::from_str(pk_string.as_str()).unwrap(); // TODO NotNice: need to convert to scheme-specific type here. Should be generic trait w/ function "KeyMaterial.get_public_key()"
println!("plaintext {:?}", plaintext);
println!("policy {:?}", &d.policy);
let res = bsw::encrypt(&pk, &d.policy, plaintext).unwrap();
Ok(Json(serde_json::to_string_pretty(&res).unwrap()))
}
#[post(path="/decrypt", format="application/json", data="<d>")]
fn decrypt(d:Json<DecMessage>) -> Result<Json<String>, BadRequest<String>> {
println!("Decryption demanded with ciphertext {}", &d.ct);
// Get session from DB and extract key material needed for decryption
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &d.session_id).unwrap();
let users: Vec<schema::User> = _db_get_users_by_apikey(&conn, &session.random_session_id);
println!("Users {:?}", users.len());
    let user: schema::User = users.into_iter().find(|u| u.username.eq(&d.username)).unwrap();
let key_material: String = user.key_material;
match session.scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let ct: bsw::CpAbeCiphertext = serde_json::from_str(&d.ct).unwrap();
let sk: bsw::CpAbeSecretKey = serde_json::from_str(&key_material).unwrap();
// Decrypt ciphertext
let res = bsw::decrypt(&sk, &ct).unwrap();
let s = match str::from_utf8(&res) {
Ok(v) => v,
Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
};
Ok(Json(s.to_string()))
},
Err(_) => Err(BadRequest(Some(format!("Unsupported scheme {} of session {}", session.scheme, session.random_session_id))))
}
}
#[post(path="/add_user", format="application/json", data="<d>")]
fn add_user(d:Json<User>) -> Result<(), BadRequest<String>> {
let ref username: String = d.username;
let ref passwd: String = d.password;
let ref random_session_id = d.random_session_id;
let salt: i32 = 1234; // TODO use random salt when storing hashed user passwords
println!("Adding user {} {} {} {}", &username, &passwd, salt, random_session_id);
// Create keys for the user
let conn = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &random_session_id).unwrap();
let scheme: String = session.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
let master_key_material: Vec<String> = serde_json::from_str(&session.key_material).unwrap();
let master_pk: bsw::CpAbePublicKey = serde_json::from_str(&master_key_material[0]).unwrap();
let master_mk: bsw::CpAbeMasterKey = serde_json::from_str(&master_key_material[1]).unwrap();
let user_sk: bsw::CpAbeSecretKey = bsw::keygen(&master_pk, &master_mk, &d.attributes).unwrap();
match db_add_user(&conn, &username, &passwd, salt, &d.attributes, &serde_json::to_string(&user_sk).unwrap(), random_session_id) {
                Err(e) => {println!("Nope! {}", e); return Err(BadRequest(Some(format!("Failure adding user: {}", e))))},
Ok(_r) => return Ok(())
}
},
Err(_) => { return Err(BadRequest(Some(format!("Scheme {} not supported", scheme)))); }
}
}
#[post(path="/list_attrs", format="application/json", data="<d>")]
fn list_attrs(d:Json<ListAttrMsg>, key: ApiKey) -> Result<(String), BadRequest<String>> {
let conn: MysqlConnection = db_connect();
let session: schema::Session = db_get_session_by_api_key(&conn, &key.0).unwrap();
return Ok(session.key_material);
}
#[post(path="/setup", format="application/json", data="<d>")]
fn setup(d:Json<SetupMsg>) -> Result<(String), BadRequest<String>> {
let param: SetupMsg = d.into_inner();
let conn: MysqlConnection = db_connect();
let attributes: String = serde_json::to_string(¶m.attributes).unwrap();
// Setup of a new session. Create keys first
let key_gen_params = KeyGenMsg {
attributes: param.attributes,
scheme: "bsw".to_string()
};
println!("Creating key for {} attributes", key_gen_params.attributes.len());
let key_material: Vec<String> = match _keygen(key_gen_params) { // TODO NotNice: keygen returns a vector of strings. Instead it should return some Box<KeyMaterial> with functions like get_public_key() etc.
Ok(material) => material,
Err(e) => { return Err(BadRequest(Some(format!("Failure to create keys {}",e)))); }
};
// Write new session to database and return its id
let session = db_create_session(&conn, &String::from("bsw"), &key_material, &attributes);
return Ok(session.unwrap());
}
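/// Generates the key material (public key, master key and an attribute secret key) for the requested scheme.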
fn _keygen(param: KeyGenMsg) -> Result<Vec<String>, String> {
let scheme: String = param.scheme;
match scheme.parse::<SCHEMES>() {
Ok(SCHEMES::bsw) => {
// Generating mk
let (pk, mk): (bsw::CpAbePublicKey,bsw::CpAbeMasterKey) = bsw::setup();
let mut _attributes = param.attributes;
//Generating attribute keys
let res:bsw::CpAbeSecretKey = bsw::keygen(&pk, &mk, &_attributes).unwrap();
Ok(vec![serde_json::to_string(&pk).unwrap(),
serde_json::to_string(&mk).unwrap(),
serde_json::to_string(&res).unwrap()])
},
        Err(_) => Err("Unsupported scheme".to_string())
}
}
// ------------------------------------------------------------
// Internal methods follow
// ------------------------------------------------------------
fn db_connect() -> MysqlConnection {
let database_url : String = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
MysqlConnection::establish(&database_url).expect(&format!("Error connecting to {}", database_url)) // TODO Replace MysqlConnection with more generic "Connection"?
}
/// Adds a user to database.
fn db_add_user(conn: &MysqlConnection, username: &String, passwd: &String, salt: i32, attributes: &Vec<String>, key_material: &String, random_session_id: &String) -> Result<usize, String> {
use schema::users;
use schema::sessions;
// Get primary key from sessions table
let session: schema::Session = match sessions::table.filter(sessions::random_session_id.eq(random_session_id)).first::<schema::Session>(conn) {
Ok(s) => s,
Err(_e) => {return Err(format!("No session with random id {} present. Cannot add user.", random_session_id)); }
};
match users::table
.filter(users::username.eq(username.to_string()))
.filter(users::session_id.eq(session.id))
.first::<schema::User>(conn) {
Ok(_u) => return Err("User already exists for this session".to_string()),
Err(_e) => {}
};
let user = schema::NewUser {
username: username.to_string(),
password: passwd.to_string(), // TODO store salted hash of pwd.
attributes: serde_json::to_string(attributes).unwrap(),
key_material: key_material.to_string(),
salt: salt,
session_id: session.id
};
match diesel::insert_into(users::table)
.values(&user)
.execute(conn) {
Ok(id) => Ok(id),
Err(_e) => Err("Could not insert user".to_string())
}
}
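/// Creates a new session for the given scheme, storing its serialized key material and attributes,
/// and returns the generated random session id.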
fn db_create_session(conn: &MysqlConnection, scheme: &String, key_material: &Vec<String>, attributes: &String) -> Result<String, String> {
use schema::sessions;
println!("Got scheme {}", scheme);
match scheme.parse::<SCHEMES>() {
Ok(_scheme) => {
let session_id: String = OsRng::new().unwrap().next_u64().to_string();
let session = schema::NewSession {
is_initialized: false,
scheme: scheme.to_string(),
random_session_id: session_id.clone(),
key_material: serde_json::to_string(key_material).unwrap(),
attributes: attributes.to_string()
};
println!("Key material is {}", session.key_material);
// Return auto-gen'd session id
match diesel::insert_into(sessions::table)
.values(&session)
.execute(conn) {
Ok(_usize) => Ok(session_id),
Err(_e) => Err("Could not insert into sessions".to_string())
}
}
Err(_) => Err("Invalid scheme".to_string())
}
}
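/// Looks up a session by its random session id (used as the API key).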
fn db_get_session_by_api_key(conn: &MysqlConnection, api_key: &String) -> Result<schema::Session, diesel::result::Error> {
use schema::sessions;
sessions::table.filter(sessions::random_session_id.eq(api_key))
.first::<schema::Session>(conn)
}
fn _db_get_user_by_username<'a>(conn: &MysqlConnection, user: &'a String) -> Option<schema::User> {
use schema::users;
match users::table.filter(users::username.eq(user))
.first::<schema::User>(conn) {
Ok(u) => Some(u),
Err(_) => None
}
}
fn _db_get_users_by_apikey<'a>(conn: &MysqlConnection, api_key: &String) -> Vec<schema::User> {
use schema::sessions;
use schema::users;
users::table
.inner_join(sessions::table)
.filter(sessions::random_session_id.eq(api_key))
.get_results::<(schema::User, schema::Session)>(conn)
.expect("Could not load users by API key {}")
.into_iter().map(|(user, _session)| user).collect()
}
/// TODO Use to create salted hashed passwords
fn _to_db_passwd(plain_password: String, salt: i32) -> Blake2bResult {
let salted_pwd = plain_password + &salt.to_string();
let res = blake2b(64, &[], salted_pwd.as_bytes());
return res;
}
fn rocket() -> rocket::Rocket {
rocket::ignite().mount("/", routes![setup, list_attrs, encrypt, decrypt, add_user])
}
fn main() {
rocket().launch();
}
/// Returns true if `key` is a valid API key string.
fn is_valid(api_key: String) -> bool {
use schema::users;
use schema::sessions;
let k : String = match api_key.starts_with("Bearer ") {
true => api_key.replace("Bearer ", ""),
false => api_key
};
let conn = db_connect();
match users::table
.inner_join(sessions::table)
.filter(users::session_id.eq(sessions::id))
.filter(sessions::random_session_id.eq(k))
.count()
.get_result::<i64>(&conn) {
        Ok(count) => return count > 0,
Err(_e) => return false
}
}
// -----------------------------------------------
// Tests follow
// -----------------------------------------------
#[cfg(test)]
mod tests {
use super::rocket;
use rocket::local::Client;
use rocket::http::Status;
use super::*;
#[test]
fn test_db_user() {
let con = db_connect();
// make sure we have a session to test with
let session_id: String = db_create_session(&con, &String::from("bsw"), &vec!["".to_string()], &"".to_string()).unwrap();
// Write user into db
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let key_material: String = "".to_string();
let attributes: Vec<String>= vec!("".to_string());
let salt: i32 = 1234;
let result: usize = db_add_user(&con, &user, &passwd, salt, &attributes, &key_material, &session_id).unwrap();
assert!(result > 0);
// Check that it is there
let u: schema::User = _db_get_user_by_username(&con, &user).unwrap();
assert_eq!(u.username, user);
}
#[test]
fn test_db_session() {
let con = db_connect();
// Create a user
let user: String = "bla".to_string();
let passwd: String = "blubb".to_string();
let salt: i32 = 1234;
// Setup of a new session. Create keys
let key_gen_parms = KeyGenMsg {
attributes: vec!["attribute_1".to_string(), "attribute_2".to_string()],
scheme: "bsw".to_string()
};
let key_material: Vec<String> = _keygen(key_gen_parms).unwrap();
let attributes: Vec<String> = vec!(String::from(""));
let scheme: String = "bsw".to_string();
let session_id: String = db_create_session(&con, &scheme, &key_material, &serde_json::to_string(&attributes).unwrap()).expect("Could not create session");
println!("Got session id {}", session_id);
db_add_user(&con, &user, &passwd, salt, &attributes, &serde_json::to_string(&key_material).unwrap(), &session_id).expect("Failure adding user");
}
#[test]
fn test_setup() {
let client = Client::new(rocket()).expect("valid rocket instance");
println!("Have rocket");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id: String = response.body_string().unwrap();
println!("SETUP RETURNED {}", &session_id);
// Create user
let user = User {
random_session_id: session_id,
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let response_add: rocket::local::LocalResponse = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
//assert_eq!(response_add.status(), Status::Ok);
}
#[test]
fn test_encrypt_decrypt() {
let client = Client::new(rocket()).expect("valid rocket instance");
// Set up scheme
let setup_msg: SetupMsg = SetupMsg {
scheme: "bsw".to_string(),
attributes: vec!("attribute_1".to_string(), "attribute_2".to_string())
};
let mut response = client.post("/setup")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&setup_msg)).expect("Setting up bsw"))
.dispatch();
assert_eq!(response.status(), Status::Ok);
let session_id : &String = &response.body_string().unwrap();
println!("Setup returned SessionID {}",session_id);
// Create user
let user = User {
random_session_id: session_id.to_string(),
username : String::from("admin"),
password : String::from("admin"),
attributes: vec!("attribute_1".to_string())
};
let _response_add = client.post("/add_user")
.header(ContentType::JSON)
.body(serde_json::to_string(&json!(&user)).expect("Attribute serialization"))
.dispatch();
// Encrypt some text for a policy
let policy:String = String::from(r#"{"OR": [{"ATT": "attribute_1"}, {"ATT": "attribute_2"}]}"#);
let msg : EncMessage = EncMessage {
plaintext : "Encrypt me".into(),
policy : policy,
session_id : session_id.clone()
};
let mut resp_enc = client.post("/encrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&msg).expect("Encryption"))
.dispatch();
assert_eq!(resp_enc.status(), Status::Ok);
let ct:String = resp_enc.body_string().unwrap();
let ct_json = serde_json::from_str(&ct).unwrap();
// Decrypt again
let c : DecMessage = DecMessage {
ct: ct_json,
session_id: session_id.clone(),
username: String::from("admin"),
password: String::from("admin")
};
let mut resp_dec = client.post("/decrypt")
.header(ContentType::JSON)
.body(serde_json::to_string(&c).unwrap())
.dispatch();
let pt_hex: String = resp_dec.body_string().unwrap();
println!("HEX: {}", pt_hex);
let mut pt: String = serde_json::from_str(&pt_hex).expect("From json");
pt = pt.trim().to_string();
println!("RESULT: {}", pt);
assert_eq!(pt, "Encrypt me");
}
} | return Outcome::Failure((Status::Unauthorized, ()));
}
return Outcome::Success(ApiKey(key.to_string()));
} | random_line_split |
network_listener.py | # -*- coding: utf-8 -*-
from operator import attrgetter
import logging
import StringIO
import struct
import dpkt
from netifaces import interfaces, ifaddresses, AF_INET
from sniff import PcapWrapper
import http
ACK = dpkt.tcp.TH_ACK
SYN = dpkt.tcp.TH_SYN
FIN = dpkt.tcp.TH_FIN
RST = dpkt.tcp.TH_RST
PUSH = dpkt.tcp.TH_PUSH
def ipaddr_string(addr):
return '.'.join(str(octet) for octet in struct.unpack('B' * len(addr), addr))
class NetworkFileListener(object):
def __init__(self, interface=None, mime_types=None):
self.pc = None
self.on_file_complete = None
self.packet_streams = {}
self.local_ips = self.detect_local_ips()
logging.info("Local IP Addresses: %s" % ', '.join(self.local_ips))
self.interface = interface
self.mime_types = mime_types
def detect_local_ips(self):
"""Determine all of the local ip addresses for this machine
This allows us to flag traffic as inbound or outbound.
"""
result = set()
for ifaceName in interfaces():
            try:
                address = [i['addr'] for i in ifaddresses(ifaceName)[AF_INET]]
            except (KeyError, ValueError):
                # interface is down or has no IPv4 address
                continue
            if address:
                result.add(address[0])
return tuple(result)
def start(self):
if self.pc is not None:
raise Exception('Already listening.')
self.pc = PcapWrapper(self.interface, filters='tcp')
try:
self.pc.loop(self._on_packet)
except KeyboardInterrupt:
pass
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_packet(self, pkt):
try:
self._handle_packet(pkt)
except Exception as e:
logging.exception(e)
def _parse_tcp_packet(self, data):
        if data is None or len(data) == 0:
return None
if data.startswith('GET') or data.startswith('POST'):
return None
try:
            return dpkt.http.Response(data)
except:
pass
return None
def _handle_packet(self, pkt):
eth = dpkt.ethernet.Ethernet(pkt)
ip = eth.data
tcp = ip.data
data = tcp.data
is_outbound = ipaddr_string(ip.dst) not in self.local_ips
direction = 'outbound' if is_outbound else 'inbound'
connection_hash = hash_packet(eth, outbound=is_outbound)
if ipaddr_string(ip.src) in self.local_ips and ipaddr_string(ip.dst) in self.local_ips:
# ignore packets that exist only on this computer
return
# lets first check if this is a http request instead of a response
if data.startswith('GET') or data.startswith('POST'):
if is_outbound:
# ignore inbound http request
_msg = 'Detected an %s HTTP Request from %s to %s'
logging.debug(_msg % (direction, ipaddr_string(ip.src), ipaddr_string(ip.dst)))
self._handle_request(connection_hash, tcp)
elif not is_outbound:
stream = self.packet_streams.get(connection_hash)
if stream is not None:
self._handle_response(stream, tcp)
def _handle_request(self, connection_hash, tcp_pkt):
if http.has_complete_headers(tcp_pkt.data):
req = http.parse_request(tcp_pkt.data)
logging.debug('Request URL: %s' % req['host'] + req['path'])
logging.debug('Storing stream %s.' % connection_hash)
self.packet_streams[connection_hash] = TcpStream(connection_hash)
def _delete_stream(self, stream):
if stream.id in self.packet_streams:
del self.packet_streams[stream.id]
def _handle_response(self, stream, tcp_pkt):
had_headers = (stream.headers is not None)
stream.add_packet(tcp_pkt)
if not had_headers and stream.headers is not None:
# this will happen the first packet that contain http header
if self.mime_types is not None:
mime_type = stream.headers.get('content-type', '').split(';')[0].strip()
if mime_type not in self.mime_types:
logging.debug('Ignoring mime_type %s' % mime_type)
self._delete_stream(stream)
return
if stream.is_finished:
if stream.id not in self.packet_streams:
# probably just a retransmission
return
self._delete_stream(stream)
if stream.is_valid:
self._on_request_complete(stream)
else:
_msg = "Stream was invalid at %.1f%% with %i bytes loaded"
logging.error(_msg % (stream.progress * 100, stream.http_bytes_loaded))
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_request_complete(self, stream):
headers = stream.headers
if headers is not None:
mime_type = headers.get('content-type')
_msg = "Successfully observed a file with %i bytes and mime-type %s"
logging.info(_msg % (stream.http_content_length, stream.headers.get('content-type', '')))
f = RawFile(stream.content, mime_type)
self._on_file_complete(f)
def _on_file_complete(self, f):
if self.on_file_complete is not None:
|
def iter_packets(iterable):
"""Sorts an iterable of packets and removes the duplicates"""
prev = None
for i in sorted(iterable, key=attrgetter('seq')):
if prev is None or prev.seq != i.seq:
prev = i
yield i
def hash_packet(eth, outbound=False):
"""Hashes a packet to determine the tcp stream it is part of """
ip = eth.data
tcp = ip.data
return '%s:%i' % (ipaddr_string(ip.dst if outbound else ip.src),
tcp.sport if outbound else tcp.dport
)
def parse_flags(flags):
result = []
for flag, name in ((ACK, 'ACK'), (SYN, 'SYN'), (PUSH, 'PUSH'), (RST, 'RST')):
if flags & flag:
result.append(name)
return result
class RawFile(object):
def __init__(self, content, mime_type):
self.content = content
self.mime_type = mime_type
class TcpStream(object):
def __init__(self, id):
self.id = id
self.buffer = {}
self.packets = None
self.base_seq = None
self.next_seq = None
self.header_data = ''
self.headers = None
self.is_http = None
self.is_finished = False
self.is_valid = True
self.http_content_length = None
self.http_bytes_loaded = 0
def add_packet(self, packet):
if self.is_finished:
return
#vals = []
#for k in ('ack', 'dport', 'flags', 'off', 'off_x2', 'seq', 'sport', 'sum', 'urp', 'win'):
# vals.append('%s=%s' % (k, getattr(packet, k)))
#vals.append(parse_flags(packet.flags))
if self.base_seq is None:
# we do not yet know the first seq
if packet.data.startswith('HTTP'):
self.is_http = True
self._on_first_packet(packet)
else:
self.buffer[packet.seq] = packet
else:
if packet.seq == self.next_seq:
# the exact next packet
self._on_next_packet(packet)
elif packet.seq < self.next_seq:
# retransmission
pass
else:
#out of order packet
self.buffer[packet.seq] = packet
# if the buffer grows to be > than 2K assume something went wrong
if len(self.buffer) > 2000:
self.is_finished = True
self.is_valid = False
logging.error("Packet buffer filled up")
def rel_seq(self, packet):
return packet.seq - self.base_seq
@property
def content(self):
return self.packets
@property
def progress(self):
if self.http_content_length is None:
return 0
if self.http_content_length in (0, self.http_bytes_loaded):
return 1
return float(self.http_bytes_loaded) / float(self.http_content_length)
def _on_first_packet(self, packet):
# check if this is actually the first packet
if self.base_seq is None:
self.base_seq = packet.seq
self.next_seq = packet.seq
self._on_next_packet(packet)
def _on_next_packet(self, packet):
self._append_packet(packet)
self._check_buffer()
def _check_buffer(self):
"""Looks in the buffer to see if we have the next packet, if so append
it and continue till there are no packets left.
"""
count = 0
for packet in self.remove_buffered_packets():
self._append_packet(packet)
count += 1
if count > 0:
logging.debug('Removed %i items from the buffer, %i left.' % (count, len(self.buffer)))
def remove_buffered_packets(self):
"""Iterates over next packets in the buffer and removes them"""
seq = self.next_seq
while True:
p = self.buffer.pop(seq, None)
if p is None:
break
else:
seq += len(p.data)
yield p
def _append_packet(self, packet):
"""Appends a packet to the end of the list of received packets and processes it"""
self.next_seq += len(packet.data)
if self.headers is not None:
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(packet.data)
self.http_bytes_loaded += len(packet.data)
else:
self.header_data += packet.data
# check if we have enough packets for the entire http header
if self.is_http and self.headers is None:
if http.has_complete_headers(self.header_data):
resp = http.parse_response(self.header_data)
self.header_data = None
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(resp['body'])
self.headers = resp['headers']
self.http_bytes_loaded = len(resp['body'])
self._on_http_headers()
# check if we have finished the request
if self.http_content_length is not None:
if self.http_content_length == self.http_bytes_loaded:
self.is_finished = True
elif self.http_content_length < self.http_bytes_loaded:
logging.error("Received data was longer than the content length header")
self.is_valid = False
self.is_finished = True
self._handle_ordered_packet(packet)
def _on_last_packet(self, packet):
self.is_finished = True
def _handle_ordered_packet(self, packet):
"""This will eventually provide a way for a callback receive packets in order"""
pass
def _on_http_headers(self):
content_length = self.headers.get('content-length', None)
if content_length is not None:
content_length = int(content_length)
self.http_content_length = content_length
| self.on_file_complete(f) | conditional_block |
network_listener.py | # -*- coding: utf-8 -*-
from operator import attrgetter
import logging
import StringIO
import struct
import dpkt
from netifaces import interfaces, ifaddresses, AF_INET
from sniff import PcapWrapper
import http
ACK = dpkt.tcp.TH_ACK
SYN = dpkt.tcp.TH_SYN
FIN = dpkt.tcp.TH_FIN
RST = dpkt.tcp.TH_RST
PUSH = dpkt.tcp.TH_PUSH
def ipaddr_string(addr):
return '.'.join(str(octet) for octet in struct.unpack('B' * len(addr), addr))
class NetworkFileListener(object):
def __init__(self, interface=None, mime_types=None):
self.pc = None
self.on_file_complete = None
self.packet_streams = {}
self.local_ips = self.detect_local_ips()
logging.info("Local IP Addresses: %s" % ', '.join(self.local_ips))
self.interface = interface
self.mime_types = mime_types
def detect_local_ips(self):
"""Determine all of the local ip addresses for this machine
This allows us to flag traffic as inbound or outbound.
"""
result = set()
for ifaceName in interfaces():
            try:
                address = [i['addr'] for i in ifaddresses(ifaceName)[AF_INET]]
            except (KeyError, ValueError):
                # interface is down or has no IPv4 address
                continue
            if address:
                result.add(address[0])
return tuple(result)
def start(self):
if self.pc is not None:
raise Exception('Already listening.')
self.pc = PcapWrapper(self.interface, filters='tcp')
try:
self.pc.loop(self._on_packet)
except KeyboardInterrupt:
pass
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_packet(self, pkt):
try:
self._handle_packet(pkt)
except Exception as e:
logging.exception(e)
def _parse_tcp_packet(self, data):
        if data is None or len(data) == 0:
return None
if data.startswith('GET') or data.startswith('POST'):
return None
try:
            return dpkt.http.Response(data)
except:
pass
return None
def _handle_packet(self, pkt):
eth = dpkt.ethernet.Ethernet(pkt)
ip = eth.data
tcp = ip.data
data = tcp.data
is_outbound = ipaddr_string(ip.dst) not in self.local_ips
direction = 'outbound' if is_outbound else 'inbound'
connection_hash = hash_packet(eth, outbound=is_outbound)
if ipaddr_string(ip.src) in self.local_ips and ipaddr_string(ip.dst) in self.local_ips:
# ignore packets that exist only on this computer
return
# lets first check if this is a http request instead of a response
if data.startswith('GET') or data.startswith('POST'):
if is_outbound:
# ignore inbound http request
_msg = 'Detected an %s HTTP Request from %s to %s'
logging.debug(_msg % (direction, ipaddr_string(ip.src), ipaddr_string(ip.dst)))
self._handle_request(connection_hash, tcp)
elif not is_outbound:
stream = self.packet_streams.get(connection_hash)
if stream is not None:
self._handle_response(stream, tcp)
def _handle_request(self, connection_hash, tcp_pkt):
if http.has_complete_headers(tcp_pkt.data):
req = http.parse_request(tcp_pkt.data)
logging.debug('Request URL: %s' % req['host'] + req['path'])
logging.debug('Storing stream %s.' % connection_hash)
self.packet_streams[connection_hash] = TcpStream(connection_hash)
def _delete_stream(self, stream):
if stream.id in self.packet_streams:
del self.packet_streams[stream.id]
def _handle_response(self, stream, tcp_pkt):
had_headers = (stream.headers is not None)
stream.add_packet(tcp_pkt)
if not had_headers and stream.headers is not None:
# this will happen the first packet that contain http header
if self.mime_types is not None:
mime_type = stream.headers.get('content-type', '').split(';')[0].strip()
if mime_type not in self.mime_types:
logging.debug('Ignoring mime_type %s' % mime_type)
self._delete_stream(stream)
return
if stream.is_finished:
if stream.id not in self.packet_streams:
# probably just a retransmission
return
self._delete_stream(stream)
if stream.is_valid:
self._on_request_complete(stream)
else:
_msg = "Stream was invalid at %.1f%% with %i bytes loaded"
logging.error(_msg % (stream.progress * 100, stream.http_bytes_loaded))
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_request_complete(self, stream):
headers = stream.headers
if headers is not None:
mime_type = headers.get('content-type')
_msg = "Successfully observed a file with %i bytes and mime-type %s"
logging.info(_msg % (stream.http_content_length, stream.headers.get('content-type', '')))
f = RawFile(stream.content, mime_type)
self._on_file_complete(f)
def _on_file_complete(self, f):
if self.on_file_complete is not None:
self.on_file_complete(f)
def iter_packets(iterable):
"""Sorts an iterable of packets and removes the duplicates"""
prev = None
for i in sorted(iterable, key=attrgetter('seq')):
if prev is None or prev.seq != i.seq:
prev = i
yield i
def hash_packet(eth, outbound=False):
"""Hashes a packet to determine the tcp stream it is part of """
ip = eth.data
tcp = ip.data
return '%s:%i' % (ipaddr_string(ip.dst if outbound else ip.src),
tcp.sport if outbound else tcp.dport
)
def parse_flags(flags):
|
class RawFile(object):
def __init__(self, content, mime_type):
self.content = content
self.mime_type = mime_type
class TcpStream(object):
def __init__(self, id):
self.id = id
self.buffer = {}
self.packets = None
self.base_seq = None
self.next_seq = None
self.header_data = ''
self.headers = None
self.is_http = None
self.is_finished = False
self.is_valid = True
self.http_content_length = None
self.http_bytes_loaded = 0
def add_packet(self, packet):
if self.is_finished:
return
#vals = []
#for k in ('ack', 'dport', 'flags', 'off', 'off_x2', 'seq', 'sport', 'sum', 'urp', 'win'):
# vals.append('%s=%s' % (k, getattr(packet, k)))
#vals.append(parse_flags(packet.flags))
if self.base_seq is None:
# we do not yet know the first seq
if packet.data.startswith('HTTP'):
self.is_http = True
self._on_first_packet(packet)
else:
self.buffer[packet.seq] = packet
else:
if packet.seq == self.next_seq:
# the exact next packet
self._on_next_packet(packet)
elif packet.seq < self.next_seq:
# retransmission
pass
else:
#out of order packet
self.buffer[packet.seq] = packet
# if the buffer grows to be > than 2K assume something went wrong
if len(self.buffer) > 2000:
self.is_finished = True
self.is_valid = False
logging.error("Packet buffer filled up")
def rel_seq(self, packet):
return packet.seq - self.base_seq
@property
def content(self):
return self.packets
@property
def progress(self):
if self.http_content_length is None:
return 0
if self.http_content_length in (0, self.http_bytes_loaded):
return 1
return float(self.http_bytes_loaded) / float(self.http_content_length)
def _on_first_packet(self, packet):
# check if this is actually the first packet
if self.base_seq is None:
self.base_seq = packet.seq
self.next_seq = packet.seq
self._on_next_packet(packet)
def _on_next_packet(self, packet):
self._append_packet(packet)
self._check_buffer()
def _check_buffer(self):
"""Looks in the buffer to see if we have the next packet, if so append
it and continue till there are no packets left.
"""
count = 0
for packet in self.remove_buffered_packets():
self._append_packet(packet)
count += 1
if count > 0:
logging.debug('Removed %i items from the buffer, %i left.' % (count, len(self.buffer)))
def remove_buffered_packets(self):
"""Iterates over next packets in the buffer and removes them"""
seq = self.next_seq
while True:
p = self.buffer.pop(seq, None)
if p is None:
break
else:
seq += len(p.data)
yield p
def _append_packet(self, packet):
"""Appends a packet to the end of the list of received packets and processes it"""
self.next_seq += len(packet.data)
if self.headers is not None:
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(packet.data)
self.http_bytes_loaded += len(packet.data)
else:
self.header_data += packet.data
# check if we have enough packets for the entire http header
if self.is_http and self.headers is None:
if http.has_complete_headers(self.header_data):
resp = http.parse_response(self.header_data)
self.header_data = None
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(resp['body'])
self.headers = resp['headers']
self.http_bytes_loaded = len(resp['body'])
self._on_http_headers()
# check if we have finished the request
if self.http_content_length is not None:
if self.http_content_length == self.http_bytes_loaded:
self.is_finished = True
elif self.http_content_length < self.http_bytes_loaded:
logging.error("Received data was longer than the content length header")
self.is_valid = False
self.is_finished = True
self._handle_ordered_packet(packet)
def _on_last_packet(self, packet):
self.is_finished = True
def _handle_ordered_packet(self, packet):
"""This will eventually provide a way for a callback receive packets in order"""
pass
def _on_http_headers(self):
content_length = self.headers.get('content-length', None)
if content_length is not None:
content_length = int(content_length)
self.http_content_length = content_length
| result = []
for flag, name in ((ACK, 'ACK'), (SYN, 'SYN'), (PUSH, 'PUSH'), (RST, 'RST')):
if flags & flag:
result.append(name)
return result | identifier_body |
network_listener.py | # -*- coding: utf-8 -*-
from operator import attrgetter
import logging
import StringIO
import struct
import dpkt
from netifaces import interfaces, ifaddresses, AF_INET
from sniff import PcapWrapper
import http
ACK = dpkt.tcp.TH_ACK
SYN = dpkt.tcp.TH_SYN
FIN = dpkt.tcp.TH_FIN
RST = dpkt.tcp.TH_RST
PUSH = dpkt.tcp.TH_PUSH
def ipaddr_string(addr):
return '.'.join(str(octet) for octet in struct.unpack('B' * len(addr), addr))
class NetworkFileListener(object):
def __init__(self, interface=None, mime_types=None):
self.pc = None
self.on_file_complete = None
self.packet_streams = {}
self.local_ips = self.detect_local_ips()
logging.info("Local IP Addresses: %s" % ', '.join(self.local_ips))
self.interface = interface
self.mime_types = mime_types
def detect_local_ips(self):
"""Determine all of the local ip addresses for this machine
This allows us to flag traffic as inbound or outbound.
"""
result = set()
for ifaceName in interfaces():
            try:
                address = [i['addr'] for i in ifaddresses(ifaceName)[AF_INET]]
            except (KeyError, ValueError):
                # interface is down or has no IPv4 address
                continue
            if address:
                result.add(address[0])
return tuple(result)
def start(self):
if self.pc is not None:
raise Exception('Already listening.')
self.pc = PcapWrapper(self.interface, filters='tcp')
try:
self.pc.loop(self._on_packet)
except KeyboardInterrupt:
pass
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_packet(self, pkt):
try:
self._handle_packet(pkt)
except Exception as e:
logging.exception(e)
def _parse_tcp_packet(self, data):
        if data is None or len(data) == 0:
return None
if data.startswith('GET') or data.startswith('POST'):
return None
try:
            return dpkt.http.Response(data)
except:
pass
return None
def _handle_packet(self, pkt):
eth = dpkt.ethernet.Ethernet(pkt)
ip = eth.data
tcp = ip.data
data = tcp.data
is_outbound = ipaddr_string(ip.dst) not in self.local_ips
direction = 'outbound' if is_outbound else 'inbound'
connection_hash = hash_packet(eth, outbound=is_outbound)
if ipaddr_string(ip.src) in self.local_ips and ipaddr_string(ip.dst) in self.local_ips:
# ignore packets that exist only on this computer
return
# lets first check if this is a http request instead of a response
if data.startswith('GET') or data.startswith('POST'):
if is_outbound:
# ignore inbound http request
_msg = 'Detected an %s HTTP Request from %s to %s'
logging.debug(_msg % (direction, ipaddr_string(ip.src), ipaddr_string(ip.dst)))
self._handle_request(connection_hash, tcp)
elif not is_outbound:
stream = self.packet_streams.get(connection_hash)
if stream is not None:
self._handle_response(stream, tcp)
def _handle_request(self, connection_hash, tcp_pkt):
if http.has_complete_headers(tcp_pkt.data):
req = http.parse_request(tcp_pkt.data)
logging.debug('Request URL: %s' % req['host'] + req['path'])
logging.debug('Storing stream %s.' % connection_hash)
self.packet_streams[connection_hash] = TcpStream(connection_hash)
def | (self, stream):
if stream.id in self.packet_streams:
del self.packet_streams[stream.id]
def _handle_response(self, stream, tcp_pkt):
had_headers = (stream.headers is not None)
stream.add_packet(tcp_pkt)
if not had_headers and stream.headers is not None:
# this will happen the first packet that contain http header
if self.mime_types is not None:
mime_type = stream.headers.get('content-type', '').split(';')[0].strip()
if mime_type not in self.mime_types:
logging.debug('Ignoring mime_type %s' % mime_type)
self._delete_stream(stream)
return
if stream.is_finished:
if stream.id not in self.packet_streams:
# probably just a retransmission
return
self._delete_stream(stream)
if stream.is_valid:
self._on_request_complete(stream)
else:
_msg = "Stream was invalid at %.1f%% with %i bytes loaded"
logging.error(_msg % (stream.progress * 100, stream.http_bytes_loaded))
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_request_complete(self, stream):
headers = stream.headers
if headers is not None:
mime_type = headers.get('content-type')
_msg = "Successfully observed a file with %i bytes and mime-type %s"
logging.info(_msg % (stream.http_content_length, stream.headers.get('content-type', '')))
f = RawFile(stream.content, mime_type)
self._on_file_complete(f)
def _on_file_complete(self, f):
if self.on_file_complete is not None:
self.on_file_complete(f)
def iter_packets(iterable):
"""Sorts an iterable of packets and removes the duplicates"""
prev = None
for i in sorted(iterable, key=attrgetter('seq')):
if prev is None or prev.seq != i.seq:
prev = i
yield i
def hash_packet(eth, outbound=False):
"""Hashes a packet to determine the tcp stream it is part of """
ip = eth.data
tcp = ip.data
return '%s:%i' % (ipaddr_string(ip.dst if outbound else ip.src),
tcp.sport if outbound else tcp.dport
)
def parse_flags(flags):
result = []
for flag, name in ((ACK, 'ACK'), (SYN, 'SYN'), (PUSH, 'PUSH'), (RST, 'RST')):
if flags & flag:
result.append(name)
return result
class RawFile(object):
def __init__(self, content, mime_type):
self.content = content
self.mime_type = mime_type
class TcpStream(object):
def __init__(self, id):
self.id = id
self.buffer = {}
self.packets = None
self.base_seq = None
self.next_seq = None
self.header_data = ''
self.headers = None
self.is_http = None
self.is_finished = False
self.is_valid = True
self.http_content_length = None
self.http_bytes_loaded = 0
def add_packet(self, packet):
if self.is_finished:
return
#vals = []
#for k in ('ack', 'dport', 'flags', 'off', 'off_x2', 'seq', 'sport', 'sum', 'urp', 'win'):
# vals.append('%s=%s' % (k, getattr(packet, k)))
#vals.append(parse_flags(packet.flags))
if self.base_seq is None:
# we do not yet know the first seq
if packet.data.startswith('HTTP'):
self.is_http = True
self._on_first_packet(packet)
else:
self.buffer[packet.seq] = packet
else:
if packet.seq == self.next_seq:
# the exact next packet
self._on_next_packet(packet)
elif packet.seq < self.next_seq:
# retransmission
pass
else:
#out of order packet
self.buffer[packet.seq] = packet
# if the buffer grows to be > than 2K assume something went wrong
if len(self.buffer) > 2000:
self.is_finished = True
self.is_valid = False
logging.error("Packet buffer filled up")
def rel_seq(self, packet):
return packet.seq - self.base_seq
@property
def content(self):
return self.packets
@property
def progress(self):
if self.http_content_length is None:
return 0
if self.http_content_length in (0, self.http_bytes_loaded):
return 1
return float(self.http_bytes_loaded) / float(self.http_content_length)
def _on_first_packet(self, packet):
# check if this is actually the first packet
if self.base_seq is None:
self.base_seq = packet.seq
self.next_seq = packet.seq
self._on_next_packet(packet)
def _on_next_packet(self, packet):
self._append_packet(packet)
self._check_buffer()
def _check_buffer(self):
"""Looks in the buffer to see if we have the next packet, if so append
it and continue till there are no packets left.
"""
count = 0
for packet in self.remove_buffered_packets():
self._append_packet(packet)
count += 1
if count > 0:
logging.debug('Removed %i items from the buffer, %i left.' % (count, len(self.buffer)))
def remove_buffered_packets(self):
"""Iterates over next packets in the buffer and removes them"""
seq = self.next_seq
while True:
p = self.buffer.pop(seq, None)
if p is None:
break
else:
seq += len(p.data)
yield p
def _append_packet(self, packet):
"""Appends a packet to the end of the list of received packets and processes it"""
self.next_seq += len(packet.data)
if self.headers is not None:
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(packet.data)
self.http_bytes_loaded += len(packet.data)
else:
self.header_data += packet.data
# check if we have enough packets for the entire http header
if self.is_http and self.headers is None:
if http.has_complete_headers(self.header_data):
resp = http.parse_response(self.header_data)
self.header_data = None
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(resp['body'])
self.headers = resp['headers']
self.http_bytes_loaded = len(resp['body'])
self._on_http_headers()
# check if we have finished the request
if self.http_content_length is not None:
if self.http_content_length == self.http_bytes_loaded:
self.is_finished = True
elif self.http_content_length < self.http_bytes_loaded:
logging.error("Received data was longer than the content length header")
self.is_valid = False
self.is_finished = True
self._handle_ordered_packet(packet)
def _on_last_packet(self, packet):
self.is_finished = True
def _handle_ordered_packet(self, packet):
"""This will eventually provide a way for a callback receive packets in order"""
pass
def _on_http_headers(self):
content_length = self.headers.get('content-length', None)
if content_length is not None:
content_length = int(content_length)
self.http_content_length = content_length
| _delete_stream | identifier_name |
network_listener.py | # -*- coding: utf-8 -*-
from operator import attrgetter
import logging
import StringIO
import struct
import dpkt
from netifaces import interfaces, ifaddresses, AF_INET
from sniff import PcapWrapper
import http
ACK = dpkt.tcp.TH_ACK
SYN = dpkt.tcp.TH_SYN
FIN = dpkt.tcp.TH_FIN
RST = dpkt.tcp.TH_RST
PUSH = dpkt.tcp.TH_PUSH
def ipaddr_string(addr):
return '.'.join(str(octet) for octet in struct.unpack('B' * len(addr), addr))
class NetworkFileListener(object):
def __init__(self, interface=None, mime_types=None):
self.pc = None
self.on_file_complete = None
self.packet_streams = {}
self.local_ips = self.detect_local_ips()
logging.info("Local IP Addresses: %s" % ', '.join(self.local_ips))
self.interface = interface
self.mime_types = mime_types
def detect_local_ips(self):
"""Determine all of the local ip addresses for this machine
This allows us to flag traffic as inbound or outbound.
"""
result = set()
for ifaceName in interfaces():
            try:
                address = [i['addr'] for i in ifaddresses(ifaceName)[AF_INET]]
            except (KeyError, ValueError):
                # interface is down or has no IPv4 address
                continue
            if address:
                result.add(address[0])
return tuple(result)
def start(self):
if self.pc is not None:
raise Exception('Already listening.')
self.pc = PcapWrapper(self.interface, filters='tcp')
try:
self.pc.loop(self._on_packet)
except KeyboardInterrupt:
pass
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_packet(self, pkt):
try:
self._handle_packet(pkt)
except Exception as e:
logging.exception(e)
def _parse_tcp_packet(self, data):
        if data is None or len(data) == 0:
return None
if data.startswith('GET') or data.startswith('POST'):
return None
try:
            return dpkt.http.Response(data)
except:
pass
return None
def _handle_packet(self, pkt):
eth = dpkt.ethernet.Ethernet(pkt)
ip = eth.data
tcp = ip.data
data = tcp.data
is_outbound = ipaddr_string(ip.dst) not in self.local_ips
direction = 'outbound' if is_outbound else 'inbound'
connection_hash = hash_packet(eth, outbound=is_outbound)
if ipaddr_string(ip.src) in self.local_ips and ipaddr_string(ip.dst) in self.local_ips:
# ignore packets that exist only on this computer
return
# lets first check if this is a http request instead of a response
if data.startswith('GET') or data.startswith('POST'):
if is_outbound:
# ignore inbound http request
_msg = 'Detected an %s HTTP Request from %s to %s'
logging.debug(_msg % (direction, ipaddr_string(ip.src), ipaddr_string(ip.dst)))
self._handle_request(connection_hash, tcp)
elif not is_outbound:
stream = self.packet_streams.get(connection_hash)
if stream is not None:
self._handle_response(stream, tcp)
def _handle_request(self, connection_hash, tcp_pkt):
if http.has_complete_headers(tcp_pkt.data):
req = http.parse_request(tcp_pkt.data)
logging.debug('Request URL: %s' % req['host'] + req['path'])
logging.debug('Storing stream %s.' % connection_hash)
self.packet_streams[connection_hash] = TcpStream(connection_hash)
def _delete_stream(self, stream):
if stream.id in self.packet_streams:
del self.packet_streams[stream.id]
def _handle_response(self, stream, tcp_pkt):
had_headers = (stream.headers is not None)
stream.add_packet(tcp_pkt)
if not had_headers and stream.headers is not None:
# this will happen the first packet that contain http header
if self.mime_types is not None:
mime_type = stream.headers.get('content-type', '').split(';')[0].strip()
if mime_type not in self.mime_types:
logging.debug('Ignoring mime_type %s' % mime_type)
self._delete_stream(stream)
return
if stream.is_finished:
if stream.id not in self.packet_streams:
# probably just a retransmission
return
self._delete_stream(stream)
if stream.is_valid:
self._on_request_complete(stream)
else:
_msg = "Stream was invalid at %.1f%% with %i bytes loaded"
logging.error(_msg % (stream.progress * 100, stream.http_bytes_loaded))
if self.pc.human_stats is not None:
logging.info(self.pc.human_stats)
def _on_request_complete(self, stream):
headers = stream.headers
if headers is not None:
mime_type = headers.get('content-type')
_msg = "Successfully observed a file with %i bytes and mime-type %s"
logging.info(_msg % (stream.http_content_length, stream.headers.get('content-type', '')))
f = RawFile(stream.content, mime_type)
self._on_file_complete(f)
def _on_file_complete(self, f):
if self.on_file_complete is not None:
self.on_file_complete(f)
def iter_packets(iterable):
"""Sorts an iterable of packets and removes the duplicates"""
prev = None
for i in sorted(iterable, key=attrgetter('seq')):
if prev is None or prev.seq != i.seq:
prev = i
yield i
def hash_packet(eth, outbound=False):
"""Hashes a packet to determine the tcp stream it is part of """
ip = eth.data
tcp = ip.data
return '%s:%i' % (ipaddr_string(ip.dst if outbound else ip.src),
tcp.sport if outbound else tcp.dport
)
def parse_flags(flags):
result = []
for flag, name in ((ACK, 'ACK'), (SYN, 'SYN'), (PUSH, 'PUSH'), (RST, 'RST')):
if flags & flag:
result.append(name)
return result
class RawFile(object):
def __init__(self, content, mime_type):
self.content = content
self.mime_type = mime_type
class TcpStream(object):
def __init__(self, id):
self.id = id
self.buffer = {}
self.packets = None
self.base_seq = None
self.next_seq = None
self.header_data = ''
self.headers = None
self.is_http = None
self.is_finished = False
self.is_valid = True
self.http_content_length = None
self.http_bytes_loaded = 0
def add_packet(self, packet):
if self.is_finished:
return
#vals = []
#for k in ('ack', 'dport', 'flags', 'off', 'off_x2', 'seq', 'sport', 'sum', 'urp', 'win'):
# vals.append('%s=%s' % (k, getattr(packet, k)))
#vals.append(parse_flags(packet.flags))
if self.base_seq is None:
# we do not yet know the first seq
if packet.data.startswith('HTTP'):
self.is_http = True
self._on_first_packet(packet)
else:
self.buffer[packet.seq] = packet
else:
if packet.seq == self.next_seq:
# the exact next packet
self._on_next_packet(packet)
elif packet.seq < self.next_seq:
# retransmission
pass
else:
#out of order packet
self.buffer[packet.seq] = packet
# if the buffer grows to be > than 2K assume something went wrong
if len(self.buffer) > 2000:
self.is_finished = True
self.is_valid = False
logging.error("Packet buffer filled up") |
@property
def content(self):
return self.packets
@property
def progress(self):
if self.http_content_length is None:
return 0
if self.http_content_length in (0, self.http_bytes_loaded):
return 1
return float(self.http_bytes_loaded) / float(self.http_content_length)
def _on_first_packet(self, packet):
# check if this is actually the first packet
if self.base_seq is None:
self.base_seq = packet.seq
self.next_seq = packet.seq
self._on_next_packet(packet)
def _on_next_packet(self, packet):
self._append_packet(packet)
self._check_buffer()
def _check_buffer(self):
"""Looks in the buffer to see if we have the next packet, if so append
it and continue till there are no packets left.
"""
count = 0
for packet in self.remove_buffered_packets():
self._append_packet(packet)
count += 1
if count > 0:
logging.debug('Removed %i items from the buffer, %i left.' % (count, len(self.buffer)))
def remove_buffered_packets(self):
"""Iterates over next packets in the buffer and removes them"""
seq = self.next_seq
while True:
p = self.buffer.pop(seq, None)
if p is None:
break
else:
seq += len(p.data)
yield p
def _append_packet(self, packet):
"""Appends a packet to the end of the list of received packets and processes it"""
self.next_seq += len(packet.data)
if self.headers is not None:
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(packet.data)
self.http_bytes_loaded += len(packet.data)
else:
self.header_data += packet.data
# check if we have enough packets for the entire http header
if self.is_http and self.headers is None:
if http.has_complete_headers(self.header_data):
resp = http.parse_response(self.header_data)
self.header_data = None
if self.packets is None:
self.packets = StringIO.StringIO()
self.packets.write(resp['body'])
self.headers = resp['headers']
self.http_bytes_loaded = len(resp['body'])
self._on_http_headers()
# check if we have finished the request
if self.http_content_length is not None:
if self.http_content_length == self.http_bytes_loaded:
self.is_finished = True
elif self.http_content_length < self.http_bytes_loaded:
logging.error("Received data was longer than the content length header")
self.is_valid = False
self.is_finished = True
self._handle_ordered_packet(packet)
def _on_last_packet(self, packet):
self.is_finished = True
def _handle_ordered_packet(self, packet):
"""This will eventually provide a way for a callback receive packets in order"""
pass
def _on_http_headers(self):
content_length = self.headers.get('content-length', None)
if content_length is not None:
content_length = int(content_length)
self.http_content_length = content_length |
def rel_seq(self, packet):
return packet.seq - self.base_seq | random_line_split |
wpa_controller.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//go:build kubeapiserver
package apiserver
import (
"context"
"math"
"time"
dynamic_client "k8s.io/client-go/dynamic"
dynamic_informer "k8s.io/client-go/dynamic/dynamicinformer"
apis_v1alpha1 "github.com/DataDog/watermarkpodautoscaler/api/v1alpha1"
"github.com/cenkalti/backoff"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/DataDog/datadog-agent/pkg/clusteragent/custommetrics"
"github.com/DataDog/datadog-agent/pkg/errors"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
crdCheckInitialInterval = time.Second * 5
crdCheckMaxInterval = 5 * time.Minute
crdCheckMultiplier = 2.0
crdCheckMaxElapsedTime = 0
)
var gvrWPA = apis_v1alpha1.GroupVersion.WithResource("watermarkpodautoscalers")
// RunWPA starts the controller to process events about Watermark Pod Autoscalers
func (h *AutoscalersController) RunWPA(stopCh <-chan struct{}, wpaClient dynamic_client.Interface, wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) {
waitForWPACRD(wpaClient)
// mutate the Autoscaler controller to embed an informer against the WPAs
if err := h.enableWPA(wpaInformerFactory); err != nil {
log.Errorf("impossible to enable WPQ: %v", err)
return
}
defer h.WPAqueue.ShutDown()
log.Infof("Starting WPA Controller ... ")
defer log.Infof("Stopping WPA Controller")
wpaInformerFactory.Start(stopCh)
if !cache.WaitForCacheSync(stopCh, h.wpaListerSynced) {
return
}
wait.Until(h.workerWPA, time.Second, stopCh)
}
type checkAPI func() error
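// tryCheckWPACRD runs the given API check once: a missing-CRD error is returned as-is so the
// caller can retry, while any other error is wrapped as permanent to stop the backoff loop.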
func tryCheckWPACRD(check checkAPI) error {
if err := check(); err != nil {
// Check if this is a known problem of missing CRD registration
if isWPACRDNotFoundError(err) {
return err
}
// In all other cases return a permanent error to prevent from retrying
log.Errorf("WPA CRD check failed: not retryable: %s", err)
return backoff.Permanent(err)
}
log.Info("WPA CRD check successful")
return nil
}
func notifyCheckWPACRD() backoff.Notify {
attempt := 0
return func(err error, delay time.Duration) {
attempt++
mins := int(delay.Minutes())
secs := int(math.Mod(delay.Seconds(), 60))
log.Warnf("WPA CRD missing (attempt=%d): will retry in %dm%ds", attempt, mins, secs)
}
}
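// isWPACRDNotFoundError returns true when the API error indicates that the
// WatermarkPodAutoscaler CRD is not registered in the cluster.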
func isWPACRDNotFoundError(err error) bool {
status, ok := err.(*apierrors.StatusError)
if !ok {
return false
}
reason := status.Status().Reason
details := status.Status().Details
return reason == v1.StatusReasonNotFound &&
details.Group == apis_v1alpha1.SchemeGroupVersion.Group &&
details.Kind == "watermarkpodautoscalers"
}
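// checkWPACRD returns the backoff operation that lists WPA objects to verify
// that the CRD is served by the API server.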
func checkWPACRD(wpaClient dynamic_client.Interface) backoff.Operation {
check := func() error {
_, err := wpaClient.Resource(gvrWPA).List(context.TODO(), v1.ListOptions{})
return err
}
return func() error {
return tryCheckWPACRD(check)
}
}
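// waitForWPACRD blocks until the WPA CRD is available, retrying with exponential backoff.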
func waitForWPACRD(wpaClient dynamic_client.Interface) {
exp := &backoff.ExponentialBackOff{
InitialInterval: crdCheckInitialInterval,
RandomizationFactor: 0,
Multiplier: crdCheckMultiplier,
MaxInterval: crdCheckMaxInterval,
MaxElapsedTime: crdCheckMaxElapsedTime,
Clock: backoff.SystemClock,
}
exp.Reset()
_ = backoff.RetryNotify(checkWPACRD(wpaClient), exp, notifyCheckWPACRD())
}
// enableWPA adds the handlers to the AutoscalersController to support WPAs
func (h *AutoscalersController) enableWPA(wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) error {
log.Info("Enabling WPA controller")
genericInformer := wpaInformerFactory.ForResource(gvrWPA)
h.WPAqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "wpa-autoscalers")
h.wpaLister = genericInformer.Lister()
h.wpaListerSynced = genericInformer.Informer().HasSynced
if _, err := genericInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: h.addWPAutoscaler,
UpdateFunc: h.updateWPAutoscaler,
DeleteFunc: h.deleteWPAutoscaler,
},
); err != nil {
return err
}
h.mu.Lock()
defer h.mu.Unlock()
h.wpaEnabled = true
return nil
}
func (h *AutoscalersController) | () bool {
h.mu.Lock()
defer h.mu.Unlock()
return h.wpaEnabled
}
func (h *AutoscalersController) workerWPA() {
for h.processNextWPA() {
}
}
func (h *AutoscalersController) processNextWPA() bool {
key, quit := h.WPAqueue.Get()
if quit {
log.Error("WPA controller HPAqueue is shutting down, stopping processing")
return false
}
log.Tracef("Processing %s", key)
defer h.WPAqueue.Done(key)
err := h.syncWPA(key)
h.handleErr(err, key)
// Debug output for unit tests only
if h.autoscalers != nil {
h.autoscalers <- key
}
return true
}
func (h *AutoscalersController) syncWPA(key interface{}) error {
h.mu.Lock()
defer h.mu.Unlock()
ns, name, err := cache.SplitMetaNamespaceKey(key.(string))
if err != nil {
log.Errorf("Could not split the key: %v", err)
return err
}
wpaCachedObj, err := h.wpaLister.ByNamespace(ns).Get(name)
if err != nil {
log.Errorf("Could not retrieve key %s from cache: %v", key, err)
return err
}
wpaCached := &apis_v1alpha1.WatermarkPodAutoscaler{}
err = UnstructuredIntoWPA(wpaCachedObj, wpaCached)
if err != nil {
log.Errorf("Could not cast wpa %s retrieved from cache to wpa structure: %v", key, err)
return err
}
switch {
case errors.IsNotFound(err):
log.Infof("WatermarkPodAutoscaler %v has been deleted but was not caught in the EventHandler. GC will cleanup.", key)
case err != nil:
log.Errorf("Unable to retrieve WatermarkPodAutoscaler %v from store: %v", key, err)
default:
if wpaCached == nil {
log.Errorf("Could not parse empty wpa %s/%s from local store", ns, name)
return ErrIsEmpty
}
emList := autoscalers.InspectWPA(wpaCached)
if len(emList) == 0 {
return nil
}
newMetrics := h.hpaProc.ProcessEMList(emList)
h.toStore.m.Lock()
for metric, value := range newMetrics {
// We should only insert placeholders in the local cache.
h.toStore.data[metric] = value
}
h.toStore.m.Unlock()
log.Tracef("Local batch cache of WPA is %v", h.toStore.data)
}
return err
}
func (h *AutoscalersController) addWPAutoscaler(obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
log.Debugf("Adding WPA %s/%s", newAutoscaler.Namespace, newAutoscaler.Name)
h.EventRecorder.Event(newAutoscaler.DeepCopyObject(), corev1.EventTypeNormal, autoscalerNowHandleMsgEvent, "")
h.enqueueWPA(newAutoscaler)
}
func (h *AutoscalersController) updateWPAutoscaler(old, obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
oldAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(old, oldAutoscaler); err != nil {
log.Errorf("Unable to cast old obj %s to a WPA: %v", old, err)
h.enqueueWPA(newAutoscaler) // We still want to enqueue the newAutoscaler to get the new change
return
}
if !autoscalers.AutoscalerMetricsUpdate(newAutoscaler.GetObjectMeta(), oldAutoscaler.GetObjectMeta()) {
log.Tracef("Update received for the %s/%s, without a relevant change to the configuration", newAutoscaler.Namespace, newAutoscaler.Name)
return
}
// Need to delete the old object from the local cache. If the labels have changed, the syncAutoscaler would not override the old key.
toDelete := autoscalers.InspectWPA(oldAutoscaler)
h.deleteFromLocalStore(toDelete)
log.Tracef("Processing update event for wpa %s/%s with configuration: %s", newAutoscaler.Namespace, newAutoscaler.Name, newAutoscaler.Annotations)
h.enqueueWPA(newAutoscaler)
}
// Processing the Delete Events in the Eventhandler as obj is deleted from the local store thereafter.
// Only here can we retrieve the content of the WPA to properly process and delete it.
// FIXME we could have an update in the WPAqueue while processing the deletion, we should make
// sure we process them in order instead. For now, the gc logic allows us to recover.
func (h *AutoscalersController) deleteWPAutoscaler(obj interface{}) {
h.mu.Lock()
defer h.mu.Unlock()
toDelete := &custommetrics.MetricsBundle{}
deletedWPA := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, deletedWPA); err == nil {
toDelete.External = autoscalers.InspectWPA(deletedWPA)
h.deleteFromLocalStore(toDelete.External)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
if !h.isLeaderFunc() {
return
}
log.Infof("Deleting entries of metrics from Ref %s/%s in the Global Store", deletedWPA.Namespace, deletedWPA.Name)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
}
return
}
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Errorf("Could not get object from tombstone %#v", obj)
return
}
if err := UnstructuredIntoWPA(tombstone, deletedWPA); err != nil {
log.Errorf("Tombstone contained object that is not an Autoscaler: %#v", obj)
return
}
log.Debugf("Deleting Metrics from WPA %s/%s", deletedWPA.Namespace, deletedWPA.Name)
toDelete.External = autoscalers.InspectWPA(deletedWPA)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
h.deleteFromLocalStore(toDelete.External)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
return
}
}
| isWPAEnabled | identifier_name |
wpa_controller.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//go:build kubeapiserver
package apiserver
import (
"context"
"math"
"time"
dynamic_client "k8s.io/client-go/dynamic"
dynamic_informer "k8s.io/client-go/dynamic/dynamicinformer"
apis_v1alpha1 "github.com/DataDog/watermarkpodautoscaler/api/v1alpha1"
"github.com/cenkalti/backoff"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/DataDog/datadog-agent/pkg/clusteragent/custommetrics"
"github.com/DataDog/datadog-agent/pkg/errors"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
crdCheckInitialInterval = time.Second * 5
crdCheckMaxInterval = 5 * time.Minute
crdCheckMultiplier = 2.0
crdCheckMaxElapsedTime = 0
)
var gvrWPA = apis_v1alpha1.GroupVersion.WithResource("watermarkpodautoscalers")
// RunWPA starts the controller to process events about Watermark Pod Autoscalers
func (h *AutoscalersController) RunWPA(stopCh <-chan struct{}, wpaClient dynamic_client.Interface, wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) {
waitForWPACRD(wpaClient)
// mutate the Autoscaler controller to embed an informer against the WPAs
if err := h.enableWPA(wpaInformerFactory); err != nil {
log.Errorf("impossible to enable WPQ: %v", err)
return
}
defer h.WPAqueue.ShutDown()
log.Infof("Starting WPA Controller ... ")
defer log.Infof("Stopping WPA Controller")
wpaInformerFactory.Start(stopCh)
if !cache.WaitForCacheSync(stopCh, h.wpaListerSynced) {
return
}
wait.Until(h.workerWPA, time.Second, stopCh)
}
type checkAPI func() error
func tryCheckWPACRD(check checkAPI) error {
if err := check(); err != nil {
// Check if this is a known problem of missing CRD registration
if isWPACRDNotFoundError(err) {
return err
}
// In all other cases return a permanent error to prevent from retrying
log.Errorf("WPA CRD check failed: not retryable: %s", err)
return backoff.Permanent(err)
}
log.Info("WPA CRD check successful")
return nil
}
func notifyCheckWPACRD() backoff.Notify {
attempt := 0
return func(err error, delay time.Duration) {
attempt++
mins := int(delay.Minutes())
secs := int(math.Mod(delay.Seconds(), 60))
log.Warnf("WPA CRD missing (attempt=%d): will retry in %dm%ds", attempt, mins, secs)
}
}
func isWPACRDNotFoundError(err error) bool {
status, ok := err.(*apierrors.StatusError)
if !ok {
return false
}
reason := status.Status().Reason
details := status.Status().Details
return reason == v1.StatusReasonNotFound &&
details.Group == apis_v1alpha1.SchemeGroupVersion.Group &&
details.Kind == "watermarkpodautoscalers"
}
func checkWPACRD(wpaClient dynamic_client.Interface) backoff.Operation {
check := func() error {
_, err := wpaClient.Resource(gvrWPA).List(context.TODO(), v1.ListOptions{})
return err
}
return func() error {
return tryCheckWPACRD(check)
}
}
func waitForWPACRD(wpaClient dynamic_client.Interface) |
// enableWPA adds the handlers to the AutoscalersController to support WPAs
func (h *AutoscalersController) enableWPA(wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) error {
log.Info("Enabling WPA controller")
genericInformer := wpaInformerFactory.ForResource(gvrWPA)
h.WPAqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "wpa-autoscalers")
h.wpaLister = genericInformer.Lister()
h.wpaListerSynced = genericInformer.Informer().HasSynced
if _, err := genericInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: h.addWPAutoscaler,
UpdateFunc: h.updateWPAutoscaler,
DeleteFunc: h.deleteWPAutoscaler,
},
); err != nil {
return err
}
h.mu.Lock()
defer h.mu.Unlock()
h.wpaEnabled = true
return nil
}
func (h *AutoscalersController) isWPAEnabled() bool {
h.mu.Lock()
defer h.mu.Unlock()
return h.wpaEnabled
}
func (h *AutoscalersController) workerWPA() {
for h.processNextWPA() {
}
}
func (h *AutoscalersController) processNextWPA() bool {
key, quit := h.WPAqueue.Get()
if quit {
log.Error("WPA controller HPAqueue is shutting down, stopping processing")
return false
}
log.Tracef("Processing %s", key)
defer h.WPAqueue.Done(key)
err := h.syncWPA(key)
h.handleErr(err, key)
// Debug output for unit tests only
if h.autoscalers != nil {
h.autoscalers <- key
}
return true
}
func (h *AutoscalersController) syncWPA(key interface{}) error {
h.mu.Lock()
defer h.mu.Unlock()
ns, name, err := cache.SplitMetaNamespaceKey(key.(string))
if err != nil {
log.Errorf("Could not split the key: %v", err)
return err
}
wpaCachedObj, err := h.wpaLister.ByNamespace(ns).Get(name)
if err != nil {
log.Errorf("Could not retrieve key %s from cache: %v", key, err)
return err
}
wpaCached := &apis_v1alpha1.WatermarkPodAutoscaler{}
err = UnstructuredIntoWPA(wpaCachedObj, wpaCached)
if err != nil {
log.Errorf("Could not cast wpa %s retrieved from cache to wpa structure: %v", key, err)
return err
}
switch {
case errors.IsNotFound(err):
log.Infof("WatermarkPodAutoscaler %v has been deleted but was not caught in the EventHandler. GC will cleanup.", key)
case err != nil:
log.Errorf("Unable to retrieve WatermarkPodAutoscaler %v from store: %v", key, err)
default:
if wpaCached == nil {
log.Errorf("Could not parse empty wpa %s/%s from local store", ns, name)
return ErrIsEmpty
}
emList := autoscalers.InspectWPA(wpaCached)
if len(emList) == 0 {
return nil
}
newMetrics := h.hpaProc.ProcessEMList(emList)
h.toStore.m.Lock()
for metric, value := range newMetrics {
// We should only insert placeholders in the local cache.
h.toStore.data[metric] = value
}
h.toStore.m.Unlock()
log.Tracef("Local batch cache of WPA is %v", h.toStore.data)
}
return err
}
func (h *AutoscalersController) addWPAutoscaler(obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
log.Debugf("Adding WPA %s/%s", newAutoscaler.Namespace, newAutoscaler.Name)
h.EventRecorder.Event(newAutoscaler.DeepCopyObject(), corev1.EventTypeNormal, autoscalerNowHandleMsgEvent, "")
h.enqueueWPA(newAutoscaler)
}
func (h *AutoscalersController) updateWPAutoscaler(old, obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
oldAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(old, oldAutoscaler); err != nil {
log.Errorf("Unable to cast old obj %s to a WPA: %v", old, err)
h.enqueueWPA(newAutoscaler) // We still want to enqueue the newAutoscaler to get the new change
return
}
if !autoscalers.AutoscalerMetricsUpdate(newAutoscaler.GetObjectMeta(), oldAutoscaler.GetObjectMeta()) {
log.Tracef("Update received for the %s/%s, without a relevant change to the configuration", newAutoscaler.Namespace, newAutoscaler.Name)
return
}
// Need to delete the old object from the local cache. If the labels have changed, the syncAutoscaler would not override the old key.
toDelete := autoscalers.InspectWPA(oldAutoscaler)
h.deleteFromLocalStore(toDelete)
log.Tracef("Processing update event for wpa %s/%s with configuration: %s", newAutoscaler.Namespace, newAutoscaler.Name, newAutoscaler.Annotations)
h.enqueueWPA(newAutoscaler)
}
// Processing the Delete Events in the Eventhandler as obj is deleted from the local store thereafter.
// Only here can we retrieve the content of the WPA to properly process and delete it.
// FIXME we could have an update in the WPAqueue while processing the deletion, we should make
// sure we process them in order instead. For now, the gc logic allows us to recover.
func (h *AutoscalersController) deleteWPAutoscaler(obj interface{}) {
h.mu.Lock()
defer h.mu.Unlock()
toDelete := &custommetrics.MetricsBundle{}
deletedWPA := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, deletedWPA); err == nil {
toDelete.External = autoscalers.InspectWPA(deletedWPA)
h.deleteFromLocalStore(toDelete.External)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
if !h.isLeaderFunc() {
return
}
log.Infof("Deleting entries of metrics from Ref %s/%s in the Global Store", deletedWPA.Namespace, deletedWPA.Name)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
}
return
}
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Errorf("Could not get object from tombstone %#v", obj)
return
}
if err := UnstructuredIntoWPA(tombstone, deletedWPA); err != nil {
log.Errorf("Tombstone contained object that is not an Autoscaler: %#v", obj)
return
}
log.Debugf("Deleting Metrics from WPA %s/%s", deletedWPA.Namespace, deletedWPA.Name)
toDelete.External = autoscalers.InspectWPA(deletedWPA)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
h.deleteFromLocalStore(toDelete.External)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
return
}
}
| {
exp := &backoff.ExponentialBackOff{
InitialInterval: crdCheckInitialInterval,
RandomizationFactor: 0,
Multiplier: crdCheckMultiplier,
MaxInterval: crdCheckMaxInterval,
MaxElapsedTime: crdCheckMaxElapsedTime,
Clock: backoff.SystemClock,
}
exp.Reset()
_ = backoff.RetryNotify(checkWPACRD(wpaClient), exp, notifyCheckWPACRD())
} | identifier_body |
wpa_controller.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//go:build kubeapiserver
package apiserver
import (
"context"
"math"
"time"
dynamic_client "k8s.io/client-go/dynamic"
dynamic_informer "k8s.io/client-go/dynamic/dynamicinformer"
apis_v1alpha1 "github.com/DataDog/watermarkpodautoscaler/api/v1alpha1"
"github.com/cenkalti/backoff"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/DataDog/datadog-agent/pkg/clusteragent/custommetrics"
"github.com/DataDog/datadog-agent/pkg/errors"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
crdCheckInitialInterval = time.Second * 5
crdCheckMaxInterval = 5 * time.Minute
crdCheckMultiplier = 2.0
crdCheckMaxElapsedTime = 0
)
var gvrWPA = apis_v1alpha1.GroupVersion.WithResource("watermarkpodautoscalers")
// RunWPA starts the controller to process events about Watermark Pod Autoscalers
func (h *AutoscalersController) RunWPA(stopCh <-chan struct{}, wpaClient dynamic_client.Interface, wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) {
waitForWPACRD(wpaClient)
// mutate the Autoscaler controller to embed an informer against the WPAs
if err := h.enableWPA(wpaInformerFactory); err != nil {
log.Errorf("impossible to enable WPQ: %v", err)
return
}
defer h.WPAqueue.ShutDown()
log.Infof("Starting WPA Controller ... ")
defer log.Infof("Stopping WPA Controller")
wpaInformerFactory.Start(stopCh)
if !cache.WaitForCacheSync(stopCh, h.wpaListerSynced) {
return
}
wait.Until(h.workerWPA, time.Second, stopCh)
}
type checkAPI func() error
func tryCheckWPACRD(check checkAPI) error {
if err := check(); err != nil {
// Check if this is a known problem of missing CRD registration
if isWPACRDNotFoundError(err) {
return err
}
// In all other cases return a permanent error to prevent from retrying
log.Errorf("WPA CRD check failed: not retryable: %s", err)
return backoff.Permanent(err)
}
log.Info("WPA CRD check successful")
return nil
}
func notifyCheckWPACRD() backoff.Notify {
attempt := 0
return func(err error, delay time.Duration) {
attempt++
mins := int(delay.Minutes())
secs := int(math.Mod(delay.Seconds(), 60))
log.Warnf("WPA CRD missing (attempt=%d): will retry in %dm%ds", attempt, mins, secs)
}
}
func isWPACRDNotFoundError(err error) bool {
status, ok := err.(*apierrors.StatusError)
if !ok {
return false
}
reason := status.Status().Reason | details.Kind == "watermarkpodautoscalers"
}
func checkWPACRD(wpaClient dynamic_client.Interface) backoff.Operation {
check := func() error {
_, err := wpaClient.Resource(gvrWPA).List(context.TODO(), v1.ListOptions{})
return err
}
return func() error {
return tryCheckWPACRD(check)
}
}
func waitForWPACRD(wpaClient dynamic_client.Interface) {
exp := &backoff.ExponentialBackOff{
InitialInterval: crdCheckInitialInterval,
RandomizationFactor: 0,
Multiplier: crdCheckMultiplier,
MaxInterval: crdCheckMaxInterval,
MaxElapsedTime: crdCheckMaxElapsedTime,
Clock: backoff.SystemClock,
}
exp.Reset()
_ = backoff.RetryNotify(checkWPACRD(wpaClient), exp, notifyCheckWPACRD())
}
// enableWPA adds the handlers to the AutoscalersController to support WPAs
func (h *AutoscalersController) enableWPA(wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) error {
log.Info("Enabling WPA controller")
genericInformer := wpaInformerFactory.ForResource(gvrWPA)
h.WPAqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "wpa-autoscalers")
h.wpaLister = genericInformer.Lister()
h.wpaListerSynced = genericInformer.Informer().HasSynced
if _, err := genericInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: h.addWPAutoscaler,
UpdateFunc: h.updateWPAutoscaler,
DeleteFunc: h.deleteWPAutoscaler,
},
); err != nil {
return err
}
h.mu.Lock()
defer h.mu.Unlock()
h.wpaEnabled = true
return nil
}
func (h *AutoscalersController) isWPAEnabled() bool {
h.mu.Lock()
defer h.mu.Unlock()
return h.wpaEnabled
}
func (h *AutoscalersController) workerWPA() {
for h.processNextWPA() {
}
}
func (h *AutoscalersController) processNextWPA() bool {
key, quit := h.WPAqueue.Get()
if quit {
log.Error("WPA controller HPAqueue is shutting down, stopping processing")
return false
}
log.Tracef("Processing %s", key)
defer h.WPAqueue.Done(key)
err := h.syncWPA(key)
h.handleErr(err, key)
// Debug output for unit tests only
if h.autoscalers != nil {
h.autoscalers <- key
}
return true
}
func (h *AutoscalersController) syncWPA(key interface{}) error {
h.mu.Lock()
defer h.mu.Unlock()
ns, name, err := cache.SplitMetaNamespaceKey(key.(string))
if err != nil {
log.Errorf("Could not split the key: %v", err)
return err
}
wpaCachedObj, err := h.wpaLister.ByNamespace(ns).Get(name)
if err != nil {
log.Errorf("Could not retrieve key %s from cache: %v", key, err)
return err
}
wpaCached := &apis_v1alpha1.WatermarkPodAutoscaler{}
err = UnstructuredIntoWPA(wpaCachedObj, wpaCached)
if err != nil {
log.Errorf("Could not cast wpa %s retrieved from cache to wpa structure: %v", key, err)
return err
}
switch {
case errors.IsNotFound(err):
log.Infof("WatermarkPodAutoscaler %v has been deleted but was not caught in the EventHandler. GC will cleanup.", key)
case err != nil:
log.Errorf("Unable to retrieve WatermarkPodAutoscaler %v from store: %v", key, err)
default:
if wpaCached == nil {
log.Errorf("Could not parse empty wpa %s/%s from local store", ns, name)
return ErrIsEmpty
}
emList := autoscalers.InspectWPA(wpaCached)
if len(emList) == 0 {
return nil
}
newMetrics := h.hpaProc.ProcessEMList(emList)
h.toStore.m.Lock()
for metric, value := range newMetrics {
// We should only insert placeholders in the local cache.
h.toStore.data[metric] = value
}
h.toStore.m.Unlock()
log.Tracef("Local batch cache of WPA is %v", h.toStore.data)
}
return err
}
func (h *AutoscalersController) addWPAutoscaler(obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
log.Debugf("Adding WPA %s/%s", newAutoscaler.Namespace, newAutoscaler.Name)
h.EventRecorder.Event(newAutoscaler.DeepCopyObject(), corev1.EventTypeNormal, autoscalerNowHandleMsgEvent, "")
h.enqueueWPA(newAutoscaler)
}
func (h *AutoscalersController) updateWPAutoscaler(old, obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
oldAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(old, oldAutoscaler); err != nil {
log.Errorf("Unable to cast old obj %s to a WPA: %v", old, err)
h.enqueueWPA(newAutoscaler) // We still want to enqueue the newAutoscaler to get the new change
return
}
if !autoscalers.AutoscalerMetricsUpdate(newAutoscaler.GetObjectMeta(), oldAutoscaler.GetObjectMeta()) {
log.Tracef("Update received for the %s/%s, without a relevant change to the configuration", newAutoscaler.Namespace, newAutoscaler.Name)
return
}
// Need to delete the old object from the local cache. If the labels have changed, the syncAutoscaler would not override the old key.
toDelete := autoscalers.InspectWPA(oldAutoscaler)
h.deleteFromLocalStore(toDelete)
log.Tracef("Processing update event for wpa %s/%s with configuration: %s", newAutoscaler.Namespace, newAutoscaler.Name, newAutoscaler.Annotations)
h.enqueueWPA(newAutoscaler)
}
// Processing the Delete Events in the Eventhandler as obj is deleted from the local store thereafter.
// Only here can we retrieve the content of the WPA to properly process and delete it.
// FIXME we could have an update in the WPAqueue while processing the deletion, we should make
// sure we process them in order instead. For now, the gc logic allows us to recover.
func (h *AutoscalersController) deleteWPAutoscaler(obj interface{}) {
h.mu.Lock()
defer h.mu.Unlock()
toDelete := &custommetrics.MetricsBundle{}
deletedWPA := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, deletedWPA); err == nil {
toDelete.External = autoscalers.InspectWPA(deletedWPA)
h.deleteFromLocalStore(toDelete.External)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
if !h.isLeaderFunc() {
return
}
log.Infof("Deleting entries of metrics from Ref %s/%s in the Global Store", deletedWPA.Namespace, deletedWPA.Name)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
}
return
}
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
log.Errorf("Could not get object from tombstone %#v", obj)
return
}
if err := UnstructuredIntoWPA(tombstone, deletedWPA); err != nil {
log.Errorf("Tombstone contained object that is not an Autoscaler: %#v", obj)
return
}
log.Debugf("Deleting Metrics from WPA %s/%s", deletedWPA.Namespace, deletedWPA.Name)
toDelete.External = autoscalers.InspectWPA(deletedWPA)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
h.deleteFromLocalStore(toDelete.External)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
return
}
} | details := status.Status().Details
return reason == v1.StatusReasonNotFound &&
details.Group == apis_v1alpha1.SchemeGroupVersion.Group && | random_line_split |
wpa_controller.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//go:build kubeapiserver
package apiserver
import (
"context"
"math"
"time"
dynamic_client "k8s.io/client-go/dynamic"
dynamic_informer "k8s.io/client-go/dynamic/dynamicinformer"
apis_v1alpha1 "github.com/DataDog/watermarkpodautoscaler/api/v1alpha1"
"github.com/cenkalti/backoff"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/DataDog/datadog-agent/pkg/clusteragent/custommetrics"
"github.com/DataDog/datadog-agent/pkg/errors"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
crdCheckInitialInterval = time.Second * 5
crdCheckMaxInterval = 5 * time.Minute
crdCheckMultiplier = 2.0
crdCheckMaxElapsedTime = 0
)
var gvrWPA = apis_v1alpha1.GroupVersion.WithResource("watermarkpodautoscalers")
// RunWPA starts the controller to process events about Watermark Pod Autoscalers
func (h *AutoscalersController) RunWPA(stopCh <-chan struct{}, wpaClient dynamic_client.Interface, wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) {
waitForWPACRD(wpaClient)
// mutate the Autoscaler controller to embed an informer against the WPAs
if err := h.enableWPA(wpaInformerFactory); err != nil {
log.Errorf("impossible to enable WPQ: %v", err)
return
}
defer h.WPAqueue.ShutDown()
log.Infof("Starting WPA Controller ... ")
defer log.Infof("Stopping WPA Controller")
wpaInformerFactory.Start(stopCh)
if !cache.WaitForCacheSync(stopCh, h.wpaListerSynced) {
return
}
wait.Until(h.workerWPA, time.Second, stopCh)
}
type checkAPI func() error
func tryCheckWPACRD(check checkAPI) error {
if err := check(); err != nil {
// Check if this is a known problem of missing CRD registration
if isWPACRDNotFoundError(err) {
return err
}
// In all other cases return a permanent error to prevent from retrying
log.Errorf("WPA CRD check failed: not retryable: %s", err)
return backoff.Permanent(err)
}
log.Info("WPA CRD check successful")
return nil
}
func notifyCheckWPACRD() backoff.Notify {
attempt := 0
return func(err error, delay time.Duration) {
attempt++
mins := int(delay.Minutes())
secs := int(math.Mod(delay.Seconds(), 60))
log.Warnf("WPA CRD missing (attempt=%d): will retry in %dm%ds", attempt, mins, secs)
}
}
func isWPACRDNotFoundError(err error) bool {
status, ok := err.(*apierrors.StatusError)
if !ok {
return false
}
reason := status.Status().Reason
details := status.Status().Details
return reason == v1.StatusReasonNotFound &&
details.Group == apis_v1alpha1.SchemeGroupVersion.Group &&
details.Kind == "watermarkpodautoscalers"
}
func checkWPACRD(wpaClient dynamic_client.Interface) backoff.Operation {
check := func() error {
_, err := wpaClient.Resource(gvrWPA).List(context.TODO(), v1.ListOptions{})
return err
}
return func() error {
return tryCheckWPACRD(check)
}
}
func waitForWPACRD(wpaClient dynamic_client.Interface) {
exp := &backoff.ExponentialBackOff{
InitialInterval: crdCheckInitialInterval,
RandomizationFactor: 0,
Multiplier: crdCheckMultiplier,
MaxInterval: crdCheckMaxInterval,
MaxElapsedTime: crdCheckMaxElapsedTime,
Clock: backoff.SystemClock,
}
exp.Reset()
_ = backoff.RetryNotify(checkWPACRD(wpaClient), exp, notifyCheckWPACRD())
}
// enableWPA adds the handlers to the AutoscalersController to support WPAs
func (h *AutoscalersController) enableWPA(wpaInformerFactory dynamic_informer.DynamicSharedInformerFactory) error {
log.Info("Enabling WPA controller")
genericInformer := wpaInformerFactory.ForResource(gvrWPA)
h.WPAqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "wpa-autoscalers")
h.wpaLister = genericInformer.Lister()
h.wpaListerSynced = genericInformer.Informer().HasSynced
if _, err := genericInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: h.addWPAutoscaler,
UpdateFunc: h.updateWPAutoscaler,
DeleteFunc: h.deleteWPAutoscaler,
},
); err != nil {
return err
}
h.mu.Lock()
defer h.mu.Unlock()
h.wpaEnabled = true
return nil
}
func (h *AutoscalersController) isWPAEnabled() bool {
h.mu.Lock()
defer h.mu.Unlock()
return h.wpaEnabled
}
func (h *AutoscalersController) workerWPA() {
for h.processNextWPA() {
}
}
func (h *AutoscalersController) processNextWPA() bool {
key, quit := h.WPAqueue.Get()
if quit {
log.Error("WPA controller HPAqueue is shutting down, stopping processing")
return false
}
log.Tracef("Processing %s", key)
defer h.WPAqueue.Done(key)
err := h.syncWPA(key)
h.handleErr(err, key)
// Debug output for unit tests only
if h.autoscalers != nil {
h.autoscalers <- key
}
return true
}
func (h *AutoscalersController) syncWPA(key interface{}) error {
h.mu.Lock()
defer h.mu.Unlock()
ns, name, err := cache.SplitMetaNamespaceKey(key.(string))
if err != nil {
log.Errorf("Could not split the key: %v", err)
return err
}
wpaCachedObj, err := h.wpaLister.ByNamespace(ns).Get(name)
if err != nil {
log.Errorf("Could not retrieve key %s from cache: %v", key, err)
return err
}
wpaCached := &apis_v1alpha1.WatermarkPodAutoscaler{}
err = UnstructuredIntoWPA(wpaCachedObj, wpaCached)
if err != nil {
log.Errorf("Could not cast wpa %s retrieved from cache to wpa structure: %v", key, err)
return err
}
switch {
case errors.IsNotFound(err):
log.Infof("WatermarkPodAutoscaler %v has been deleted but was not caught in the EventHandler. GC will cleanup.", key)
case err != nil:
log.Errorf("Unable to retrieve WatermarkPodAutoscaler %v from store: %v", key, err)
default:
if wpaCached == nil {
log.Errorf("Could not parse empty wpa %s/%s from local store", ns, name)
return ErrIsEmpty
}
emList := autoscalers.InspectWPA(wpaCached)
if len(emList) == 0 {
return nil
}
newMetrics := h.hpaProc.ProcessEMList(emList)
h.toStore.m.Lock()
for metric, value := range newMetrics {
// We should only insert placeholders in the local cache.
h.toStore.data[metric] = value
}
h.toStore.m.Unlock()
log.Tracef("Local batch cache of WPA is %v", h.toStore.data)
}
return err
}
func (h *AutoscalersController) addWPAutoscaler(obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
log.Debugf("Adding WPA %s/%s", newAutoscaler.Namespace, newAutoscaler.Name)
h.EventRecorder.Event(newAutoscaler.DeepCopyObject(), corev1.EventTypeNormal, autoscalerNowHandleMsgEvent, "")
h.enqueueWPA(newAutoscaler)
}
func (h *AutoscalersController) updateWPAutoscaler(old, obj interface{}) {
newAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, newAutoscaler); err != nil {
log.Errorf("Unable to cast obj %s to a WPA: %v", obj, err)
return
}
oldAutoscaler := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(old, oldAutoscaler); err != nil {
log.Errorf("Unable to cast old obj %s to a WPA: %v", old, err)
h.enqueueWPA(newAutoscaler) // We still want to enqueue the newAutoscaler to get the new change
return
}
if !autoscalers.AutoscalerMetricsUpdate(newAutoscaler.GetObjectMeta(), oldAutoscaler.GetObjectMeta()) {
log.Tracef("Update received for the %s/%s, without a relevant change to the configuration", newAutoscaler.Namespace, newAutoscaler.Name)
return
}
// Need to delete the old object from the local cache. If the labels have changed, the syncAutoscaler would not override the old key.
toDelete := autoscalers.InspectWPA(oldAutoscaler)
h.deleteFromLocalStore(toDelete)
log.Tracef("Processing update event for wpa %s/%s with configuration: %s", newAutoscaler.Namespace, newAutoscaler.Name, newAutoscaler.Annotations)
h.enqueueWPA(newAutoscaler)
}
// Processing the Delete Events in the Eventhandler as obj is deleted from the local store thereafter.
// Only here can we retrieve the content of the WPA to properly process and delete it.
// FIXME we could have an update in the WPAqueue while processing the deletion, we should make
// sure we process them in order instead. For now, the gc logic allows us to recover.
func (h *AutoscalersController) deleteWPAutoscaler(obj interface{}) {
h.mu.Lock()
defer h.mu.Unlock()
toDelete := &custommetrics.MetricsBundle{}
deletedWPA := &apis_v1alpha1.WatermarkPodAutoscaler{}
if err := UnstructuredIntoWPA(obj, deletedWPA); err == nil {
toDelete.External = autoscalers.InspectWPA(deletedWPA)
h.deleteFromLocalStore(toDelete.External)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
if !h.isLeaderFunc() {
return
}
log.Infof("Deleting entries of metrics from Ref %s/%s in the Global Store", deletedWPA.Namespace, deletedWPA.Name)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
}
return
}
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok |
if err := UnstructuredIntoWPA(tombstone, deletedWPA); err != nil {
log.Errorf("Tombstone contained object that is not an Autoscaler: %#v", obj)
return
}
log.Debugf("Deleting Metrics from WPA %s/%s", deletedWPA.Namespace, deletedWPA.Name)
toDelete.External = autoscalers.InspectWPA(deletedWPA)
log.Debugf("Deleting %s/%s from the local cache", deletedWPA.Namespace, deletedWPA.Name)
h.deleteFromLocalStore(toDelete.External)
if err := h.store.DeleteExternalMetricValues(toDelete); err != nil {
h.enqueueWPA(deletedWPA)
return
}
}
| {
log.Errorf("Could not get object from tombstone %#v", obj)
return
} | conditional_block |
splash.go | package splash
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
neturl "net/url"
"os"
"strings"
"time"
"github.com/pquerna/cachecontrol/cacheobject"
"github.com/spf13/viper"
"github.com/slotix/dataflowkit/errs"
)
var logger *log.Logger
func init() {
viper.AutomaticEnv() // read in environment variables that match
logger = log.New(os.Stdout, "splash: ", log.Lshortfile)
}
type splashConn struct {
host string //splash server address
//password string
//Splash parameters:
timeout int
resourceTimeout int
wait float64
}
//NewSplashConn creates new connection to Splash Server
func NewSplashConn(host string, timeout, resourceTimeout int, wait float64) splashConn {
return splashConn{ | }
//GenerateSplashURL generates a Splash execute URL for the given request
func (s *splashConn) GenerateSplashURL(req Request) string {
/*
//"Set-Cookie" from response headers should be sent when accessing for the same domain second time
cookie := `PHPSESSID=ef75e2737a14b06a2749d0b73840354f; path=/; domain=.acer-a500.ru; HttpOnly
dle_user_id=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_password=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_hash=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_forum_sessions=ef75e2737a14b06a2749d0b73840354f; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly
forum_last=1496801580; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly`
//cookie := ""
req.Cookies, err = generateCookie(cookie)
if err != nil {
logger.Println(err)
}
//logger.Println(req.Cookies)
//---------
*/
//req.Params = `"auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fdiesel.elcat.kg%2F&ips_username=dm_&ips_password=asfwwe!444D&rememberMe=1"`
var LUAScript string
if isRobotsTxt(req.URL) {
LUAScript = robotsLUA
} else {
LUAScript = baseLUA
}
splashURL := fmt.Sprintf(
"http://%s/execute?url=%s&timeout=%d&resource_timeout=%d&wait=%.1f&cookies=%s&formdata=%s&lua_source=%s",
s.host,
neturl.QueryEscape(req.URL),
s.timeout,
s.resourceTimeout,
s.wait,
neturl.QueryEscape(req.Cookies),
neturl.QueryEscape(paramsToLuaTable(req.Params)),
neturl.QueryEscape(LUAScript))
return splashURL
}
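// Illustrative usage sketch (not part of the original package): building an execute
// URL for a single page. The host, timeouts and target URL are made-up values.
func exampleGenerateSplashURL() {
s := NewSplashConn("127.0.0.1:8050", 20, 30, 0.5)
u := s.GenerateSplashURL(Request{URL: "http://example.com"})
// u has the form
// http://127.0.0.1:8050/execute?url=http%3A%2F%2Fexample.com&timeout=20&resource_timeout=30&wait=0.5&...
logger.Println(u)
}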
//GetResponse result is passed to caching middleware
//to provide a RFC7234 compliant HTTP cache
func GetResponse(req Request) (*Response, error) {
sConn := NewSplashConn(
viper.GetString("SPLASH"),
viper.GetInt("SPLASH_TIMEOUT"),
viper.GetInt("SPLASH_RESOURCE_TIMEOUT"),
viper.GetFloat64("SPLASH_WAIT"),
)
splashURL := sConn.GenerateSplashURL(req)
client := &http.Client{}
request, err := http.NewRequest("GET", splashURL, nil)
//req.SetBasicAuth(s.user, s.password)
resp, err := client.Do(request)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, err
}
res, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
//response from Splash service.
statusCode := resp.StatusCode
if statusCode != 200 {
switch statusCode {
case 504:
return nil, &errs.GatewayTimeout{}
default:
return nil, fmt.Errorf("%s", res)
}
}
var sResponse Response
if err := json.Unmarshal(res, &sResponse); err != nil {
logger.Println("Json Unmarshall error", err)
}
//if response status code is not 200
if sResponse.Error != "" {
switch sResponse.Error {
case "http404":
return nil, &errs.NotFound{sResponse.URL}
case "http403":
return nil, &errs.Forbidden{sResponse.URL}
case "network3":
return nil, &errs.InvalidHost{sResponse.URL}
default:
return nil, &errs.Error{sResponse.Error}
}
//return nil, fmt.Errorf("%s", sResponse.Error)
}
// Sometimes no response, request returned by Splash.
// gc (garbage collection) method should be called to clear WebKit caches and then
// GetResponse again. See more at https://github.com/scrapinghub/splash/issues/613
if sResponse.Response == nil || sResponse.Request == nil && sResponse.HTML != "" {
var response *Response
gcResponse, err := gc(viper.GetString("SPLASH"))
if err == nil && gcResponse.Status == "ok" {
response, err = GetResponse(req)
if err != nil {
return nil, err
}
}
return response, nil
}
if !sResponse.Response.Ok {
if sResponse.Response.Status == 0 {
err = fmt.Errorf("%s",
//sResponse.Error)
sResponse.Response.StatusText)
} else {
err = fmt.Errorf("%d. %s",
sResponse.Response.Status,
sResponse.Response.StatusText)
}
return nil, err
}
//if cacheable ?
rv := sResponse.cacheable()
//logger.Println(rv.OutReasons)
//logger.Println(rv.OutWarnings)
//logger.Println(rv.OutExpirationTime)
if len(rv.OutReasons) == 0 {
sResponse.Cacheable = true
}
return &sResponse, nil
}
func (r *Response) GetContent() (io.ReadCloser, error) {
if r == nil {
return nil, errors.New("empty response")
}
if isRobotsTxt(r.Request.URL) {
decoded, err := base64.StdEncoding.DecodeString(r.Response.Content.Text)
if err != nil {
logger.Println("decode error:", err)
return nil, err
}
readCloser := ioutil.NopCloser(bytes.NewReader(decoded))
return readCloser, nil
}
readCloser := ioutil.NopCloser(strings.NewReader(r.HTML))
return readCloser, nil
}
//cacheable check if resource is cacheable
func (r *Response) cacheable() (rv cacheobject.ObjectResults) {
respHeader := r.Response.Headers.(http.Header)
reqHeader := r.Request.Headers.(http.Header)
// respHeader := r.Response.castHeaders()
// reqHeader := r.Request.castHeaders()
reqDir, err := cacheobject.ParseRequestCacheControl(reqHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
resDir, err := cacheobject.ParseResponseCacheControl(respHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
//logger.Println(respHeader)
expiresHeader, _ := http.ParseTime(respHeader.Get("Expires"))
dateHeader, _ := http.ParseTime(respHeader.Get("Date"))
lastModifiedHeader, _ := http.ParseTime(respHeader.Get("Last-Modified"))
obj := cacheobject.Object{
// CacheIsPrivate: false,
RespDirectives: resDir,
RespHeaders: respHeader,
RespStatusCode: r.Response.Status,
RespExpiresHeader: expiresHeader,
RespDateHeader: dateHeader,
RespLastModifiedHeader: lastModifiedHeader,
ReqDirectives: reqDir,
ReqHeaders: reqHeader,
ReqMethod: r.Request.Method,
NowUTC: time.Now().UTC(),
}
rv = cacheobject.ObjectResults{}
cacheobject.CachableObject(&obj, &rv)
cacheobject.ExpirationObject(&obj, &rv)
//Check if it is cacheable
expTime := rv.OutExpirationTime.Unix()
if rv.OutExpirationTime.IsZero() {
expTime = 0
}
r.CacheExpirationTime = expTime
debug := false
if debug {
if rv.OutErr != nil {
logger.Println("Errors: ", rv.OutErr)
}
if rv.OutReasons != nil {
logger.Println("Reasons to not cache: ", rv.OutReasons)
}
if rv.OutWarnings != nil {
logger.Println("Warning headers to add: ", rv.OutWarnings)
}
logger.Println("Expiration: ", rv.OutExpirationTime.String())
}
return rv
}
//Fetch content from url through Splash server https://github.com/scrapinghub/splash/
func Fetch(req Request) (io.ReadCloser, error) {
//logger.Println(splashURL)
response, err := GetResponse(req)
if err != nil {
return nil, err
}
content, err := response.GetContent()
if err == nil {
return content, nil
}
return nil, err
}
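// Hypothetical caller sketch: fetching one page through Splash and draining the body.
// The Splash settings (SPLASH, SPLASH_TIMEOUT, SPLASH_RESOURCE_TIMEOUT, SPLASH_WAIT)
// are read from the environment inside GetResponse, so they must be set beforehand.
func exampleFetch() {
body, err := Fetch(Request{URL: "http://example.com"})
if err != nil {
logger.Println("fetch failed:", err)
return
}
defer body.Close()
html, err := ioutil.ReadAll(body)
if err != nil {
logger.Println("read failed:", err)
return
}
logger.Printf("fetched %d bytes", len(html))
}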
func (r Request) GetURL() string {
return r.URL
}
func isRobotsTxt(url string) bool {
return strings.HasSuffix(url, "robots.txt")
}
//http://choly.ca/post/go-json-marshalling/
//UnmarshalJSON convert headers to http.Header type
func (r *Response) UnmarshalJSON(data []byte) error {
type Alias Response
aux := &struct {
Headers interface{} `json:"headers"`
*Alias
}{
Alias: (*Alias)(r),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if r.Request != nil {
r.Request.Headers = castHeaders(r.Request.Headers)
}
if r.Response != nil {
r.Response.Headers = castHeaders(r.Response.Headers)
}
return nil
}
//castHeaders serves for casting headers returned by Splash to standard http.Header type
func castHeaders(splashHeaders interface{}) (header http.Header) {
header = make(map[string][]string)
switch splashHeaders.(type) {
case []interface{}:
for _, h := range splashHeaders.([]interface{}) {
//var str []string
str := []string{}
v, ok := h.(map[string]interface{})["value"].(string)
if ok {
str = append(str, v)
header[h.(map[string]interface{})["name"].(string)] = str
}
}
return header
case map[string]interface{}:
for k, v := range splashHeaders.(map[string]interface{}) {
var str []string
for _, vv := range v.([]interface{}) {
str = append(str, vv.(string))
}
header[k] = str
}
return header
default:
return nil
}
}
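// Shape reference for castHeaders (illustrative, header values invented): Splash may
// return headers either as a list of {"name": ..., "value": ...} objects or as a map
// from header name to a list of values; both convert to a standard http.Header.
func exampleCastHeaders() {
asList := []interface{}{
map[string]interface{}{"name": "Content-Type", "value": "text/html"},
}
asMap := map[string]interface{}{
"Content-Type": []interface{}{"text/html"},
}
logger.Println(castHeaders(asList).Get("Content-Type")) // text/html
logger.Println(castHeaders(asMap).Get("Content-Type")) // text/html
}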
//Splash splash:history() may return an empty list as it cannot retrieve cached results
//when the same page is requested twice. So the only solution for the moment is to call
//http://localhost:8050/_gc if an error occurs. It runs the Python garbage collector
//and clears internal WebKit caches. See more at https://github.com/scrapinghub/splash/issues/613
func gc(host string) (*gcResponse, error) {
client := &http.Client{}
gcURL := fmt.Sprintf("http://%s/_gc", host)
req, err := http.NewRequest("POST", gcURL, nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var gc gcResponse
err = json.Unmarshal(buf.Bytes(), &gc)
if err != nil {
return nil, err
}
return &gc, nil
}
//Ping returns status and maxrss from _ping endpoint
func Ping(host string) (*PingResponse, error) {
client := &http.Client{}
pingURL := fmt.Sprintf("http://%s/_ping", host)
req, err := http.NewRequest("GET", pingURL, nil)
if err != nil {
return nil, err
}
//req.SetBasicAuth(userName, userPass)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var p PingResponse
err = json.Unmarshal(buf.Bytes(), &p)
if err != nil {
return nil, err
}
return &p, nil
} | host: host,
timeout: timeout,
resourceTimeout: resourceTimeout,
wait: wait,
} | random_line_split |
splash.go | package splash
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
neturl "net/url"
"os"
"strings"
"time"
"github.com/pquerna/cachecontrol/cacheobject"
"github.com/spf13/viper"
"github.com/slotix/dataflowkit/errs"
)
var logger *log.Logger
func init() {
viper.AutomaticEnv() // read in environment variables that match
logger = log.New(os.Stdout, "splash: ", log.Lshortfile)
}
type splashConn struct {
host string //splash server address
//password string
//Splash parameters:
timeout int
resourceTimeout int
wait float64
}
//NewSplashConn creates new connection to Splash Server
func NewSplashConn(host string, timeout, resourceTimeout int, wait float64) splashConn {
return splashConn{
host: host,
timeout: timeout,
resourceTimeout: resourceTimeout,
wait: wait,
}
}
//GenerateSplashURL generates a Splash execute URL for the given request
func (s *splashConn) GenerateSplashURL(req Request) string {
/*
//"Set-Cookie" from response headers should be sent when accessing for the same domain second time
cookie := `PHPSESSID=ef75e2737a14b06a2749d0b73840354f; path=/; domain=.acer-a500.ru; HttpOnly
dle_user_id=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_password=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_hash=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_forum_sessions=ef75e2737a14b06a2749d0b73840354f; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly
forum_last=1496801580; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly`
//cookie := ""
req.Cookies, err = generateCookie(cookie)
if err != nil {
logger.Println(err)
}
//logger.Println(req.Cookies)
//---------
*/
//req.Params = `"auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fdiesel.elcat.kg%2F&ips_username=dm_&ips_password=asfwwe!444D&rememberMe=1"`
var LUAScript string
if isRobotsTxt(req.URL) {
LUAScript = robotsLUA
} else {
LUAScript = baseLUA
}
splashURL := fmt.Sprintf(
"http://%s/execute?url=%s&timeout=%d&resource_timeout=%d&wait=%.1f&cookies=%s&formdata=%s&lua_source=%s",
s.host,
neturl.QueryEscape(req.URL),
s.timeout,
s.resourceTimeout,
s.wait,
neturl.QueryEscape(req.Cookies),
neturl.QueryEscape(paramsToLuaTable(req.Params)),
neturl.QueryEscape(LUAScript))
return splashURL
}
//GetResponse result is passed to caching middleware
//to provide a RFC7234 compliant HTTP cache
func GetResponse(req Request) (*Response, error) {
sConn := NewSplashConn(
viper.GetString("SPLASH"),
viper.GetInt("SPLASH_TIMEOUT"),
viper.GetInt("SPLASH_RESOURCE_TIMEOUT"),
viper.GetFloat64("SPLASH_WAIT"),
)
splashURL := sConn.GenerateSplashURL(req)
client := &http.Client{}
request, err := http.NewRequest("GET", splashURL, nil)
//req.SetBasicAuth(s.user, s.password)
resp, err := client.Do(request)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, err
}
res, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
//response from Splash service.
statusCode := resp.StatusCode
if statusCode != 200 {
switch statusCode {
case 504:
return nil, &errs.GatewayTimeout{}
default:
return nil, fmt.Errorf("%s", res)
}
}
var sResponse Response
if err := json.Unmarshal(res, &sResponse); err != nil {
logger.Println("Json Unmarshall error", err)
}
//if response status code is not 200
if sResponse.Error != "" {
switch sResponse.Error {
case "http404":
return nil, &errs.NotFound{sResponse.URL}
case "http403":
return nil, &errs.Forbidden{sResponse.URL}
case "network3":
return nil, &errs.InvalidHost{sResponse.URL}
default:
return nil, &errs.Error{sResponse.Error}
}
//return nil, fmt.Errorf("%s", sResponse.Error)
}
// Sometimes no response, request returned by Splash.
// gc (garbage collection) method should be called to clear WebKit caches and then
// GetResponse again. See more at https://github.com/scrapinghub/splash/issues/613
if sResponse.Response == nil || sResponse.Request == nil && sResponse.HTML != "" {
var response *Response
gcResponse, err := gc(viper.GetString("SPLASH"))
if err == nil && gcResponse.Status == "ok" {
response, err = GetResponse(req)
if err != nil {
return nil, err
}
}
return response, nil
}
if !sResponse.Response.Ok {
if sResponse.Response.Status == 0 {
err = fmt.Errorf("%s",
//sResponse.Error)
sResponse.Response.StatusText)
} else {
err = fmt.Errorf("%d. %s",
sResponse.Response.Status,
sResponse.Response.StatusText)
}
return nil, err
}
//if cacheable ?
rv := sResponse.cacheable()
//logger.Println(rv.OutReasons)
//logger.Println(rv.OutWarnings)
//logger.Println(rv.OutExpirationTime)
if len(rv.OutReasons) == 0 {
sResponse.Cacheable = true
}
return &sResponse, nil
}
func (r *Response) | () (io.ReadCloser, error) {
if r == nil {
return nil, errors.New("empty response")
}
if isRobotsTxt(r.Request.URL) {
decoded, err := base64.StdEncoding.DecodeString(r.Response.Content.Text)
if err != nil {
logger.Println("decode error:", err)
return nil, err
}
readCloser := ioutil.NopCloser(bytes.NewReader(decoded))
return readCloser, nil
}
readCloser := ioutil.NopCloser(strings.NewReader(r.HTML))
return readCloser, nil
}
//cacheable check if resource is cacheable
func (r *Response) cacheable() (rv cacheobject.ObjectResults) {
respHeader := r.Response.Headers.(http.Header)
reqHeader := r.Request.Headers.(http.Header)
// respHeader := r.Response.castHeaders()
// reqHeader := r.Request.castHeaders()
reqDir, err := cacheobject.ParseRequestCacheControl(reqHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
resDir, err := cacheobject.ParseResponseCacheControl(respHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
//logger.Println(respHeader)
expiresHeader, _ := http.ParseTime(respHeader.Get("Expires"))
dateHeader, _ := http.ParseTime(respHeader.Get("Date"))
lastModifiedHeader, _ := http.ParseTime(respHeader.Get("Last-Modified"))
obj := cacheobject.Object{
// CacheIsPrivate: false,
RespDirectives: resDir,
RespHeaders: respHeader,
RespStatusCode: r.Response.Status,
RespExpiresHeader: expiresHeader,
RespDateHeader: dateHeader,
RespLastModifiedHeader: lastModifiedHeader,
ReqDirectives: reqDir,
ReqHeaders: reqHeader,
ReqMethod: r.Request.Method,
NowUTC: time.Now().UTC(),
}
rv = cacheobject.ObjectResults{}
cacheobject.CachableObject(&obj, &rv)
cacheobject.ExpirationObject(&obj, &rv)
//Check if it is cacheable
expTime := rv.OutExpirationTime.Unix()
if rv.OutExpirationTime.IsZero() {
expTime = 0
}
r.CacheExpirationTime = expTime
debug := false
if debug {
if rv.OutErr != nil {
logger.Println("Errors: ", rv.OutErr)
}
if rv.OutReasons != nil {
logger.Println("Reasons to not cache: ", rv.OutReasons)
}
if rv.OutWarnings != nil {
logger.Println("Warning headers to add: ", rv.OutWarnings)
}
logger.Println("Expiration: ", rv.OutExpirationTime.String())
}
return rv
}
//Fetch content from url through Splash server https://github.com/scrapinghub/splash/
func Fetch(req Request) (io.ReadCloser, error) {
//logger.Println(splashURL)
response, err := GetResponse(req)
if err != nil {
return nil, err
}
content, err := response.GetContent()
if err == nil {
return content, nil
}
return nil, err
}
func (r Request) GetURL() string {
return r.URL
}
func isRobotsTxt(url string) bool {
return strings.HasSuffix(url, "robots.txt")
}
//http://choly.ca/post/go-json-marshalling/
//UnmarshalJSON convert headers to http.Header type
func (r *Response) UnmarshalJSON(data []byte) error {
type Alias Response
aux := &struct {
Headers interface{} `json:"headers"`
*Alias
}{
Alias: (*Alias)(r),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if r.Request != nil {
r.Request.Headers = castHeaders(r.Request.Headers)
}
if r.Response != nil {
r.Response.Headers = castHeaders(r.Response.Headers)
}
return nil
}
//castHeaders serves for casting headers returned by Splash to standard http.Header type
func castHeaders(splashHeaders interface{}) (header http.Header) {
header = make(map[string][]string)
switch splashHeaders.(type) {
case []interface{}:
for _, h := range splashHeaders.([]interface{}) {
//var str []string
str := []string{}
v, ok := h.(map[string]interface{})["value"].(string)
if ok {
str = append(str, v)
header[h.(map[string]interface{})["name"].(string)] = str
}
}
return header
case map[string]interface{}:
for k, v := range splashHeaders.(map[string]interface{}) {
var str []string
for _, vv := range v.([]interface{}) {
str = append(str, vv.(string))
}
header[k] = str
}
return header
default:
return nil
}
}
//Splash splash:history() may return an empty list as it cannot retrieve cached results
//when the same page is requested twice. So the only solution for the moment is to call
//http://localhost:8050/_gc if an error occurs. It runs the Python garbage collector
//and clears internal WebKit caches. See more at https://github.com/scrapinghub/splash/issues/613
func gc(host string) (*gcResponse, error) {
client := &http.Client{}
gcURL := fmt.Sprintf("http://%s/_gc", host)
req, err := http.NewRequest("POST", gcURL, nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var gc gcResponse
err = json.Unmarshal(buf.Bytes(), &gc)
if err != nil {
return nil, err
}
return &gc, nil
}
//Ping returns status and maxrss from _ping endpoint
func Ping(host string) (*PingResponse, error) {
client := &http.Client{}
pingURL := fmt.Sprintf("http://%s/_ping", host)
req, err := http.NewRequest("GET", pingURL, nil)
if err != nil {
return nil, err
}
//req.SetBasicAuth(userName, userPass)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var p PingResponse
err = json.Unmarshal(buf.Bytes(), &p)
if err != nil {
return nil, err
}
return &p, nil
}
| GetContent | identifier_name |
splash.go | package splash
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
neturl "net/url"
"os"
"strings"
"time"
"github.com/pquerna/cachecontrol/cacheobject"
"github.com/spf13/viper"
"github.com/slotix/dataflowkit/errs"
)
var logger *log.Logger
func init() {
viper.AutomaticEnv() // read in environment variables that match
logger = log.New(os.Stdout, "splash: ", log.Lshortfile)
}
type splashConn struct {
host string //splash server address
//password string
//Splash parameters:
timeout int
resourceTimeout int
wait float64
}
//NewSplashConn creates new connection to Splash Server
func NewSplashConn(host string, timeout, resourceTimeout int, wait float64) splashConn {
return splashConn{
host: host,
timeout: timeout,
resourceTimeout: resourceTimeout,
wait: wait,
}
}
//GenerateSplashURL generates a Splash execute URL for the given request
func (s *splashConn) GenerateSplashURL(req Request) string {
/*
//"Set-Cookie" from response headers should be sent when accessing for the same domain second time
cookie := `PHPSESSID=ef75e2737a14b06a2749d0b73840354f; path=/; domain=.acer-a500.ru; HttpOnly
dle_user_id=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_password=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_hash=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_forum_sessions=ef75e2737a14b06a2749d0b73840354f; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly
forum_last=1496801580; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly`
//cookie := ""
req.Cookies, err = generateCookie(cookie)
if err != nil {
logger.Println(err)
}
//logger.Println(req.Cookies)
//---------
*/
//req.Params = `"auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fdiesel.elcat.kg%2F&ips_username=dm_&ips_password=asfwwe!444D&rememberMe=1"`
var LUAScript string
if isRobotsTxt(req.URL) | else {
LUAScript = baseLUA
}
splashURL := fmt.Sprintf(
"http://%s/execute?url=%s&timeout=%d&resource_timeout=%d&wait=%.1f&cookies=%s&formdata=%s&lua_source=%s",
s.host,
neturl.QueryEscape(req.URL),
s.timeout,
s.resourceTimeout,
s.wait,
neturl.QueryEscape(req.Cookies),
neturl.QueryEscape(paramsToLuaTable(req.Params)),
neturl.QueryEscape(LUAScript))
return splashURL
}
//GetResponse result is passed to caching middleware
//to provide an RFC 7234 compliant HTTP cache
func GetResponse(req Request) (*Response, error) {
sConn := NewSplashConn(
viper.GetString("SPLASH"),
viper.GetInt("SPLASH_TIMEOUT"),
viper.GetInt("SPLASH_RESOURCE_TIMEOUT"),
viper.GetFloat64("SPLASH_WAIT"),
)
splashURL := sConn.GenerateSplashURL(req)
client := &http.Client{}
request, err := http.NewRequest("GET", splashURL, nil)
if err != nil {
return nil, err
}
//req.SetBasicAuth(s.user, s.password)
resp, err := client.Do(request)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, err
}
res, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
//response from Splash service.
statusCode := resp.StatusCode
if statusCode != 200 {
switch statusCode {
case 504:
return nil, &errs.GatewayTimeout{}
default:
return nil, fmt.Errorf(string(res))
}
}
var sResponse Response
if err := json.Unmarshal(res, &sResponse); err != nil {
logger.Println("Json Unmarshall error", err)
}
//if response status code is not 200
if sResponse.Error != "" {
switch sResponse.Error {
case "http404":
return nil, &errs.NotFound{sResponse.URL}
case "http403":
return nil, &errs.Forbidden{sResponse.URL}
case "network3":
return nil, &errs.InvalidHost{sResponse.URL}
default:
return nil, &errs.Error{sResponse.Error}
}
//return nil, fmt.Errorf("%s", sResponse.Error)
}
// Sometimes no response/request is returned by Splash.
// The gc (garbage collection) method should be called to clear WebKit caches and then
// GetResponse called again. See more at https://github.com/scrapinghub/splash/issues/613
if sResponse.Response == nil || sResponse.Request == nil && sResponse.HTML != "" {
var response *Response
gcResponse, err := gc(viper.GetString("SPLASH"))
if err == nil && gcResponse.Status == "ok" {
response, err = GetResponse(req)
if err != nil {
return nil, err
}
}
return response, nil
}
if !sResponse.Response.Ok {
if sResponse.Response.Status == 0 {
err = fmt.Errorf("%s",
//sResponse.Error)
sResponse.Response.StatusText)
} else {
err = fmt.Errorf("%d. %s",
sResponse.Response.Status,
sResponse.Response.StatusText)
}
return nil, err
}
//if cacheable ?
rv := sResponse.cacheable()
//logger.Println(rv.OutReasons)
//logger.Println(rv.OutWarnings)
//logger.Println(rv.OutExpirationTime)
if len(rv.OutReasons) == 0 {
sResponse.Cacheable = true
}
return &sResponse, nil
}
func (r *Response) GetContent() (io.ReadCloser, error) {
if r == nil {
return nil, errors.New("empty response")
}
if isRobotsTxt(r.Request.URL) {
decoded, err := base64.StdEncoding.DecodeString(r.Response.Content.Text)
if err != nil {
logger.Println("decode error:", err)
return nil, err
}
readCloser := ioutil.NopCloser(bytes.NewReader(decoded))
return readCloser, nil
}
readCloser := ioutil.NopCloser(strings.NewReader(r.HTML))
return readCloser, nil
}
//cacheable checks if the resource is cacheable
func (r *Response) cacheable() (rv cacheobject.ObjectResults) {
respHeader := r.Response.Headers.(http.Header)
reqHeader := r.Request.Headers.(http.Header)
// respHeader := r.Response.castHeaders()
// reqHeader := r.Request.castHeaders()
reqDir, err := cacheobject.ParseRequestCacheControl(reqHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
resDir, err := cacheobject.ParseResponseCacheControl(respHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
//logger.Println(respHeader)
expiresHeader, _ := http.ParseTime(respHeader.Get("Expires"))
dateHeader, _ := http.ParseTime(respHeader.Get("Date"))
lastModifiedHeader, _ := http.ParseTime(respHeader.Get("Last-Modified"))
obj := cacheobject.Object{
// CacheIsPrivate: false,
RespDirectives: resDir,
RespHeaders: respHeader,
RespStatusCode: r.Response.Status,
RespExpiresHeader: expiresHeader,
RespDateHeader: dateHeader,
RespLastModifiedHeader: lastModifiedHeader,
ReqDirectives: reqDir,
ReqHeaders: reqHeader,
ReqMethod: r.Request.Method,
NowUTC: time.Now().UTC(),
}
rv = cacheobject.ObjectResults{}
cacheobject.CachableObject(&obj, &rv)
cacheobject.ExpirationObject(&obj, &rv)
//Check if it is cacheable
expTime := rv.OutExpirationTime.Unix()
if rv.OutExpirationTime.IsZero() {
expTime = 0
}
r.CacheExpirationTime = expTime
debug := false
if debug {
if rv.OutErr != nil {
logger.Println("Errors: ", rv.OutErr)
}
if rv.OutReasons != nil {
logger.Println("Reasons to not cache: ", rv.OutReasons)
}
if rv.OutWarnings != nil {
logger.Println("Warning headers to add: ", rv.OutWarnings)
}
logger.Println("Expiration: ", rv.OutExpirationTime.String())
}
return rv
}
//Fetch fetches content from a URL through the Splash server https://github.com/scrapinghub/splash/
func Fetch(req Request) (io.ReadCloser, error) {
//logger.Println(splashURL)
response, err := GetResponse(req)
if err != nil {
return nil, err
}
logger.Println(err)
content, err := response.GetContent()
if err == nil {
return content, nil
}
return nil, err
}
func (r Request) GetURL() string {
return r.URL
}
func isRobotsTxt(url string) bool {
if strings.HasSuffix(url, "robots.txt") {
return true
}
return false
}
//http://choly.ca/post/go-json-marshalling/
//UnmarshalJSON converts headers to http.Header type
func (r *Response) UnmarshalJSON(data []byte) error {
type Alias Response
aux := &struct {
Headers interface{} `json:"headers"`
*Alias
}{
Alias: (*Alias)(r),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if r.Request != nil {
r.Request.Headers = castHeaders(r.Request.Headers)
}
if r.Response != nil {
r.Response.Headers = castHeaders(r.Response.Headers)
}
return nil
}
//castHeaders casts headers returned by Splash to the standard http.Header type
func castHeaders(splashHeaders interface{}) (header http.Header) {
header = make(map[string][]string)
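// Splash may encode headers either as a list of {name, value} objects or as
// a map from header name to a list of values; handle both shapes here.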
switch splashHeaders.(type) {
case []interface{}:
for _, h := range splashHeaders.([]interface{}) {
//var str []string
str := []string{}
v, ok := h.(map[string]interface{})["value"].(string)
if ok {
str = append(str, v)
header[h.(map[string]interface{})["name"].(string)] = str
}
}
return header
case map[string]interface{}:
for k, v := range splashHeaders.(map[string]interface{}) {
var str []string
for _, vv := range v.([]interface{}) {
str = append(str, vv.(string))
}
header[k] = str
}
return header
default:
return nil
}
}
//Splash splash:history() may return an empty list as it cannot retrieve cached results
//when the same page is requested twice. So the only solution for the moment is to call
//http://localhost:8050/_gc if an error occurs. It runs the Python garbage collector
//and clears internal WebKit caches. See more at https://github.com/scrapinghub/splash/issues/613
func gc(host string) (*gcResponse, error) {
client := &http.Client{}
gcURL := fmt.Sprintf("http://%s/_gc", host)
req, err := http.NewRequest("POST", gcURL, nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var gc gcResponse
err = json.Unmarshal(buf.Bytes(), &gc)
if err != nil {
return nil, err
}
return &gc, nil
}
//Ping returns status and maxrss from _ping endpoint
func Ping(host string) (*PingResponse, error) {
client := &http.Client{}
pingURL := fmt.Sprintf("http://%s/_ping", host)
req, err := http.NewRequest("GET", pingURL, nil)
if err != nil {
return nil, err
}
//req.SetBasicAuth(userName, userPass)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var p PingResponse
err = json.Unmarshal(buf.Bytes(), &p)
if err != nil {
return nil, err
}
return &p, nil
}
| {
LUAScript = robotsLUA
} | conditional_block |
splash.go | package splash
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
neturl "net/url"
"os"
"strings"
"time"
"github.com/pquerna/cachecontrol/cacheobject"
"github.com/spf13/viper"
"github.com/slotix/dataflowkit/errs"
)
var logger *log.Logger
func init() {
viper.AutomaticEnv() // read in environment variables that match
logger = log.New(os.Stdout, "splash: ", log.Lshortfile)
}
type splashConn struct {
host string //splash server address
//password string
//Splash parameters:
timeout int
resourceTimeout int
wait float64
}
//NewSplashConn creates a new connection to the Splash server
func NewSplashConn(host string, timeout, resourceTimeout int, wait float64) splashConn {
return splashConn{
host: host,
timeout: timeout,
resourceTimeout: resourceTimeout,
wait: wait,
}
}
//GenerateSplashURL generates a Splash URL for the given request
func (s *splashConn) GenerateSplashURL(req Request) string {
/*
//"Set-Cookie" from response headers should be sent when accessing for the same domain second time
cookie := `PHPSESSID=ef75e2737a14b06a2749d0b73840354f; path=/; domain=.acer-a500.ru; HttpOnly
dle_user_id=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_password=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_hash=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0; path=/; domain=.acer-a500.ru; httponly
dle_forum_sessions=ef75e2737a14b06a2749d0b73840354f; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly
forum_last=1496801580; expires=Wed, 06-Jun-2018 19:13:00 GMT; Max-Age=31536000; path=/; domain=.acer-a500.ru; httponly`
//cookie := ""
req.Cookies, err = generateCookie(cookie)
if err != nil {
logger.Println(err)
}
//logger.Println(req.Cookies)
//---------
*/
//req.Params = `"auth_key=880ea6a14ea49e853634fbdc5015a024&referer=http%3A%2F%2Fdiesel.elcat.kg%2F&ips_username=dm_&ips_password=asfwwe!444D&rememberMe=1"`
var LUAScript string
if isRobotsTxt(req.URL) {
LUAScript = robotsLUA
} else {
LUAScript = baseLUA
}
splashURL := fmt.Sprintf(
"http://%s/execute?url=%s&timeout=%d&resource_timeout=%d&wait=%.1f&cookies=%s&formdata=%s&lua_source=%s",
s.host,
neturl.QueryEscape(req.URL),
s.timeout,
s.resourceTimeout,
s.wait,
neturl.QueryEscape(req.Cookies),
neturl.QueryEscape(paramsToLuaTable(req.Params)),
neturl.QueryEscape(LUAScript))
return splashURL
}
//GetResponse result is passed to caching middleware
//to provide an RFC 7234 compliant HTTP cache
func GetResponse(req Request) (*Response, error) {
sConn := NewSplashConn(
viper.GetString("SPLASH"),
viper.GetInt("SPLASH_TIMEOUT"),
viper.GetInt("SPLASH_RESOURCE_TIMEOUT"),
viper.GetFloat64("SPLASH_WAIT"),
)
splashURL := sConn.GenerateSplashURL(req)
client := &http.Client{}
request, err := http.NewRequest("GET", splashURL, nil)
if err != nil {
return nil, err
}
//req.SetBasicAuth(s.user, s.password)
resp, err := client.Do(request)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, err
}
res, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
//response from Splash service.
statusCode := resp.StatusCode
if statusCode != 200 {
switch statusCode {
case 504:
return nil, &errs.GatewayTimeout{}
default:
return nil, fmt.Errorf(string(res))
}
}
var sResponse Response
if err := json.Unmarshal(res, &sResponse); err != nil {
logger.Println("Json Unmarshall error", err)
}
//if response status code is not 200
if sResponse.Error != "" {
switch sResponse.Error {
case "http404":
return nil, &errs.NotFound{sResponse.URL}
case "http403":
return nil, &errs.Forbidden{sResponse.URL}
case "network3":
return nil, &errs.InvalidHost{sResponse.URL}
default:
return nil, &errs.Error{sResponse.Error}
}
//return nil, fmt.Errorf("%s", sResponse.Error)
}
// Sometimes no response/request is returned by Splash.
// The gc (garbage collection) method should be called to clear WebKit caches and then
// GetResponse called again. See more at https://github.com/scrapinghub/splash/issues/613
if sResponse.Response == nil || sResponse.Request == nil && sResponse.HTML != "" {
var response *Response
gcResponse, err := gc(viper.GetString("SPLASH"))
if err == nil && gcResponse.Status == "ok" {
response, err = GetResponse(req)
if err != nil {
return nil, err
}
}
return response, nil
}
if !sResponse.Response.Ok {
if sResponse.Response.Status == 0 {
err = fmt.Errorf("%s",
//sResponse.Error)
sResponse.Response.StatusText)
} else {
err = fmt.Errorf("%d. %s",
sResponse.Response.Status,
sResponse.Response.StatusText)
}
return nil, err
}
//if cacheable ?
rv := sResponse.cacheable()
//logger.Println(rv.OutReasons)
//logger.Println(rv.OutWarnings)
//logger.Println(rv.OutExpirationTime)
if len(rv.OutReasons) == 0 {
sResponse.Cacheable = true
}
return &sResponse, nil
}
func (r *Response) GetContent() (io.ReadCloser, error) {
if r == nil {
return nil, errors.New("empty response")
}
if isRobotsTxt(r.Request.URL) {
decoded, err := base64.StdEncoding.DecodeString(r.Response.Content.Text)
if err != nil {
logger.Println("decode error:", err)
return nil, err
}
readCloser := ioutil.NopCloser(bytes.NewReader(decoded))
return readCloser, nil
}
readCloser := ioutil.NopCloser(strings.NewReader(r.HTML))
return readCloser, nil
}
//cacheable checks if the resource is cacheable
func (r *Response) cacheable() (rv cacheobject.ObjectResults) |
//Fetch fetches content from a URL through the Splash server https://github.com/scrapinghub/splash/
func Fetch(req Request) (io.ReadCloser, error) {
//logger.Println(splashURL)
response, err := GetResponse(req)
if err != nil {
return nil, err
}
logger.Println(err)
content, err := response.GetContent()
if err == nil {
return content, nil
}
return nil, err
}
func (r Request) GetURL() string {
return r.URL
}
func isRobotsTxt(url string) bool {
if strings.HasSuffix(url, "robots.txt") {
return true
}
return false
}
//http://choly.ca/post/go-json-marshalling/
//UnmarshalJSON converts headers to http.Header type
func (r *Response) UnmarshalJSON(data []byte) error {
type Alias Response
aux := &struct {
Headers interface{} `json:"headers"`
*Alias
}{
Alias: (*Alias)(r),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if r.Request != nil {
r.Request.Headers = castHeaders(r.Request.Headers)
}
if r.Response != nil {
r.Response.Headers = castHeaders(r.Response.Headers)
}
return nil
}
//castHeaders casts headers returned by Splash to the standard http.Header type
func castHeaders(splashHeaders interface{}) (header http.Header) {
header = make(map[string][]string)
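// Splash may encode headers either as a list of {name, value} objects or as
// a map from header name to a list of values; handle both shapes here.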
switch splashHeaders.(type) {
case []interface{}:
for _, h := range splashHeaders.([]interface{}) {
//var str []string
str := []string{}
v, ok := h.(map[string]interface{})["value"].(string)
if ok {
str = append(str, v)
header[h.(map[string]interface{})["name"].(string)] = str
}
}
return header
case map[string]interface{}:
for k, v := range splashHeaders.(map[string]interface{}) {
var str []string
for _, vv := range v.([]interface{}) {
str = append(str, vv.(string))
}
header[k] = str
}
return header
default:
return nil
}
}
//Splash splash:history() may return an empty list as it cannot retrieve cached results
//when the same page is requested twice. So the only solution for the moment is to call
//http://localhost:8050/_gc if an error occurs. It runs the Python garbage collector
//and clears internal WebKit caches. See more at https://github.com/scrapinghub/splash/issues/613
func gc(host string) (*gcResponse, error) {
client := &http.Client{}
gcURL := fmt.Sprintf("http://%s/_gc", host)
req, err := http.NewRequest("POST", gcURL, nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var gc gcResponse
err = json.Unmarshal(buf.Bytes(), &gc)
if err != nil {
return nil, err
}
return &gc, nil
}
//Ping returns status and maxrss from _ping endpoint
func Ping(host string) (*PingResponse, error) {
client := &http.Client{}
pingURL := fmt.Sprintf("http://%s/_ping", host)
req, err := http.NewRequest("GET", pingURL, nil)
if err != nil {
return nil, err
}
//req.SetBasicAuth(userName, userPass)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(resp.Body)
var p PingResponse
err = json.Unmarshal(buf.Bytes(), &p)
if err != nil {
return nil, err
}
return &p, nil
}
| {
respHeader := r.Response.Headers.(http.Header)
reqHeader := r.Request.Headers.(http.Header)
// respHeader := r.Response.castHeaders()
// reqHeader := r.Request.castHeaders()
reqDir, err := cacheobject.ParseRequestCacheControl(reqHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
resDir, err := cacheobject.ParseResponseCacheControl(respHeader.Get("Cache-Control"))
if err != nil {
logger.Printf(err.Error())
}
//logger.Println(respHeader)
expiresHeader, _ := http.ParseTime(respHeader.Get("Expires"))
dateHeader, _ := http.ParseTime(respHeader.Get("Date"))
lastModifiedHeader, _ := http.ParseTime(respHeader.Get("Last-Modified"))
obj := cacheobject.Object{
// CacheIsPrivate: false,
RespDirectives: resDir,
RespHeaders: respHeader,
RespStatusCode: r.Response.Status,
RespExpiresHeader: expiresHeader,
RespDateHeader: dateHeader,
RespLastModifiedHeader: lastModifiedHeader,
ReqDirectives: reqDir,
ReqHeaders: reqHeader,
ReqMethod: r.Request.Method,
NowUTC: time.Now().UTC(),
}
rv = cacheobject.ObjectResults{}
cacheobject.CachableObject(&obj, &rv)
cacheobject.ExpirationObject(&obj, &rv)
//Check if it is cacheable
expTime := rv.OutExpirationTime.Unix()
if rv.OutExpirationTime.IsZero() {
expTime = 0
}
r.CacheExpirationTime = expTime
debug := false
if debug {
if rv.OutErr != nil {
logger.Println("Errors: ", rv.OutErr)
}
if rv.OutReasons != nil {
logger.Println("Reasons to not cache: ", rv.OutReasons)
}
if rv.OutWarnings != nil {
logger.Println("Warning headers to add: ", rv.OutWarnings)
}
logger.Println("Expiration: ", rv.OutExpirationTime.String())
}
return rv
} | identifier_body |
main.go | package main
import (
"context"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/andygrunwald/go-gerrit"
"github.com/google/go-github/github"
"github.com/gregjones/httpcache"
git "github.com/libgit2/git2go/v31"
"golang.org/x/oauth2"
)
var isDryRun bool = true
const BOT_IDENT_TAG = "::SDKBOT/PR"
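// RedactEmail masks the middle of an email address, keeping roughly the outer
// 60% of the local part and 70% of the domain visible so the author can still
// recognize their own address in a public comment.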
func RedactEmail(email string) string {
parts := strings.Split(email, "@")
if len(parts) != 2 {
return "REDACTION FAILED"
}
user := parts[0]
domain := parts[1]
userAllow := float32(len(user)) * 0.6
domainAllow := float32(len(domain)) * 0.7
userAllowSide := int(userAllow / 2)
domainAllowSide := int(domainAllow / 2)
userRightStart := len(user) - userAllowSide
domainRightStart := len(domain) - domainAllowSide
user = user[0:userAllowSide] + strings.Repeat("*", userRightStart-userAllowSide) + user[userRightStart:]
domain = domain[0:domainAllowSide] + strings.Repeat("*", domainRightStart-domainAllowSide) + domain[domainRightStart:]
return user + "@" + domain
}
func RandomChangeId() string {
b := make([]byte, sha1.Size)
rand.Read(b)
encData := sha1.Sum(b)
return "I" + hex.EncodeToString(encData[:])
}
type subError struct {
msg string
err error
}
func (e subError) Error() string {
if e.err != nil {
return e.msg + ": " + e.err.Error()
} else {
return e.msg
}
}
func makeErr(msg string, err error) error {
return subError{msg, err}
}
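// SquashHead rewrites the top squashCount commits of HEAD as a single commit
// (titled mergeCommitTitle when squashing more than one) and appends the given
// Gerrit Change-Id to the resulting commit message.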
func SquashHead(repo *git.Repository, squashCount int, mergeCommitTitle, changeId string) error {
log.Printf("Generating squash commit for `HEAD~0` to `HEAD~%d`", squashCount)
headRef, err := repo.Head()
if err != nil {
return makeErr("failed to get head reference", err)
}
topCommitId := headRef.Target()
topCommit, err := repo.LookupCommit(topCommitId)
if err != nil {
return makeErr("failed to locate head commit", nil)
}
var baseCommit *git.Commit
var squashCommits []*git.Commit
{
curCommit := topCommit
for i := 0; i < squashCount; i++ {
squashCommits = append(squashCommits, curCommit)
curCommit = curCommit.Parent(0)
}
baseCommit = curCommit
}
log.Printf("Base Commit is `%s`", baseCommit.Id().String())
var newCommitAuthor *git.Signature
var newCommitCommitter *git.Signature
var newCommitMsg string
if len(squashCommits) == 1 {
newCommitAuthor = squashCommits[0].Author()
newCommitCommitter = squashCommits[0].Committer()
newCommitMsg = strings.TrimSpace(squashCommits[0].Message()) + "\n"
} else {
newCommitMsg = ""
newCommitMsg += mergeCommitTitle + "\n\n"
for i := 0; i < len(squashCommits); i++ {
curCommit := squashCommits[i]
newCommitMsg += "----\n"
newCommitMsg += strings.TrimSpace(curCommit.Message()) + "\n"
newCommitAuthor = curCommit.Author()
newCommitCommitter = curCommit.Committer()
}
}
newCommitMsg += "\nChange-Id: " + changeId
err = repo.ResetToCommit(baseCommit, git.ResetSoft, nil)
if err != nil {
return makeErr("failed to reset to base commit", err)
}
idx, err := repo.Index()
if err != nil {
return makeErr("failed to retrieve repo index", err)
}
err = idx.Write()
if err != nil {
return makeErr("failed to write squash index", err)
}
newCommitTreeId, err := idx.WriteTree()
if err != nil {
return makeErr("failed to write squash tree", err)
}
newCommitTree, err := repo.LookupTree(newCommitTreeId)
if err != nil {
return makeErr("failed to find created squash tree", err)
}
log.Printf("Generated new commit message:\n%s", newCommitMsg)
_, err = repo.CreateCommit("HEAD", newCommitAuthor, newCommitCommitter, newCommitMsg, newCommitTree, baseCommit)
if err != nil {
return makeErr("failed to generate squash commit", err)
}
return nil
}
type CsStateInfo struct {
ChangeNum int
ChangeId string
Status string
CurrentSha1 string
}
func GetChangesetState(owner, repo string, prnum int) (*CsStateInfo, error) {
log.Printf("Retrieving change set for %s/%s/%d", owner, repo, prnum)
path := fmt.Sprintf("github.com/%s/%s/pull/%d", owner, repo, prnum)
changes, _, err := gerritClient.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
Query: []string{path},
},
ChangeOptions: gerrit.ChangeOptions{
AdditionalFields: []string{"messages"},
},
})
if err != nil {
return nil, makeErr("failed to gerrit query for changes", err)
}
var foundChangeset *gerrit.ChangeInfo
for i := 0; i < len(*changes); i++ {
change := &(*changes)[i]
for j := 0; j < len(change.Messages); j++ {
if !strings.Contains(change.Messages[j].Message, BOT_IDENT_TAG) {
continue
}
if strings.Contains(change.Messages[j].Message, path) {
if foundChangeset != nil {
return nil, makeErr("found multiple possible changesets", nil)
}
foundChangeset = change
break
}
}
}
if foundChangeset == nil {
return nil, nil
}
commitMatcher, err := regexp.Compile("commit:([0-9a-zA-Z]+)")
if err != nil {
return nil, makeErr("failed to compile commit sha1 finding regexp", err)
}
var latestSha1 string
for i := 0; i < len(foundChangeset.Messages); i++ {
commitMatches := commitMatcher.FindStringSubmatch(foundChangeset.Messages[i].Message)
if len(commitMatches) == 2 {
latestSha1 = commitMatches[1]
}
}
return &CsStateInfo{
ChangeNum: foundChangeset.Number,
ChangeId: foundChangeset.ChangeID,
Status: foundChangeset.Status,
CurrentSha1: latestSha1,
}, nil
}
type RepoInfo struct {
Owner string
Name string
Repo string
}
var botOwners []string
var githubClient *github.Client
var githubUser string
var githubToken string
var gerritClient *gerrit.Client
var gerritHost string
var gerritUser string
var gerritPass string
var gerritPublicKey string
var gerritPrivateKey string
var gerritClaGroupName string
var allRepos []RepoInfo
func gerritGitCredentialsHandler(url string, username_from_url string, allowed_types git.CredType) (*git.Cred, error) {
creds, err := git.NewCredSshKey(gerritUser, gerritPublicKey, gerritPrivateKey, "")
return creds, err
}
func initGerritClient() error {
client, err := gerrit.NewClient("https://"+gerritHost+"/", nil)
if err != nil {
return makeErr("failed to create gerrit client", err)
}
client.Authentication.SetBasicAuth(gerritUser, gerritPass)
gerritClient = client
return nil
}
func initGitHubClient() error {
tx := httpcache.NewMemoryCacheTransport()
tc := &http.Client{
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: githubToken},
),
Base: tx,
},
}
githubClient = github.NewClient(tc)
return nil
}
type PrStateInfo struct {
CurrentState string
LastUpdatedTime time.Time
CurrentSha1 string
NumOfCommits int
}
func IsGitHubUserBot(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
if *user.Login == githubUser {
return true
}
return false
}
func IsGitHubUserBotOwner(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
for i := 0; i < len(botOwners); i++ {
if *user.Login == botOwners[i] {
return true
}
}
return IsGitHubUserBot(user)
}
const (
BOTSTATE_NEW = ""
BOTSTATE_NO_CLA = "no_cla"
BOTSTATE_CREATED = "created"
BOTSTATE_UPDATED = "updated"
BOTSTATE_ABANDONED = "abandoned"
BOTSTATE_MERGED = "merged"
BOTSTATE_TIMEOUT = "timeout"
)
func GetPullRequestState(owner, repo string, prnum int) (*PrStateInfo, error) {
var parseableStateNames = []string{
BOTSTATE_NO_CLA,
BOTSTATE_CREATED,
BOTSTATE_UPDATED,
BOTSTATE_ABANDONED,
BOTSTATE_MERGED,
BOTSTATE_TIMEOUT,
// backwards compatibility names
"pushed",
"too_many_commits",
"closed",
}
log.Printf("Retrieving PR state for %s/%s/%d", owner, repo, prnum)
info, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return nil, makeErr("failed to retieve pull request info", err)
}
comments, _, err := githubClient.Issues.ListComments(context.Background(), owner, repo, prnum, nil)
if err != nil {
return nil, makeErr("failed to retrieve pull request comments", err)
}
var lastStateTime time.Time
var lastStateName string
var lastUpdatedTime time.Time
for i := 0; i < len(comments); i++ {
if comments[i].CreatedAt.After(lastUpdatedTime) || comments[i].UpdatedAt.After(lastUpdatedTime) {
lastUpdatedTime = *comments[i].UpdatedAt
}
if !IsGitHubUserBotOwner(comments[i].User) {
continue
}
for j := 0; j < len(parseableStateNames); j++ {
if strings.Contains(*comments[i].Body, BOT_IDENT_TAG+":"+parseableStateNames[j]) {
if comments[i].CreatedAt.After(lastStateTime) {
lastStateName = parseableStateNames[j]
lastStateTime = *comments[i].CreatedAt
}
}
}
}
// For backwards compat...
if lastStateName == "too_many_commits" {
lastStateName = BOTSTATE_NEW
} else if lastStateName == "pushed" {
lastStateName = BOTSTATE_UPDATED
} else if lastStateName == "closed" {
lastStateName = BOTSTATE_ABANDONED
}
if lastUpdatedTime.IsZero() {
lastUpdatedTime = time.Now()
}
return &PrStateInfo{
CurrentState: lastStateName,
LastUpdatedTime: lastUpdatedTime,
CurrentSha1: *info.Head.SHA,
NumOfCommits: *info.Commits,
}, nil
}
func VerifyEmailCla(email string) (bool, error) {
log.Printf("Verifying CLA signed for `%s`", email)
if email == "" {
return false, makeErr("you must specify a non-empty email", nil)
}
groups, _, err := gerritClient.Accounts.ListGroups(email)
if err != nil {
if strings.Contains(err.Error(), "Not Found") {
log.Printf("Email was not found on Gerrit")
return false, nil
}
log.Printf("An error occured trying to locate the user on Gerrit")
return false, makeErr("failed to retrieve gerrit user groups", err)
}
hasClaGroup := false
for i := 0; i < len(*groups); i++ {
if (*groups)[i].Name == gerritClaGroupName {
hasClaGroup = true
}
}
if hasClaGroup {
log.Printf("The user was located and has signed the CLA")
} else {
log.Printf("The user was located, but they did not sign the CLA")
}
return hasClaGroup, nil
}
func VerifyPrAuthorClas(owner, repo string, prnum int) (bool, []string, error) {
log.Printf("Verifying CLA signed for PR %s/%s/%d", owner, repo, prnum)
commits, _, err := githubClient.PullRequests.ListCommits(context.Background(), owner, repo, prnum, nil)
if err != nil {
return false, nil, makeErr("failed to retrieve pull request commits", err)
}
authorEmailMap := make(map[string]bool)
for i := 0; i < len(commits); i++ {
authorEmail := *commits[i].Commit.Author.Email
authorEmailMap[authorEmail] = false
}
var emails []string
for authorEmail := range authorEmailMap {
emails = append(emails, authorEmail)
}
allSigned := true
for authorEmail := range authorEmailMap {
signed, err := VerifyEmailCla(authorEmail)
if err != nil {
return false, emails, err
}
if !signed {
allSigned = false
}
}
return allSigned, emails, nil
}
func SendPrStateCommentAndClose(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment and close for '%s' due to dry run.", state)
return nil
}
newState := "closed"
_, _, err := githubClient.PullRequests.Edit(context.Background(), owner, repo, prnum, &github.PullRequest{
State: &newState,
})
if err != nil {
return makeErr("failed to close pull request", err)
}
return SendPrStateComment(owner, repo, prnum, message, state, is_first)
}
func SendPrStateComment(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment for '%s' due to dry run.", state)
return nil
}
var messageBody string
if is_first {
messageBody += "Thanks for the pull request!! To ensure quality review, Couchbase employs"
messageBody += " a [code review system](http://review.couchbase.org/) based on"
messageBody += " [Gerrit](https://www.gerritcodereview.com/) to manage the workflow of changes"
messageBody += " in addition to tracking our contributor agreements.\n\n"
}
messageBody += strings.TrimSpace(message)
messageBody += "\n\n" + BOT_IDENT_TAG + ":" + state
_, _, err := githubClient.Issues.CreateComment(context.Background(), owner, repo, prnum, &github.IssueComment{
Body: &messageBody,
})
if err != nil {
return makeErr("failed to comment on pull request", err)
}
return nil
}
func SendClaText(owner, repo string, prnum int, state *PrStateInfo, emails []string) error {
log.Printf("Sending no_cla for %s/%s/%d", owner, repo, prnum)
message := "To get this change in and collaborate in code review, please register on Gerrit"
message += " and accept our CLA. The easiest way to do this is to follow the link below,"
message += " sign in with your GitHub account and then follow through the steps provided"
message += " on that page to sign an 'Individual' agreement:"
message += " http://review.couchbase.org/#/settings/new-agreement."
message += "\n\n"
message += "Keep in mind that the emails we are seeing on the commits are: "
for i, email := range emails {
if i != 0 {
message += ", "
}
message += "`" + RedactEmail(email) + "`"
}
message += "\n\n"
message += "Note: Please contact us if you have any issues registering with Gerrit!"
message += " If you have not signed our CLA within 7 days, the Pull Request will be"
message += " automatically closed."
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_NO_CLA, state.CurrentState == BOTSTATE_NEW)
}
func SendPushedText(owner, repo string, prnum int, state *PrStateInfo, changeNum int) error {
log.Printf("Sending pushed for %s/%s/%d", owner, repo, prnum)
message := "Your changes (commit: " + state.CurrentSha1 + ") have been pushed to the Couchbase Review Site:\n"
message += "http://review.couchbase.org/" + strconv.FormatInt(int64(changeNum), 10)
if state.NumOfCommits > 1 {
message += "\n\n"
message += "Note: As your pull request contains multiple commits, we have"
message += " performed an automatic squash of these commits into a single"
message += " change-set. If this is not the desired behaviour, please"
message += " consider submitting a pull request per discreet feature."
}
if state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_UPDATED, false)
} else {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_CREATED, state.CurrentState == BOTSTATE_NEW)
}
}
func ClosePrForTimeout(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to timeout for %s/%s/%d", owner, repo, prnum)
message := "Unfortunately it has been 7 days and we are still unable to confirm that you"
message += " have signed our CLA. We sincerely appreciate your submission and hope that"
message += " you will register and resubmit this Pull Request in the future!"
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_TIMEOUT, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForMerge(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit merge for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was merged."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_MERGED, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForAbandon(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit abandon for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was abandoned."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_ABANDONED, state.CurrentState == BOTSTATE_NEW)
}
func GetGerritRepo(owner, repo string) string {
return repo
}
func TransferPrToGerrit(owner, repo string, prnum int, prstate *PrStateInfo) error {
log.Printf("Attempting to gerrit transfer PR %s/%s/%d %v", owner, repo, prnum, prstate)
csstate, err := GetChangesetState(owner, repo, prnum)
if err != nil {
return err
}
if csstate != nil {
if csstate.Status == "ABANDONED" {
return ClosePrForAbandon(owner, repo, prnum, prstate)
}
if csstate.Status == "MERGED" {
return ClosePrForMerge(owner, repo, prnum, prstate)
}
if csstate.CurrentSha1 == prstate.CurrentSha1 {
// Already up to date!
log.Printf("Nothing to do, already up to date.")
return nil
}
}
thisChangeId := RandomChangeId()
if csstate != nil {
thisChangeId = csstate.ChangeId
}
pr, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return makeErr("failed to request pull request data", err)
}
gitRepoPath := "/tmp/gtest"
os.RemoveAll(gitRepoPath)
gitRepo, err := git.Clone(*pr.Head.Repo.CloneURL, gitRepoPath, &git.CloneOptions{
CheckoutBranch: *pr.Head.Ref,
})
if err != nil {
return makeErr("failed to clone repository head", err)
}
err = SquashHead(gitRepo, *pr.Commits, *pr.Title, thisChangeId)
if err != nil {
return err
}
log.Printf("Generated squash commit with ChangeId `%s`", thisChangeId)
if isDryRun {
log.Printf("Skipping remote push and comment due to dry run.")
return nil
}
reviewRemote, err := gitRepo.Remotes.Create("review",
"ssh://"+gerritUser+"@"+gerritHost+":29418/"+GetGerritRepo(owner, repo))
if err != nil {
return makeErr("failed to add gerrit as a remote", err)
}
log.Printf("Assigned remote.")
statusText := ""
err = reviewRemote.Push([]string{"HEAD:refs/for/master"}, &git.PushOptions{
RemoteCallbacks: git.RemoteCallbacks{
PushUpdateReferenceCallback: func(refname, status string) git.ErrorCode {
statusText = status
return 0
},
CredentialsCallback: gerritGitCredentialsHandler,
CertificateCheckCallback: func(cert *git.Certificate, valid bool, hostname string) git.ErrorCode {
return 0
},
},
})
if err != nil {
return makeErr("failed to push to gerrit", err)
}
log.Printf("Successfully pushed to Gerrit with status `%s`", statusText)
if statusText != "" {
if statusText == "no new changes" && prstate != nil &&
(prstate.CurrentState == BOTSTATE_CREATED || prstate.CurrentState == BOTSTATE_UPDATED) {
// Nothing changed
return nil
}
return makeErr("failed to upload to gerrit", errors.New(statusText))
}
var reviewMessage string
reviewMessage += fmt.Sprintf("Change-Set generated from https://github.com/%s/%s/pull/%d (commit:%s).",
owner, repo, prnum, *pr.Head.SHA)
reviewMessage += "\n" + BOT_IDENT_TAG
_, _, err = gerritClient.Changes.SetReview(thisChangeId, "current", &gerrit.ReviewInput{
Message: reviewMessage,
})
if err != nil {
return makeErr("failed to publish comment to gerrit", err)
}
if csstate == nil {
csstate, err = GetChangesetState(owner, repo, prnum)
if err != nil {
return makeErr("failed to retrieve updated change from gerrit", err)
}
if csstate == nil {
return makeErr("could not locate pushed change on gerrit", err)
}
}
err = SendPushedText(owner, repo, prnum, prstate, csstate.ChangeNum)
if err != nil {
return err
}
return nil
}
func ProcessPullRequest(owner, repo string, prnum int, noCheckCla bool) error {
state, err := GetPullRequestState(owner, repo, prnum)
if err != nil {
return err
}
if state.CurrentState == BOTSTATE_ABANDONED || state.CurrentState == BOTSTATE_TIMEOUT ||
state.CurrentState == BOTSTATE_MERGED {
// That's odd... This ticket should not even be open...
// Let's do nothing in case someone intentionally reopened it.
return nil
}
if state.CurrentState == BOTSTATE_NEW || state.CurrentState == BOTSTATE_NO_CLA ||
state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
// Check CLA
allSigned, authorEmails, err := VerifyPrAuthorClas(owner, repo, prnum)
if err != nil {
return err
}
if noCheckCla {
log.Printf("Skipping no_cla warning for this pull request.")
allSigned = true
}
if !allSigned {
if state.CurrentState == BOTSTATE_NO_CLA {
// If we already sent the no_cla message, let's not do it again,
// instead we should check if this is now timed out...
if time.Now().After(state.LastUpdatedTime.Add(10 * 24 * time.Hour)) {
return ClosePrForTimeout(owner, repo, prnum, state)
}
log.Printf("Skipping this pull request as no_cla was already sent.")
return nil
}
return SendClaText(owner, repo, prnum, state, authorEmails)
} else {
// Need to do normal process
return TransferPrToGerrit(owner, repo, prnum, state)
}
}
return makeErr("unexpected pull request state", nil)
}
func ProcessProject(owner, repo string) error {
log.Printf("Processing project %s/%s", owner, repo)
prs, _, err := githubClient.PullRequests.List(context.Background(), owner, repo, &github.PullRequestListOptions{
State: "open",
})
if err != nil {
return makeErr("failed to list all pull requests", err)
}
for i := 0; i < len(prs); i++ {
prNum := *prs[i].Number
log.Printf("Processing pull request %d", prNum)
err := ProcessPullRequest(owner, repo, prNum, false)
if err != nil |
}
log.Printf("Processed project %s/%s", owner, repo)
return nil
}
func ProcessAllProjects() error {
log.Printf("Processing all projects")
for i := 0; i < len(allRepos); i++ {
thisRepo := allRepos[i]
err := ProcessProject(thisRepo.Owner, thisRepo.Name)
if err != nil {
return err
}
}
log.Printf("Processed all projects")
return nil
}
func initClients() error {
_, err := os.Stat(gerritPrivateKey)
if err != nil {
return makeErr("failed to locate gerrit private key", err)
}
_, err = os.Stat(gerritPublicKey)
if err != nil {
return makeErr("failed to locate gerrit public key", err)
}
err = initGerritClient()
if err != nil {
return err
}
err = initGitHubClient()
if err != nil {
return err
}
return nil
}
func readConfig() error {
configBytes, err := ioutil.ReadFile("./config.json")
if err != nil {
return makeErr("failed to read config file at `./config.json`", err)
}
var configData struct {
DryRun bool
GitHub struct {
User string
Token string
Owners []string
}
Gerrit struct {
Host string
User string
Pass string
Keys struct {
Public string
Private string
}
ClaGroupName string
}
Repos []struct {
Owner string
Name string
Repo string
}
}
err = json.Unmarshal(configBytes, &configData)
if err != nil {
return makeErr("failed to parse config file", err)
}
isDryRun = configData.DryRun
botOwners = configData.GitHub.Owners
githubUser = configData.GitHub.User
githubToken = configData.GitHub.Token
gerritHost = configData.Gerrit.Host
gerritUser = configData.Gerrit.User
gerritPass = configData.Gerrit.Pass
gerritPublicKey = configData.Gerrit.Keys.Public
gerritPrivateKey = configData.Gerrit.Keys.Private
gerritClaGroupName = configData.Gerrit.ClaGroupName
allRepos = nil
for i := 0; i < len(configData.Repos); i++ {
allRepos = append(allRepos, RepoInfo{
Owner: configData.Repos[i].Owner,
Name: configData.Repos[i].Name,
Repo: configData.Repos[i].Repo,
})
}
return nil
}
func githubHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received GitHub webhook")
var data github.WebHookPayload
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&data)
if err != nil {
log.Printf("Failed to parse GitHub data %+v", err)
return
}
fmt.Fprintf(w, "success")
if data.Repo == nil || data.Repo.Owner == nil {
// No repository data in the webhook
return
}
if data.Sender != nil && IsGitHubUserBot(data.Sender) {
// Ignore hooks triggered by the bot itself.
return
}
ownerName := *data.Repo.Owner.Login
repoName := *data.Repo.Name
go func() {
err := ProcessProject(ownerName, repoName)
if err != nil {
log.Printf("githubHttpHandler error: %+v\n", err)
}
}()
}
func gerritHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Gerrit webhook")
fmt.Fprintf(w, "success")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("gerritHttpHandler error: %+v\n", err)
}
}()
}
func forceCheckHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Check request")
fmt.Fprintf(w, "Running!")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("forceCheckHttpHandler error: %+v\n", err)
}
}()
}
func forceTransferHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Transfer request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
prnumParsed, err := strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
err = ProcessPullRequest(owner, repo, prnumParsed, true)
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
fmt.Fprintf(w, "success")
}
func checkClaHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Check CLA Request")
var target string
var err error
var res bool
email := r.FormValue("email")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
if email != "" {
target = email
res, err = VerifyEmailCla(email)
} else if owner != "" && repo != "" {
// Use a separate variable for the Atoi error so the outer err checked
// below is not shadowed inside this block.
prnumParsed, perr := strconv.Atoi(prnum)
if perr != nil {
err = makeErr("You specified an invalid numeric `prnum` value", perr)
} else {
target = fmt.Sprintf("github.com/%s/%s/%d", owner, repo, prnumParsed)
res, _, err = VerifyPrAuthorClas(owner, repo, prnumParsed)
}
} else {
fmt.Fprintf(w, "You must specify either an email or owner/repo/prnum.")
return
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
resText := "NOT SIGNED"
if res {
resText = "signed"
}
fmt.Fprintf(w, "CLA Status for `%s` is: %s\n", target, resText)
}
func proxyRepoStats(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Proxy Repo Stats Request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
clones, _, err := githubClient.Repositories.ListTrafficClones(context.Background(), owner, repo, &github.TrafficBreakdownOptions{
Per: "day",
})
if err != nil {
fmt.Fprintf(w, "Error: %s", err)
return
}
jsonWriter := json.NewEncoder(w)
jsonWriter.Encode(clones)
}
func rootHttpHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "This is just a bot...")
}
func main() {
log.Printf("Reading configuration...")
err := readConfig()
if err != nil {
log.Printf("Failed to initalize configuration:")
log.Printf("%+v", err)
return
}
log.Printf("Initializing API clients...")
err = initClients()
if err != nil {
log.Printf("Failed to initalize clients:")
log.Printf("%+v", err)
return
}
log.Printf("Starting web server on :4455...")
http.HandleFunc("/", rootHttpHandler)
http.HandleFunc("/github", githubHttpHandler)
http.HandleFunc("/gerrit", gerritHttpHandler)
http.HandleFunc("/forcecheck", forceCheckHttpHandler)
http.HandleFunc("/forcetransfer", forceTransferHandler)
http.HandleFunc("/checkcla", checkClaHttpHandler)
http.HandleFunc("/repostats", proxyRepoStats)
err = http.ListenAndServe(":4455", nil)
if err != nil {
log.Printf("Failed to start http listening.")
log.Printf("%+v", err)
return
}
/*
ownerName := "couchbase"
repoName := "couchnode"
prNum := 0
if prNum > 0 {
err = ProcessPullRequest(ownerName, repoName, prNum)
} else {
err = ProcessProject(ownerName, repoName)
}
if err != nil {
log.Printf("An error occured during processing:")
log.Printf("%+v", err)
}
*/
}
| {
return err
} | conditional_block |
main.go | package main
import (
"context"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/andygrunwald/go-gerrit"
"github.com/google/go-github/github"
"github.com/gregjones/httpcache"
git "github.com/libgit2/git2go/v31"
"golang.org/x/oauth2"
)
var isDryRun bool = true
const BOT_IDENT_TAG = "::SDKBOT/PR"
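// RedactEmail masks the middle of an email address, keeping roughly the outer
// 60% of the local part and 70% of the domain visible so the author can still
// recognize their own address in a public comment.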
func RedactEmail(email string) string {
parts := strings.Split(email, "@")
if len(parts) != 2 {
return "REDACTION FAILED"
}
user := parts[0]
domain := parts[1]
userAllow := float32(len(user)) * 0.6
domainAllow := float32(len(domain)) * 0.7
userAllowSide := int(userAllow / 2)
domainAllowSide := int(domainAllow / 2)
userRightStart := len(user) - userAllowSide
domainRightStart := len(domain) - domainAllowSide
user = user[0:userAllowSide] + strings.Repeat("*", userRightStart-userAllowSide) + user[userRightStart:]
domain = domain[0:domainAllowSide] + strings.Repeat("*", domainRightStart-domainAllowSide) + domain[domainRightStart:]
return user + "@" + domain
}
func RandomChangeId() string |
type subError struct {
msg string
err error
}
func (e subError) Error() string {
if e.err != nil {
return e.msg + ": " + e.err.Error()
} else {
return e.msg
}
}
func makeErr(msg string, err error) error {
return subError{msg, err}
}
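// SquashHead rewrites the top squashCount commits of HEAD as a single commit
// (titled mergeCommitTitle when squashing more than one) and appends the given
// Gerrit Change-Id to the resulting commit message.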
func SquashHead(repo *git.Repository, squashCount int, mergeCommitTitle, changeId string) error {
log.Printf("Generating squash commit for `HEAD~0` to `HEAD~%d`", squashCount)
headRef, err := repo.Head()
if err != nil {
return makeErr("failed to get head reference", err)
}
topCommitId := headRef.Target()
topCommit, err := repo.LookupCommit(topCommitId)
if err != nil {
return makeErr("failed to locate head commit", nil)
}
var baseCommit *git.Commit
var squashCommits []*git.Commit
{
curCommit := topCommit
for i := 0; i < squashCount; i++ {
squashCommits = append(squashCommits, curCommit)
curCommit = curCommit.Parent(0)
}
baseCommit = curCommit
}
log.Printf("Base Commit is `%s`", baseCommit.Id().String())
var newCommitAuthor *git.Signature
var newCommitCommitter *git.Signature
var newCommitMsg string
if len(squashCommits) == 1 {
newCommitAuthor = squashCommits[0].Author()
newCommitCommitter = squashCommits[0].Committer()
newCommitMsg = strings.TrimSpace(squashCommits[0].Message()) + "\n"
} else {
newCommitMsg = ""
newCommitMsg += mergeCommitTitle + "\n\n"
for i := 0; i < len(squashCommits); i++ {
curCommit := squashCommits[i]
newCommitMsg += "----\n"
newCommitMsg += strings.TrimSpace(curCommit.Message()) + "\n"
newCommitAuthor = curCommit.Author()
newCommitCommitter = curCommit.Committer()
}
}
newCommitMsg += "\nChange-Id: " + changeId
err = repo.ResetToCommit(baseCommit, git.ResetSoft, nil)
if err != nil {
return makeErr("failed to reset to base commit", err)
}
idx, err := repo.Index()
if err != nil {
return makeErr("failed to retrieve repo index", err)
}
err = idx.Write()
if err != nil {
return makeErr("failed to write squash index", err)
}
newCommitTreeId, err := idx.WriteTree()
if err != nil {
return makeErr("failed to write squash tree", err)
}
newCommitTree, err := repo.LookupTree(newCommitTreeId)
if err != nil {
return makeErr("failed to find created squash tree", err)
}
log.Printf("Generated new commit message:\n%s", newCommitMsg)
_, err = repo.CreateCommit("HEAD", newCommitAuthor, newCommitCommitter, newCommitMsg, newCommitTree, baseCommit)
if err != nil {
return makeErr("failed to generate squash commit", err)
}
return nil
}
type CsStateInfo struct {
ChangeNum int
ChangeId string
Status string
CurrentSha1 string
}
func GetChangesetState(owner, repo string, prnum int) (*CsStateInfo, error) {
log.Printf("Retrieving change set for %s/%s/%d", owner, repo, prnum)
path := fmt.Sprintf("github.com/%s/%s/pull/%d", owner, repo, prnum)
changes, _, err := gerritClient.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
Query: []string{path},
},
ChangeOptions: gerrit.ChangeOptions{
AdditionalFields: []string{"messages"},
},
})
if err != nil {
return nil, makeErr("failed to gerrit query for changes", err)
}
var foundChangeset *gerrit.ChangeInfo
for i := 0; i < len(*changes); i++ {
change := &(*changes)[i]
for j := 0; j < len(change.Messages); j++ {
if !strings.Contains(change.Messages[j].Message, BOT_IDENT_TAG) {
continue
}
if strings.Contains(change.Messages[j].Message, path) {
if foundChangeset != nil {
return nil, makeErr("found multiple possible changesets", nil)
}
foundChangeset = change
break
}
}
}
if foundChangeset == nil {
return nil, nil
}
commitMatcher, err := regexp.Compile("commit:([0-9a-zA-Z]+)")
if err != nil {
return nil, makeErr("failed to compile commit sha1 finding regexp", err)
}
var latestSha1 string
for i := 0; i < len(foundChangeset.Messages); i++ {
commitMatches := commitMatcher.FindStringSubmatch(foundChangeset.Messages[i].Message)
if len(commitMatches) == 2 {
latestSha1 = commitMatches[1]
}
}
return &CsStateInfo{
ChangeNum: foundChangeset.Number,
ChangeId: foundChangeset.ChangeID,
Status: foundChangeset.Status,
CurrentSha1: latestSha1,
}, nil
}
type RepoInfo struct {
Owner string
Name string
Repo string
}
var botOwners []string
var githubClient *github.Client
var githubUser string
var githubToken string
var gerritClient *gerrit.Client
var gerritHost string
var gerritUser string
var gerritPass string
var gerritPublicKey string
var gerritPrivateKey string
var gerritClaGroupName string
var allRepos []RepoInfo
func gerritGitCredentialsHandler(url string, username_from_url string, allowed_types git.CredType) (*git.Cred, error) {
creds, err := git.NewCredSshKey(gerritUser, gerritPublicKey, gerritPrivateKey, "")
return creds, err
}
func initGerritClient() error {
client, err := gerrit.NewClient("https://"+gerritHost+"/", nil)
if err != nil {
return makeErr("failed to create gerrit client", err)
}
client.Authentication.SetBasicAuth(gerritUser, gerritPass)
gerritClient = client
return nil
}
func initGitHubClient() error {
tx := httpcache.NewMemoryCacheTransport()
tc := &http.Client{
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: githubToken},
),
Base: tx,
},
}
githubClient = github.NewClient(tc)
return nil
}
type PrStateInfo struct {
CurrentState string
LastUpdatedTime time.Time
CurrentSha1 string
NumOfCommits int
}
func IsGitHubUserBot(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
if *user.Login == githubUser {
return true
}
return false
}
func IsGitHubUserBotOwner(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
for i := 0; i < len(botOwners); i++ {
if *user.Login == botOwners[i] {
return true
}
}
return IsGitHubUserBot(user)
}
const (
BOTSTATE_NEW = ""
BOTSTATE_NO_CLA = "no_cla"
BOTSTATE_CREATED = "created"
BOTSTATE_UPDATED = "updated"
BOTSTATE_ABANDONED = "abandoned"
BOTSTATE_MERGED = "merged"
BOTSTATE_TIMEOUT = "timeout"
)
func GetPullRequestState(owner, repo string, prnum int) (*PrStateInfo, error) {
var parseableStateNames = []string{
BOTSTATE_NO_CLA,
BOTSTATE_CREATED,
BOTSTATE_UPDATED,
BOTSTATE_ABANDONED,
BOTSTATE_MERGED,
BOTSTATE_TIMEOUT,
// backwards compatibility names
"pushed",
"too_many_commits",
"closed",
}
log.Printf("Retrieving PR state for %s/%s/%d", owner, repo, prnum)
info, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return nil, makeErr("failed to retieve pull request info", err)
}
comments, _, err := githubClient.Issues.ListComments(context.Background(), owner, repo, prnum, nil)
if err != nil {
return nil, makeErr("failed to retrieve pull request comments", err)
}
var lastStateTime time.Time
var lastStateName string
var lastUpdatedTime time.Time
for i := 0; i < len(comments); i++ {
if comments[i].CreatedAt.After(lastUpdatedTime) || comments[i].UpdatedAt.After(lastUpdatedTime) {
lastUpdatedTime = *comments[i].UpdatedAt
}
if !IsGitHubUserBotOwner(comments[i].User) {
continue
}
for j := 0; j < len(parseableStateNames); j++ {
if strings.Contains(*comments[i].Body, BOT_IDENT_TAG+":"+parseableStateNames[j]) {
if comments[i].CreatedAt.After(lastStateTime) {
lastStateName = parseableStateNames[j]
lastStateTime = *comments[i].CreatedAt
}
}
}
}
// For backwards compat...
if lastStateName == "too_many_commits" {
lastStateName = BOTSTATE_NEW
} else if lastStateName == "pushed" {
lastStateName = BOTSTATE_UPDATED
} else if lastStateName == "closed" {
lastStateName = BOTSTATE_ABANDONED
}
if lastUpdatedTime.IsZero() {
lastUpdatedTime = time.Now()
}
return &PrStateInfo{
CurrentState: lastStateName,
LastUpdatedTime: lastUpdatedTime,
CurrentSha1: *info.Head.SHA,
NumOfCommits: *info.Commits,
}, nil
}
func VerifyEmailCla(email string) (bool, error) {
log.Printf("Verifying CLA signed for `%s`", email)
if email == "" {
return false, makeErr("you must specify a non-empty email", nil)
}
groups, _, err := gerritClient.Accounts.ListGroups(email)
if err != nil {
if strings.Contains(err.Error(), "Not Found") {
log.Printf("Email was not found on Gerrit")
return false, nil
}
log.Printf("An error occured trying to locate the user on Gerrit")
return false, makeErr("failed to retrieve gerrit user groups", err)
}
hasClaGroup := false
for i := 0; i < len(*groups); i++ {
if (*groups)[i].Name == gerritClaGroupName {
hasClaGroup = true
}
}
if hasClaGroup {
log.Printf("The user was located and has signed the CLA")
} else {
log.Printf("The user was located, but they did not sign the CLA")
}
return hasClaGroup, nil
}
func VerifyPrAuthorClas(owner, repo string, prnum int) (bool, []string, error) {
log.Printf("Verifying CLA signed for PR %s/%s/%d", owner, repo, prnum)
commits, _, err := githubClient.PullRequests.ListCommits(context.Background(), owner, repo, prnum, nil)
if err != nil {
return false, nil, makeErr("failed to retrieve pull request commits", err)
}
authorEmailMap := make(map[string]bool)
for i := 0; i < len(commits); i++ {
authorEmail := *commits[i].Commit.Author.Email
authorEmailMap[authorEmail] = false
}
var emails []string
for authorEmail := range authorEmailMap {
emails = append(emails, authorEmail)
}
allSigned := true
for authorEmail := range authorEmailMap {
signed, err := VerifyEmailCla(authorEmail)
if err != nil {
return false, emails, err
}
if !signed {
allSigned = false
}
}
return allSigned, emails, nil
}
func SendPrStateCommentAndClose(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment and close for '%s' due to dry run.", state)
return nil
}
newState := "closed"
_, _, err := githubClient.PullRequests.Edit(context.Background(), owner, repo, prnum, &github.PullRequest{
State: &newState,
})
if err != nil {
return makeErr("failed to close pull request", err)
}
return SendPrStateComment(owner, repo, prnum, message, state, is_first)
}
func SendPrStateComment(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment for '%s' due to dry run.", state)
return nil
}
var messageBody string
if is_first {
messageBody += "Thanks for the pull request!! To ensure quality review, Couchbase employs"
messageBody += " a [code review system](http://review.couchbase.org/) based on"
messageBody += " [Gerrit](https://www.gerritcodereview.com/) to manage the workflow of changes"
messageBody += " in addition to tracking our contributor agreements.\n\n"
}
messageBody += strings.TrimSpace(message)
messageBody += "\n\n" + BOT_IDENT_TAG + ":" + state
_, _, err := githubClient.Issues.CreateComment(context.Background(), owner, repo, prnum, &github.IssueComment{
Body: &messageBody,
})
if err != nil {
return makeErr("failed to comment on pull request", err)
}
return nil
}
func SendClaText(owner, repo string, prnum int, state *PrStateInfo, emails []string) error {
log.Printf("Sending no_cla for %s/%s/%d", owner, repo, prnum)
message := "To get this change in and collaborate in code review, please register on Gerrit"
message += " and accept our CLA. The easiest way to do this is to follow the link below,"
message += " sign in with your GitHub account and then follow through the steps provided"
message += " on that page to sign an 'Individual' agreement:"
message += " http://review.couchbase.org/#/settings/new-agreement."
message += "\n\n"
message += "Keep in mind that the emails we are seeing on the commits are: "
for i, email := range emails {
if i != 0 {
message += ", "
}
message += "`" + RedactEmail(email) + "`"
}
message += "\n\n"
message += "Note: Please contact us if you have any issues registering with Gerrit!"
message += " If you have not signed our CLA within 7 days, the Pull Request will be"
message += " automatically closed."
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_NO_CLA, state.CurrentState == BOTSTATE_NEW)
}
func SendPushedText(owner, repo string, prnum int, state *PrStateInfo, changeNum int) error {
log.Printf("Sending pushed for %s/%s/%d", owner, repo, prnum)
message := "Your changes (commit: " + state.CurrentSha1 + ") have been pushed to the Couchbase Review Site:\n"
message += "http://review.couchbase.org/" + strconv.FormatInt(int64(changeNum), 10)
if state.NumOfCommits > 1 {
message += "\n\n"
message += "Note: As your pull request contains multiple commits, we have"
message += " performed an automatic squash of these commits into a single"
message += " change-set. If this is not the desired behaviour, please"
message += " consider submitting a pull request per discrete feature."
}
if state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_UPDATED, false)
} else {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_CREATED, state.CurrentState == BOTSTATE_NEW)
}
}
func ClosePrForTimeout(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to timeout for %s/%s/%d", owner, repo, prnum)
message := "Unfortunately it has been 7 days and we are still unable to confirm that you"
message += " have signed our CLA. We sincerely appreciate your submission and hope that"
message += " you will register and resubmit this Pull Request in the future!"
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_TIMEOUT, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForMerge(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit merge for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was merged."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_MERGED, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForAbandon(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit abandon for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was abandoned."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_ABANDONED, state.CurrentState == BOTSTATE_NEW)
}
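// GetGerritRepo maps a GitHub owner/repo pair to the corresponding Gerrit
// project name; currently the repository name is used unchanged.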
func GetGerritRepo(owner, repo string) string {
return repo
}
func TransferPrToGerrit(owner, repo string, prnum int, prstate *PrStateInfo) error {
log.Printf("Attempting to gerrit transfer PR %s/%s/%d %v", owner, repo, prnum, prstate)
csstate, err := GetChangesetState(owner, repo, prnum)
if err != nil {
return err
}
if csstate != nil {
if csstate.Status == "ABANDONED" {
return ClosePrForAbandon(owner, repo, prnum, prstate)
}
if csstate.Status == "MERGED" {
return ClosePrForMerge(owner, repo, prnum, prstate)
}
if csstate.CurrentSha1 == prstate.CurrentSha1 {
// Already up to date!
log.Printf("Nothing to do, already up to date.")
return nil
}
}
thisChangeId := RandomChangeId()
if csstate != nil {
thisChangeId = csstate.ChangeId
}
pr, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return makeErr("failed to request pull request data", err)
}
gitRepoPath := "/tmp/gtest"
os.RemoveAll(gitRepoPath)
gitRepo, err := git.Clone(*pr.Head.Repo.CloneURL, gitRepoPath, &git.CloneOptions{
CheckoutBranch: *pr.Head.Ref,
})
if err != nil {
return makeErr("failed to clone repository head", err)
}
err = SquashHead(gitRepo, *pr.Commits, *pr.Title, thisChangeId)
if err != nil {
return err
}
log.Printf("Generated squash commit with ChangeId `%s`", thisChangeId)
if isDryRun {
log.Printf("Skipping remote push and comment due to dry run.")
return nil
}
reviewRemote, err := gitRepo.Remotes.Create("review",
"ssh://"+gerritUser+"@"+gerritHost+":29418/"+GetGerritRepo(owner, repo))
if err != nil {
return makeErr("failed to add gerrit as a remote", err)
}
log.Printf("Assigned remote.")
statusText := ""
err = reviewRemote.Push([]string{"HEAD:refs/for/master"}, &git.PushOptions{
RemoteCallbacks: git.RemoteCallbacks{
PushUpdateReferenceCallback: func(refname, status string) git.ErrorCode {
statusText = status
return 0
},
CredentialsCallback: gerritGitCredentialsHandler,
CertificateCheckCallback: func(cert *git.Certificate, valid bool, hostname string) git.ErrorCode {
return 0
},
},
})
if err != nil {
return makeErr("failed to push to gerrit", err)
}
log.Printf("Successfully pushed to Gerrit with status `%s`", statusText)
if statusText != "" {
if statusText == "no new changes" && prstate != nil &&
(prstate.CurrentState == BOTSTATE_CREATED || prstate.CurrentState == BOTSTATE_UPDATED) {
// Nothing changed
return nil
}
return makeErr("failed to upload to gerrit", errors.New(statusText))
}
var reviewMessage string
reviewMessage += fmt.Sprintf("Change-Set generated from https://github.com/%s/%s/pull/%d (commit:%s).",
owner, repo, prnum, *pr.Head.SHA)
reviewMessage += "\n" + BOT_IDENT_TAG
_, _, err = gerritClient.Changes.SetReview(thisChangeId, "current", &gerrit.ReviewInput{
Message: reviewMessage,
})
if err != nil {
return makeErr("failed to publish comment to gerrit", err)
}
if csstate == nil {
csstate, err = GetChangesetState(owner, repo, prnum)
if err != nil {
return makeErr("failed to retrieve updated change from gerrit", err)
}
if csstate == nil {
return makeErr("could not locate pushed change on gerrit", err)
}
}
err = SendPushedText(owner, repo, prnum, prstate, csstate.ChangeNum)
if err != nil {
return err
}
return nil
}
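// ProcessPullRequest drives the state machine for a single pull request:
// closed states are left alone, the CLA is enforced (unless noCheckCla is
// set), and otherwise the change is transferred to Gerrit.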
func ProcessPullRequest(owner, repo string, prnum int, noCheckCla bool) error {
state, err := GetPullRequestState(owner, repo, prnum)
if err != nil {
return err
}
if state.CurrentState == BOTSTATE_ABANDONED || state.CurrentState == BOTSTATE_TIMEOUT ||
state.CurrentState == BOTSTATE_MERGED {
// That's odd... This ticket should not even be open...
// Let's do nothing in case someone intentionally reopened it.
return nil
}
if state.CurrentState == BOTSTATE_NEW || state.CurrentState == BOTSTATE_NO_CLA ||
state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
// Check CLA
allSigned, authorEmails, err := VerifyPrAuthorClas(owner, repo, prnum)
if err != nil {
return err
}
if noCheckCla {
log.Printf("Skipping no_cla warning for this pull request.")
allSigned = true
}
if !allSigned {
if state.CurrentState == BOTSTATE_NO_CLA {
// If we already sent the no_cla message, let's not send it again;
// instead, check whether this pull request has now timed out.
if time.Now().After(state.LastUpdatedTime.Add(10 * 24 * time.Hour)) {
return ClosePrForTimeout(owner, repo, prnum, state)
}
log.Printf("Skipping this pull request as no_cla was already sent.")
return nil
}
return SendClaText(owner, repo, prnum, state, authorEmails)
} else {
// Need to do normal process
return TransferPrToGerrit(owner, repo, prnum, state)
}
}
return makeErr("unexpected pull request state", nil)
}
func ProcessProject(owner, repo string) error {
log.Printf("Processing project %s/%s", owner, repo)
prs, _, err := githubClient.PullRequests.List(context.Background(), owner, repo, &github.PullRequestListOptions{
State: "open",
})
if err != nil {
return makeErr("failed to list all pull requests", err)
}
for i := 0; i < len(prs); i++ {
prNum := *prs[i].Number
log.Printf("Processing pull request %d", prNum)
err := ProcessPullRequest(owner, repo, prNum, false)
if err != nil {
return err
}
}
log.Printf("Processed project %s/%s", owner, repo)
return nil
}
func ProcessAllProjects() error {
log.Printf("Processing all projects")
for i := 0; i < len(allRepos); i++ {
thisRepo := allRepos[i]
err := ProcessProject(thisRepo.Owner, thisRepo.Name)
if err != nil {
return err
}
}
log.Printf("Processed all projects")
return nil
}
func initClients() error {
_, err := os.Stat(gerritPrivateKey)
if err != nil {
return makeErr("failed to locate gerrit private key", err)
}
_, err = os.Stat(gerritPublicKey)
if err != nil {
return makeErr("failed to locate gerrit public key", err)
}
err = initGerritClient()
if err != nil {
return err
}
err = initGitHubClient()
if err != nil {
return err
}
return nil
}
func readConfig() error {
configBytes, err := ioutil.ReadFile("./config.json")
if err != nil {
return makeErr("failed to read config file at `./config.json`", err)
}
var configData struct {
DryRun bool
GitHub struct {
User string
Token string
Owners []string
}
Gerrit struct {
Host string
User string
Pass string
Keys struct {
Public string
Private string
}
ClaGroupName string
}
Repos []struct {
Owner string
Name string
Repo string
}
}
err = json.Unmarshal(configBytes, &configData)
if err != nil {
return makeErr("failed to parse config file", err)
}
isDryRun = configData.DryRun
botOwners = configData.GitHub.Owners
githubUser = configData.GitHub.User
githubToken = configData.GitHub.Token
gerritHost = configData.Gerrit.Host
gerritUser = configData.Gerrit.User
gerritPass = configData.Gerrit.Pass
gerritPublicKey = configData.Gerrit.Keys.Public
gerritPrivateKey = configData.Gerrit.Keys.Private
gerritClaGroupName = configData.Gerrit.ClaGroupName
allRepos = nil
for i := 0; i < len(configData.Repos); i++ {
allRepos = append(allRepos, RepoInfo{
Owner: configData.Repos[i].Owner,
Name: configData.Repos[i].Name,
Repo: configData.Repos[i].Repo,
})
}
return nil
}
func githubHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received GitHub webhook")
var data github.WebHookPayload
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&data)
if err != nil {
log.Printf("Failed to parse GitHub data %+v", err)
return
}
fmt.Fprintf(w, "success")
if data.Repo == nil || data.Repo.Owner == nil {
// No repository data in the webhook
return
}
if data.Sender != nil && IsGitHubUserBot(data.Sender) {
// Ignore hooks triggered by the bot itself.
return
}
ownerName := *data.Repo.Owner.Login
repoName := *data.Repo.Name
go func() {
err := ProcessProject(ownerName, repoName)
if err != nil {
log.Printf("githubHttpHandler error: %+v\n", err)
}
}()
}
func gerritHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Gerrit webhook")
fmt.Fprintf(w, "success")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("gerritHttpHandler error: %+v\n", err)
}
}()
}
func forceCheckHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Check request")
fmt.Fprintf(w, "Running!")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("forceCheckHttpHandler error: %+v\n", err)
}
}()
}
func forceTransferHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Transfer request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
prnumParsed, err := strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
err = ProcessPullRequest(owner, repo, prnumParsed, true)
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
fmt.Fprintf(w, "success")
}
func checkClaHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Check CLA Request")
var target string
var err error
var res bool
email := r.FormValue("email")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
if email != "" {
target = email
res, err = VerifyEmailCla(email)
} else if owner != "" && repo != "" {
// use the outer err so conversion and verification failures reach the
// shared error check below
var prnumParsed int
prnumParsed, err = strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
target = fmt.Sprintf("github.com/%s/%s/%d", owner, repo, prnumParsed)
res, _, err = VerifyPrAuthorClas(owner, repo, prnumParsed)
}
} else {
fmt.Fprintf(w, "You must specify either an email or owner/repo/prnum.")
return
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
resText := "NOT SIGNED"
if res {
resText = "signed"
}
fmt.Fprintf(w, "CLA Status for `%s` is: %s\n", target, resText)
}
func proxyRepoStats(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Proxy Repo Stats Request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
clones, _, err := githubClient.Repositories.ListTrafficClones(context.Background(), owner, repo, &github.TrafficBreakdownOptions{
Per: "day",
})
if err != nil {
fmt.Fprintf(w, "Error: %s", err)
return
}
jsonWriter := json.NewEncoder(w)
jsonWriter.Encode(clones)
}
func rootHttpHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "This is just a bot...")
}
func main() {
log.Printf("Reading configuration...")
err := readConfig()
if err != nil {
log.Printf("Failed to initialize configuration:")
log.Printf("%+v", err)
return
}
log.Printf("Initializing API clients...")
err = initClients()
if err != nil {
log.Printf("Failed to initialize clients:")
log.Printf("%+v", err)
return
}
log.Printf("Starting web server on :4455...")
http.HandleFunc("/", rootHttpHandler)
http.HandleFunc("/github", githubHttpHandler)
http.HandleFunc("/gerrit", gerritHttpHandler)
http.HandleFunc("/forcecheck", forceCheckHttpHandler)
http.HandleFunc("/forcetransfer", forceTransferHandler)
http.HandleFunc("/checkcla", checkClaHttpHandler)
http.HandleFunc("/repostats", proxyRepoStats)
err = http.ListenAndServe(":4455", nil)
if err != nil {
log.Printf("Failed to start http listening.")
log.Printf("%+v", err)
return
}
/*
ownerName := "couchbase"
repoName := "couchnode"
prNum := 0
if prNum > 0 {
err = ProcessPullRequest(ownerName, repoName, prNum)
} else {
err = ProcessProject(ownerName, repoName)
}
if err != nil {
log.Printf("An error occurred during processing:")
log.Printf("%+v", err)
}
*/
}
| {
b := make([]byte, sha1.Size)
rand.Read(b)
encData := sha1.Sum(b)
return "I" + hex.EncodeToString(encData[:])
} | identifier_body |
main.go | package main
import (
"context"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/andygrunwald/go-gerrit"
"github.com/google/go-github/github"
"github.com/gregjones/httpcache"
git "github.com/libgit2/git2go/v31"
"golang.org/x/oauth2"
)
var isDryRun bool = true
const BOT_IDENT_TAG = "::SDKBOT/PR"
func RedactEmail(email string) string {
parts := strings.Split(email, "@")
if len(parts) != 2 {
return "REDACTION FAILED"
}
user := parts[0]
domain := parts[1]
userAllow := float32(len(user)) * 0.6
domainAllow := float32(len(domain)) * 0.7
userAllowSide := int(userAllow / 2)
domainAllowSide := int(domainAllow / 2)
userRightStart := len(user) - userAllowSide
domainRightStart := len(domain) - domainAllowSide
user = user[0:userAllowSide] + strings.Repeat("*", userRightStart-userAllowSide) + user[userRightStart:]
domain = domain[0:domainAllowSide] + strings.Repeat("*", domainRightStart-domainAllowSide) + domain[domainRightStart:]
return user + "@" + domain
}
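// RandomChangeId generates a random Gerrit Change-Id: "I" followed by the
// hex-encoded SHA-1 of random bytes.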
func RandomChangeId() string {
b := make([]byte, sha1.Size)
rand.Read(b)
encData := sha1.Sum(b)
return "I" + hex.EncodeToString(encData[:])
}
type subError struct {
msg string
err error
}
func (e subError) Error() string {
if e.err != nil {
return e.msg + ": " + e.err.Error()
} else {
return e.msg
}
}
func makeErr(msg string, err error) error {
return subError{msg, err}
}
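// SquashHead soft-resets the repository to the commit squashCount commits
// below HEAD and creates a single replacement commit that concatenates the
// squashed commit messages and appends the Gerrit Change-Id trailer.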
func SquashHead(repo *git.Repository, squashCount int, mergeCommitTitle, changeId string) error {
log.Printf("Generating squash commit for `HEAD~0` to `HEAD~%d`", squashCount)
headRef, err := repo.Head()
if err != nil {
return makeErr("failed to get head reference", err)
}
topCommitId := headRef.Target()
topCommit, err := repo.LookupCommit(topCommitId)
if err != nil {
return makeErr("failed to locate head commit", err)
}
var baseCommit *git.Commit
var squashCommits []*git.Commit
{
curCommit := topCommit
for i := 0; i < squashCount; i++ {
squashCommits = append(squashCommits, curCommit)
curCommit = curCommit.Parent(0)
}
baseCommit = curCommit
}
log.Printf("Base Commit is `%s`", baseCommit.Id().String())
var newCommitAuthor *git.Signature
var newCommitCommitter *git.Signature
var newCommitMsg string
if len(squashCommits) == 1 {
newCommitAuthor = squashCommits[0].Author()
newCommitCommitter = squashCommits[0].Committer()
newCommitMsg = strings.TrimSpace(squashCommits[0].Message()) + "\n"
} else {
newCommitMsg = ""
newCommitMsg += mergeCommitTitle + "\n\n"
for i := 0; i < len(squashCommits); i++ {
curCommit := squashCommits[i]
newCommitMsg += "----\n"
newCommitMsg += strings.TrimSpace(curCommit.Message()) + "\n"
newCommitAuthor = curCommit.Author()
newCommitCommitter = curCommit.Committer()
}
}
newCommitMsg += "\nChange-Id: " + changeId
err = repo.ResetToCommit(baseCommit, git.ResetSoft, nil)
if err != nil {
return makeErr("failed to reset to base commit", err)
}
idx, err := repo.Index()
if err != nil {
return makeErr("failed to retrieve repo index", err)
}
err = idx.Write()
if err != nil {
return makeErr("failed to write squash index", err)
}
newCommitTreeId, err := idx.WriteTree()
if err != nil {
return makeErr("failed to write squash tree", err)
}
newCommitTree, err := repo.LookupTree(newCommitTreeId)
if err != nil {
return makeErr("failed to find created squash tree", err)
}
log.Printf("Generated new commit message:\n%s", newCommitMsg)
_, err = repo.CreateCommit("HEAD", newCommitAuthor, newCommitCommitter, newCommitMsg, newCommitTree, baseCommit)
if err != nil {
return makeErr("failed to generate squash commit", err)
}
return nil
}
type CsStateInfo struct {
ChangeNum int
ChangeId string
Status string
CurrentSha1 string
}
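// GetChangesetState queries Gerrit for the change associated with the given
// pull request (matched via the bot's identifying comment) and returns its
// number, Change-Id, status, and last pushed commit SHA, or nil if none exists.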
func GetChangesetState(owner, repo string, prnum int) (*CsStateInfo, error) {
log.Printf("Retrieving change set for %s/%s/%d", owner, repo, prnum)
path := fmt.Sprintf("github.com/%s/%s/pull/%d", owner, repo, prnum)
changes, _, err := gerritClient.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
Query: []string{path},
},
ChangeOptions: gerrit.ChangeOptions{
AdditionalFields: []string{"messages"},
},
})
if err != nil {
return nil, makeErr("failed to gerrit query for changes", err)
}
var foundChangeset *gerrit.ChangeInfo
for i := 0; i < len(*changes); i++ {
change := &(*changes)[i]
for j := 0; j < len(change.Messages); j++ {
if !strings.Contains(change.Messages[j].Message, BOT_IDENT_TAG) {
continue
}
if strings.Contains(change.Messages[j].Message, path) {
if foundChangeset != nil {
return nil, makeErr("found multiple possible changesets", nil)
}
foundChangeset = change
break
}
}
}
if foundChangeset == nil {
return nil, nil
}
commitMatcher, err := regexp.Compile("commit:([0-9a-zA-Z]+)")
if err != nil {
return nil, makeErr("failed to compile commit sha1 finding regexp", err)
}
var latestSha1 string
for i := 0; i < len(foundChangeset.Messages); i++ {
commitMatches := commitMatcher.FindStringSubmatch(foundChangeset.Messages[i].Message)
if len(commitMatches) == 2 {
latestSha1 = commitMatches[1]
}
}
return &CsStateInfo{
ChangeNum: foundChangeset.Number,
ChangeId: foundChangeset.ChangeID,
Status: foundChangeset.Status,
CurrentSha1: latestSha1,
}, nil
}
type RepoInfo struct {
Owner string
Name string
Repo string
}
var botOwners []string
var githubClient *github.Client
var githubUser string
var githubToken string
var gerritClient *gerrit.Client
var gerritHost string
var gerritUser string
var gerritPass string
var gerritPublicKey string
var gerritPrivateKey string
var gerritClaGroupName string
var allRepos []RepoInfo
func gerritGitCredentialsHandler(url string, username_from_url string, allowed_types git.CredType) (*git.Cred, error) {
creds, err := git.NewCredSshKey(gerritUser, gerritPublicKey, gerritPrivateKey, "")
return creds, err
}
func initGerritClient() error {
client, err := gerrit.NewClient("https://"+gerritHost+"/", nil)
if err != nil {
return makeErr("failed to create gerrit client", err)
}
client.Authentication.SetBasicAuth(gerritUser, gerritPass)
gerritClient = client
return nil
}
func initGitHubClient() error {
tx := httpcache.NewMemoryCacheTransport()
tc := &http.Client{
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: githubToken},
),
Base: tx,
},
}
githubClient = github.NewClient(tc)
return nil
}
type PrStateInfo struct {
CurrentState string
LastUpdatedTime time.Time
CurrentSha1 string
NumOfCommits int
}
func IsGitHubUserBot(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
if *user.Login == githubUser {
return true
}
return false
}
func IsGitHubUserBotOwner(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
for i := 0; i < len(botOwners); i++ {
if *user.Login == botOwners[i] {
return true
}
}
return IsGitHubUserBot(user)
}
const (
BOTSTATE_NEW = ""
BOTSTATE_NO_CLA = "no_cla"
BOTSTATE_CREATED = "created"
BOTSTATE_UPDATED = "updated"
BOTSTATE_ABANDONED = "abandoned"
BOTSTATE_MERGED = "merged"
BOTSTATE_TIMEOUT = "timeout"
)
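// GetPullRequestState reconstructs the bot's view of a pull request from its
// comment history: the most recent bot state tag, the last activity time, the
// head SHA, and the number of commits.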
func GetPullRequestState(owner, repo string, prnum int) (*PrStateInfo, error) {
var parseableStateNames = []string{
BOTSTATE_NO_CLA,
BOTSTATE_CREATED,
BOTSTATE_UPDATED,
BOTSTATE_ABANDONED,
BOTSTATE_MERGED,
BOTSTATE_TIMEOUT,
// backwards compatibility names
"pushed",
"too_many_commits",
"closed",
}
log.Printf("Retrieving PR state for %s/%s/%d", owner, repo, prnum)
info, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return nil, makeErr("failed to retrieve pull request info", err)
}
comments, _, err := githubClient.Issues.ListComments(context.Background(), owner, repo, prnum, nil)
if err != nil {
return nil, makeErr("failed to retrieve pull request comments", err)
}
var lastStateTime time.Time
var lastStateName string
var lastUpdatedTime time.Time
for i := 0; i < len(comments); i++ {
if comments[i].CreatedAt.After(lastUpdatedTime) || comments[i].UpdatedAt.After(lastUpdatedTime) {
lastUpdatedTime = *comments[i].UpdatedAt
}
if !IsGitHubUserBotOwner(comments[i].User) {
continue
}
for j := 0; j < len(parseableStateNames); j++ {
if strings.Contains(*comments[i].Body, BOT_IDENT_TAG+":"+parseableStateNames[j]) {
if comments[i].CreatedAt.After(lastStateTime) {
lastStateName = parseableStateNames[j]
lastStateTime = *comments[i].CreatedAt
}
}
}
}
// For backwards compat...
if lastStateName == "too_many_commits" {
lastStateName = BOTSTATE_NEW
} else if lastStateName == "pushed" {
lastStateName = BOTSTATE_UPDATED
} else if lastStateName == "closed" {
lastStateName = BOTSTATE_ABANDONED
}
if lastUpdatedTime.IsZero() {
lastUpdatedTime = time.Now()
}
return &PrStateInfo{
CurrentState: lastStateName,
LastUpdatedTime: lastUpdatedTime,
CurrentSha1: *info.Head.SHA,
NumOfCommits: *info.Commits,
}, nil
}
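// VerifyEmailCla reports whether the Gerrit account registered under the
// given email address belongs to the configured CLA group.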
func VerifyEmailCla(email string) (bool, error) {
log.Printf("Verifying CLA signed for `%s`", email)
if email == "" {
return false, makeErr("you must specify a non-empty email", nil)
}
groups, _, err := gerritClient.Accounts.ListGroups(email)
if err != nil {
if strings.Contains(err.Error(), "Not Found") {
log.Printf("Email was not found on Gerrit")
return false, nil
}
log.Printf("An error occurred trying to locate the user on Gerrit")
return false, makeErr("failed to retrieve gerrit user groups", err)
}
hasClaGroup := false
for i := 0; i < len(*groups); i++ {
if (*groups)[i].Name == gerritClaGroupName {
hasClaGroup = true
}
}
if hasClaGroup {
log.Printf("The user was located and has signed the CLA")
} else {
log.Printf("The user was located, but they did not sign the CLA")
}
return hasClaGroup, nil
}
func VerifyPrAuthorClas(owner, repo string, prnum int) (bool, []string, error) {
log.Printf("Verifying CLA signed for PR %s/%s/%d", owner, repo, prnum)
commits, _, err := githubClient.PullRequests.ListCommits(context.Background(), owner, repo, prnum, nil)
if err != nil {
return false, nil, makeErr("failed to retrieve pull request commits", err)
}
authorEmailMap := make(map[string]bool)
for i := 0; i < len(commits); i++ {
authorEmail := *commits[i].Commit.Author.Email
authorEmailMap[authorEmail] = false
}
var emails []string
for authorEmail := range authorEmailMap {
emails = append(emails, authorEmail)
}
allSigned := true
for authorEmail := range authorEmailMap {
signed, err := VerifyEmailCla(authorEmail)
if err != nil {
return false, emails, err
}
if !signed {
allSigned = false
}
}
return allSigned, emails, nil
}
func SendPrStateCommentAndClose(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment and close for '%s' due to dry run.", state)
return nil
}
| _, _, err := githubClient.PullRequests.Edit(context.Background(), owner, repo, prnum, &github.PullRequest{
State: &newState,
})
if err != nil {
return makeErr("failed to close pull request", err)
}
return SendPrStateComment(owner, repo, prnum, message, state, is_first)
}
func SendPrStateComment(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment for '%s' due to dry run.", state)
return nil
}
var messageBody string
if is_first {
messageBody += "Thanks for the pull request!! To ensure quality review, Couchbase employs"
messageBody += " a [code review system](http://review.couchbase.org/) based on"
messageBody += " [Gerrit](https://www.gerritcodereview.com/) to manage the workflow of changes"
messageBody += " in addition to tracking our contributor agreements.\n\n"
}
messageBody += strings.TrimSpace(message)
messageBody += "\n\n" + BOT_IDENT_TAG + ":" + state
_, _, err := githubClient.Issues.CreateComment(context.Background(), owner, repo, prnum, &github.IssueComment{
Body: &messageBody,
})
if err != nil {
return makeErr("failed to comment on pull request", err)
}
return nil
}
func SendClaText(owner, repo string, prnum int, state *PrStateInfo, emails []string) error {
log.Printf("Sending no_cla for %s/%s/%d", owner, repo, prnum)
message := "To get this change in and collaborate in code review, please register on Gerrit"
message += " and accept our CLA. The easiest way to do this is to follow the link below,"
message += " sign in with your GitHub account and then follow through the steps provided"
message += " on that page to sign an 'Individual' agreement:"
message += " http://review.couchbase.org/#/settings/new-agreement."
message += "\n\n"
message += "Keep in mind that the emails we are seeing on the commits are: "
for i, email := range emails {
if i != 0 {
message += ", "
}
message += "`" + RedactEmail(email) + "`"
}
message += "\n\n"
message += "Note: Please contact us if you have any issues registering with Gerrit!"
message += " If you have not signed our CLA within 7 days, the Pull Request will be"
message += " automatically closed."
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_NO_CLA, state.CurrentState == BOTSTATE_NEW)
}
func SendPushedText(owner, repo string, prnum int, state *PrStateInfo, changeNum int) error {
log.Printf("Sending pushed for %s/%s/%d", owner, repo, prnum)
message := "Your changes (commit: " + state.CurrentSha1 + ") have been pushed to the Couchbase Review Site:\n"
message += "http://review.couchbase.org/" + strconv.FormatInt(int64(changeNum), 10)
if state.NumOfCommits > 1 {
message += "\n\n"
message += "Note: As your pull request contains multiple commits, we have"
message += " performed an automatic squash of these commits into a single"
message += " change-set. If this is not the desired behaviour, please"
message += " consider submitting a pull request per discrete feature."
}
if state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_UPDATED, false)
} else {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_CREATED, state.CurrentState == BOTSTATE_NEW)
}
}
func ClosePrForTimeout(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to timeout for %s/%s/%d", owner, repo, prnum)
message := "Unfortunately it has been 7 days and we are still unable to confirm that you"
message += " have signed our CLA. We sincerely appreciate your submission and hope that"
message += " you will register and resubmit this Pull Request in the future!"
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_TIMEOUT, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForMerge(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit merge for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was merged."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_MERGED, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForAbandon(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit abandon for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was abandoned."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_ABANDONED, state.CurrentState == BOTSTATE_NEW)
}
func GetGerritRepo(owner, repo string) string {
return repo
}
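// TransferPrToGerrit clones the pull request branch, squashes it into a
// single commit carrying the (new or existing) Change-Id, pushes it to
// Gerrit, tags the change with a tracking comment, and notifies the PR.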
func TransferPrToGerrit(owner, repo string, prnum int, prstate *PrStateInfo) error {
log.Printf("Attempting to gerrit transfer PR %s/%s/%d %v", owner, repo, prnum, prstate)
csstate, err := GetChangesetState(owner, repo, prnum)
if err != nil {
return err
}
if csstate != nil {
if csstate.Status == "ABANDONED" {
return ClosePrForAbandon(owner, repo, prnum, prstate)
}
if csstate.Status == "MERGED" {
return ClosePrForMerge(owner, repo, prnum, prstate)
}
if csstate.CurrentSha1 == prstate.CurrentSha1 {
// Already up to date!
log.Printf("Nothing to do, already up to date.")
return nil
}
}
thisChangeId := RandomChangeId()
if csstate != nil {
thisChangeId = csstate.ChangeId
}
pr, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return makeErr("failed to request pull request data", err)
}
gitRepoPath := "/tmp/gtest"
os.RemoveAll(gitRepoPath)
gitRepo, err := git.Clone(*pr.Head.Repo.CloneURL, gitRepoPath, &git.CloneOptions{
CheckoutBranch: *pr.Head.Ref,
})
if err != nil {
return makeErr("failed to clone repository head", err)
}
err = SquashHead(gitRepo, *pr.Commits, *pr.Title, thisChangeId)
if err != nil {
return err
}
log.Printf("Generated squash commit with ChangeId `%s`", thisChangeId)
if isDryRun {
log.Printf("Skipping remote push and comment due to dry run.")
return nil
}
reviewRemote, err := gitRepo.Remotes.Create("review",
"ssh://"+gerritUser+"@"+gerritHost+":29418/"+GetGerritRepo(owner, repo))
if err != nil {
return makeErr("failed to add gerrit as a remote", err)
}
log.Printf("Assigned remote.")
statusText := ""
err = reviewRemote.Push([]string{"HEAD:refs/for/master"}, &git.PushOptions{
RemoteCallbacks: git.RemoteCallbacks{
PushUpdateReferenceCallback: func(refname, status string) git.ErrorCode {
statusText = status
return 0
},
CredentialsCallback: gerritGitCredentialsHandler,
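// NOTE: the certificate check below always returns success, so the Gerrit
// host key is not verified.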
CertificateCheckCallback: func(cert *git.Certificate, valid bool, hostname string) git.ErrorCode {
return 0
},
},
})
if err != nil {
return makeErr("failed to push to gerrit", err)
}
log.Printf("Successfully pushed to Gerrit with status `%s`", statusText)
if statusText != "" {
if statusText == "no new changes" && prstate != nil &&
(prstate.CurrentState == BOTSTATE_CREATED || prstate.CurrentState == BOTSTATE_UPDATED) {
// Nothing changed
return nil
}
return makeErr("failed to upload to gerrit", errors.New(statusText))
}
var reviewMessage string
reviewMessage += fmt.Sprintf("Change-Set generated from https://github.com/%s/%s/pull/%d (commit:%s).",
owner, repo, prnum, *pr.Head.SHA)
reviewMessage += "\n" + BOT_IDENT_TAG
_, _, err = gerritClient.Changes.SetReview(thisChangeId, "current", &gerrit.ReviewInput{
Message: reviewMessage,
})
if err != nil {
return makeErr("failed to publish comment to gerrit", err)
}
if csstate == nil {
csstate, err = GetChangesetState(owner, repo, prnum)
if err != nil {
return makeErr("failed to retrieve updated change from gerrit", err)
}
if csstate == nil {
return makeErr("could not locate pushed change on gerrit", err)
}
}
err = SendPushedText(owner, repo, prnum, prstate, csstate.ChangeNum)
if err != nil {
return err
}
return nil
}
func ProcessPullRequest(owner, repo string, prnum int, noCheckCla bool) error {
state, err := GetPullRequestState(owner, repo, prnum)
if err != nil {
return err
}
if state.CurrentState == BOTSTATE_ABANDONED || state.CurrentState == BOTSTATE_TIMEOUT ||
state.CurrentState == BOTSTATE_MERGED {
// That's odd... This ticket should not even be open...
// Let's do nothing in case someone intentionally reopened it.
return nil
}
if state.CurrentState == BOTSTATE_NEW || state.CurrentState == BOTSTATE_NO_CLA ||
state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
// Check CLA
allSigned, authorEmails, err := VerifyPrAuthorClas(owner, repo, prnum)
if err != nil {
return err
}
if noCheckCla {
log.Printf("Skipping no_cla warning for this pull request.")
allSigned = true
}
if !allSigned {
if state.CurrentState == BOTSTATE_NO_CLA {
// If we already sent the no_cla message, let's not send it again;
// instead, check whether this pull request has now timed out.
if time.Now().After(state.LastUpdatedTime.Add(10 * 24 * time.Hour)) {
return ClosePrForTimeout(owner, repo, prnum, state)
}
log.Printf("Skipping this pull request as no_cla was already sent.")
return nil
}
return SendClaText(owner, repo, prnum, state, authorEmails)
} else {
// Need to do normal process
return TransferPrToGerrit(owner, repo, prnum, state)
}
}
return makeErr("unexpected pull request state", nil)
}
func ProcessProject(owner, repo string) error {
log.Printf("Processing project %s/%s", owner, repo)
prs, _, err := githubClient.PullRequests.List(context.Background(), owner, repo, &github.PullRequestListOptions{
State: "open",
})
if err != nil {
return makeErr("failed to list all pull requests", err)
}
for i := 0; i < len(prs); i++ {
prNum := *prs[i].Number
log.Printf("Processing pull request %d", prNum)
err := ProcessPullRequest(owner, repo, prNum, false)
if err != nil {
return err
}
}
log.Printf("Processed project %s/%s", owner, repo)
return nil
}
func ProcessAllProjects() error {
log.Printf("Processing all projects")
for i := 0; i < len(allRepos); i++ {
thisRepo := allRepos[i]
err := ProcessProject(thisRepo.Owner, thisRepo.Name)
if err != nil {
return err
}
}
log.Printf("Processed all projects")
return nil
}
func initClients() error {
_, err := os.Stat(gerritPrivateKey)
if err != nil {
return makeErr("failed to locate gerrit private key", err)
}
_, err = os.Stat(gerritPublicKey)
if err != nil {
return makeErr("failed to locate gerrit public key", err)
}
err = initGerritClient()
if err != nil {
return err
}
err = initGitHubClient()
if err != nil {
return err
}
return nil
}
func readConfig() error {
configBytes, err := ioutil.ReadFile("./config.json")
if err != nil {
return makeErr("failed to read config file at `./config.json`", err)
}
var configData struct {
DryRun bool
GitHub struct {
User string
Token string
Owners []string
}
Gerrit struct {
Host string
User string
Pass string
Keys struct {
Public string
Private string
}
ClaGroupName string
}
Repos []struct {
Owner string
Name string
Repo string
}
}
err = json.Unmarshal(configBytes, &configData)
if err != nil {
return makeErr("failed to parse config file", err)
}
isDryRun = configData.DryRun
botOwners = configData.GitHub.Owners
githubUser = configData.GitHub.User
githubToken = configData.GitHub.Token
gerritHost = configData.Gerrit.Host
gerritUser = configData.Gerrit.User
gerritPass = configData.Gerrit.Pass
gerritPublicKey = configData.Gerrit.Keys.Public
gerritPrivateKey = configData.Gerrit.Keys.Private
gerritClaGroupName = configData.Gerrit.ClaGroupName
allRepos = nil
for i := 0; i < len(configData.Repos); i++ {
allRepos = append(allRepos, RepoInfo{
Owner: configData.Repos[i].Owner,
Name: configData.Repos[i].Name,
Repo: configData.Repos[i].Repo,
})
}
return nil
}
func githubHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received GitHub webhook")
var data github.WebHookPayload
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&data)
if err != nil {
log.Printf("Failed to parse GitHub data %+v", err)
return
}
fmt.Fprintf(w, "success")
if data.Repo == nil || data.Repo.Owner == nil {
// No repository data in the webhook
return
}
if data.Sender != nil && IsGitHubUserBot(data.Sender) {
// Ignore hooks triggered by the bot itself.
return
}
ownerName := *data.Repo.Owner.Login
repoName := *data.Repo.Name
go func() {
err := ProcessProject(ownerName, repoName)
if err != nil {
log.Printf("githubHttpHandler error: %+v\n", err)
}
}()
}
func gerritHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Gerrit webhook")
fmt.Fprintf(w, "success")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("gerritHttpHandler error: %+v\n", err)
}
}()
}
func forceCheckHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Check request")
fmt.Fprintf(w, "Running!")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("forceCheckHttpHandler error: %+v\n", err)
}
}()
}
func forceTransferHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Transfer request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
prnumParsed, err := strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
err = ProcessPullRequest(owner, repo, prnumParsed, true)
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
fmt.Fprintf(w, "success")
}
func checkClaHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Check CLA Request")
var target string
var err error
var res bool
email := r.FormValue("email")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
if email != "" {
target = email
res, err = VerifyEmailCla(email)
} else if owner != "" && repo != "" {
// use the outer err so conversion and verification failures reach the
// shared error check below
var prnumParsed int
prnumParsed, err = strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
target = fmt.Sprintf("github.com/%s/%s/%d", owner, repo, prnumParsed)
res, _, err = VerifyPrAuthorClas(owner, repo, prnumParsed)
}
} else {
fmt.Fprintf(w, "You must specify either an email or owner/repo/prnum.")
return
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
resText := "NOT SIGNED"
if res {
resText = "signed"
}
fmt.Fprintf(w, "CLA Status for `%s` is: %s\n", target, resText)
}
func proxyRepoStats(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Proxy Repo Stats Request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
clones, _, err := githubClient.Repositories.ListTrafficClones(context.Background(), owner, repo, &github.TrafficBreakdownOptions{
Per: "day",
})
if err != nil {
fmt.Fprintf(w, "Error: %s", err)
return
}
jsonWriter := json.NewEncoder(w)
jsonWriter.Encode(clones)
}
func rootHttpHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "This is just a bot...")
}
func main() {
log.Printf("Reading configuration...")
err := readConfig()
if err != nil {
log.Printf("Failed to initialize configuration:")
log.Printf("%+v", err)
return
}
log.Printf("Initializing API clients...")
err = initClients()
if err != nil {
log.Printf("Failed to initialize clients:")
log.Printf("%+v", err)
return
}
log.Printf("Starting web server on :4455...")
http.HandleFunc("/", rootHttpHandler)
http.HandleFunc("/github", githubHttpHandler)
http.HandleFunc("/gerrit", gerritHttpHandler)
http.HandleFunc("/forcecheck", forceCheckHttpHandler)
http.HandleFunc("/forcetransfer", forceTransferHandler)
http.HandleFunc("/checkcla", checkClaHttpHandler)
http.HandleFunc("/repostats", proxyRepoStats)
err = http.ListenAndServe(":4455", nil)
if err != nil {
log.Printf("Failed to start http listening.")
log.Printf("%+v", err)
return
}
/*
ownerName := "couchbase"
repoName := "couchnode"
prNum := 0
if prNum > 0 {
err = ProcessPullRequest(ownerName, repoName, prNum)
} else {
err = ProcessProject(ownerName, repoName)
}
if err != nil {
log.Printf("An error occurred during processing:")
log.Printf("%+v", err)
}
*/
} | newState := "closed" | random_line_split |
main.go | package main
import (
"context"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/andygrunwald/go-gerrit"
"github.com/google/go-github/github"
"github.com/gregjones/httpcache"
git "github.com/libgit2/git2go/v31"
"golang.org/x/oauth2"
)
var isDryRun bool = true
const BOT_IDENT_TAG = "::SDKBOT/PR"
func RedactEmail(email string) string {
parts := strings.Split(email, "@")
if len(parts) != 2 {
return "REDACTION FAILED"
}
user := parts[0]
domain := parts[1]
userAllow := float32(len(user)) * 0.6
domainAllow := float32(len(domain)) * 0.7
userAllowSide := int(userAllow / 2)
domainAllowSide := int(domainAllow / 2)
userRightStart := len(user) - userAllowSide
domainRightStart := len(domain) - domainAllowSide
user = user[0:userAllowSide] + strings.Repeat("*", userRightStart-userAllowSide) + user[userRightStart:]
domain = domain[0:domainAllowSide] + strings.Repeat("*", domainRightStart-domainAllowSide) + domain[domainRightStart:]
return user + "@" + domain
}
func RandomChangeId() string {
b := make([]byte, sha1.Size)
rand.Read(b)
encData := sha1.Sum(b)
return "I" + hex.EncodeToString(encData[:])
}
type subError struct {
msg string
err error
}
func (e subError) Error() string {
if e.err != nil {
return e.msg + ": " + e.err.Error()
} else {
return e.msg
}
}
func makeErr(msg string, err error) error {
return subError{msg, err}
}
func SquashHead(repo *git.Repository, squashCount int, mergeCommitTitle, changeId string) error {
log.Printf("Generating squash commit for `HEAD~0` to `HEAD~%d`", squashCount)
headRef, err := repo.Head()
if err != nil {
return makeErr("failed to get head reference", err)
}
topCommitId := headRef.Target()
topCommit, err := repo.LookupCommit(topCommitId)
if err != nil {
return makeErr("failed to locate head commit", err)
}
var baseCommit *git.Commit
var squashCommits []*git.Commit
{
curCommit := topCommit
for i := 0; i < squashCount; i++ {
squashCommits = append(squashCommits, curCommit)
curCommit = curCommit.Parent(0)
}
baseCommit = curCommit
}
log.Printf("Base Commit is `%s`", baseCommit.Id().String())
var newCommitAuthor *git.Signature
var newCommitCommitter *git.Signature
var newCommitMsg string
if len(squashCommits) == 1 {
newCommitAuthor = squashCommits[0].Author()
newCommitCommitter = squashCommits[0].Committer()
newCommitMsg = strings.TrimSpace(squashCommits[0].Message()) + "\n"
} else {
newCommitMsg = ""
newCommitMsg += mergeCommitTitle + "\n\n"
for i := 0; i < len(squashCommits); i++ {
curCommit := squashCommits[i]
newCommitMsg += "----\n"
newCommitMsg += strings.TrimSpace(curCommit.Message()) + "\n"
newCommitAuthor = curCommit.Author()
newCommitCommitter = curCommit.Committer()
}
}
newCommitMsg += "\nChange-Id: " + changeId
err = repo.ResetToCommit(baseCommit, git.ResetSoft, nil)
if err != nil {
return makeErr("failed to reset to base commit", err)
}
idx, err := repo.Index()
if err != nil {
return makeErr("failed to retrieve repo index", err)
}
err = idx.Write()
if err != nil {
return makeErr("failed to write squash index", err)
}
newCommitTreeId, err := idx.WriteTree()
if err != nil {
return makeErr("failed to write squash tree", err)
}
newCommitTree, err := repo.LookupTree(newCommitTreeId)
if err != nil {
return makeErr("failed to find created squash tree", err)
}
log.Printf("Generated new commit message:\n%s", newCommitMsg)
_, err = repo.CreateCommit("HEAD", newCommitAuthor, newCommitCommitter, newCommitMsg, newCommitTree, baseCommit)
if err != nil {
return makeErr("failed to generate squash commit", err)
}
return nil
}
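// CsStateInfo captures the subset of a Gerrit change the bot tracks: its
// number, Change-Id, status, and the SHA of the last commit pushed from GitHub.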
type CsStateInfo struct {
ChangeNum int
ChangeId string
Status string
CurrentSha1 string
}
func GetChangesetState(owner, repo string, prnum int) (*CsStateInfo, error) {
log.Printf("Retrieving change set for %s/%s/%d", owner, repo, prnum)
path := fmt.Sprintf("github.com/%s/%s/pull/%d", owner, repo, prnum)
changes, _, err := gerritClient.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
Query: []string{path},
},
ChangeOptions: gerrit.ChangeOptions{
AdditionalFields: []string{"messages"},
},
})
if err != nil {
return nil, makeErr("failed to gerrit query for changes", err)
}
var foundChangeset *gerrit.ChangeInfo
for i := 0; i < len(*changes); i++ {
change := &(*changes)[i]
for j := 0; j < len(change.Messages); j++ {
if !strings.Contains(change.Messages[j].Message, BOT_IDENT_TAG) {
continue
}
if strings.Contains(change.Messages[j].Message, path) {
if foundChangeset != nil {
return nil, makeErr("found multiple possible changesets", nil)
}
foundChangeset = change
break
}
}
}
if foundChangeset == nil {
return nil, nil
}
commitMatcher, err := regexp.Compile("commit:([0-9a-zA-Z]+)")
if err != nil {
return nil, makeErr("failed to compile commit sha1 finding regexp", err)
}
var latestSha1 string
for i := 0; i < len(foundChangeset.Messages); i++ {
commitMatches := commitMatcher.FindStringSubmatch(foundChangeset.Messages[i].Message)
if len(commitMatches) == 2 {
latestSha1 = commitMatches[1]
}
}
return &CsStateInfo{
ChangeNum: foundChangeset.Number,
ChangeId: foundChangeset.ChangeID,
Status: foundChangeset.Status,
CurrentSha1: latestSha1,
}, nil
}
type RepoInfo struct {
Owner string
Name string
Repo string
}
var botOwners []string
var githubClient *github.Client
var githubUser string
var githubToken string
var gerritClient *gerrit.Client
var gerritHost string
var gerritUser string
var gerritPass string
var gerritPublicKey string
var gerritPrivateKey string
var gerritClaGroupName string
var allRepos []RepoInfo
func gerritGitCredentialsHandler(url string, username_from_url string, allowed_types git.CredType) (*git.Cred, error) {
creds, err := git.NewCredSshKey(gerritUser, gerritPublicKey, gerritPrivateKey, "")
return creds, err
}
func initGerritClient() error {
client, err := gerrit.NewClient("https://"+gerritHost+"/", nil)
if err != nil {
return makeErr("failed to create gerrit client", err)
}
client.Authentication.SetBasicAuth(gerritUser, gerritPass)
gerritClient = client
return nil
}
func initGitHubClient() error {
tx := httpcache.NewMemoryCacheTransport()
tc := &http.Client{
Transport: &oauth2.Transport{
Source: oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: githubToken},
),
Base: tx,
},
}
githubClient = github.NewClient(tc)
return nil
}
type PrStateInfo struct {
CurrentState string
LastUpdatedTime time.Time
CurrentSha1 string
NumOfCommits int
}
func IsGitHubUserBot(user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
if *user.Login == githubUser {
return true
}
return false
}
func | (user *github.User) bool {
if user == nil || user.Login == nil {
return false
}
for i := 0; i < len(botOwners); i++ {
if *user.Login == botOwners[i] {
return true
}
}
return IsGitHubUserBot(user)
}
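// Bot states recorded in pull request comments via BOT_IDENT_TAG.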
const (
BOTSTATE_NEW = ""
BOTSTATE_NO_CLA = "no_cla"
BOTSTATE_CREATED = "created"
BOTSTATE_UPDATED = "updated"
BOTSTATE_ABANDONED = "abandoned"
BOTSTATE_MERGED = "merged"
BOTSTATE_TIMEOUT = "timeout"
)
func GetPullRequestState(owner, repo string, prnum int) (*PrStateInfo, error) {
var parseableStateNames = []string{
BOTSTATE_NO_CLA,
BOTSTATE_CREATED,
BOTSTATE_UPDATED,
BOTSTATE_ABANDONED,
BOTSTATE_MERGED,
BOTSTATE_TIMEOUT,
// backwards compatibility names
"pushed",
"too_many_commits",
"closed",
}
log.Printf("Retrieving PR state for %s/%s/%d", owner, repo, prnum)
info, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return nil, makeErr("failed to retrieve pull request info", err)
}
comments, _, err := githubClient.Issues.ListComments(context.Background(), owner, repo, prnum, nil)
if err != nil {
return nil, makeErr("failed to retrieve pull request comments", err)
}
var lastStateTime time.Time
var lastStateName string
var lastUpdatedTime time.Time
for i := 0; i < len(comments); i++ {
if comments[i].CreatedAt.After(lastUpdatedTime) || comments[i].UpdatedAt.After(lastUpdatedTime) {
lastUpdatedTime = *comments[i].UpdatedAt
}
if !IsGitHubUserBotOwner(comments[i].User) {
continue
}
for j := 0; j < len(parseableStateNames); j++ {
if strings.Contains(*comments[i].Body, BOT_IDENT_TAG+":"+parseableStateNames[j]) {
if comments[i].CreatedAt.After(lastStateTime) {
lastStateName = parseableStateNames[j]
lastStateTime = *comments[i].CreatedAt
}
}
}
}
// For backwards compat...
if lastStateName == "too_many_commits" {
lastStateName = BOTSTATE_NEW
} else if lastStateName == "pushed" {
lastStateName = BOTSTATE_UPDATED
} else if lastStateName == "closed" {
lastStateName = BOTSTATE_ABANDONED
}
if lastUpdatedTime.IsZero() {
lastUpdatedTime = time.Now()
}
return &PrStateInfo{
CurrentState: lastStateName,
LastUpdatedTime: lastUpdatedTime,
CurrentSha1: *info.Head.SHA,
NumOfCommits: *info.Commits,
}, nil
}
func VerifyEmailCla(email string) (bool, error) {
log.Printf("Verifying CLA signed for `%s`", email)
if email == "" {
return false, makeErr("you must specify a non-empty email", nil)
}
groups, _, err := gerritClient.Accounts.ListGroups(email)
if err != nil {
if strings.Contains(err.Error(), "Not Found") {
log.Printf("Email was not found on Gerrit")
return false, nil
}
log.Printf("An error occurred trying to locate the user on Gerrit")
return false, makeErr("failed to retrieve gerrit user groups", err)
}
hasClaGroup := false
for i := 0; i < len(*groups); i++ {
if (*groups)[i].Name == gerritClaGroupName {
hasClaGroup = true
}
}
if hasClaGroup {
log.Printf("The user was located and has signed the CLA")
} else {
log.Printf("The user was located, but they did not sign the CLA")
}
return hasClaGroup, nil
}
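// VerifyPrAuthorClas checks every distinct author email on the pull request's
// commits and reports whether all of them have signed the CLA, along with the
// list of emails that were seen.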
func VerifyPrAuthorClas(owner, repo string, prnum int) (bool, []string, error) {
log.Printf("Verifying CLA signed for PR %s/%s/%d", owner, repo, prnum)
commits, _, err := githubClient.PullRequests.ListCommits(context.Background(), owner, repo, prnum, nil)
if err != nil {
return false, nil, makeErr("failed to retrieve pull request commits", err)
}
authorEmailMap := make(map[string]bool)
for i := 0; i < len(commits); i++ {
authorEmail := *commits[i].Commit.Author.Email
authorEmailMap[authorEmail] = false
}
var emails []string
for authorEmail := range authorEmailMap {
emails = append(emails, authorEmail)
}
allSigned := true
for authorEmail := range authorEmailMap {
signed, err := VerifyEmailCla(authorEmail)
if err != nil {
return false, emails, err
}
if !signed {
allSigned = false
}
}
return allSigned, emails, nil
}
func SendPrStateCommentAndClose(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment and close for '%s' due to dry run.", state)
return nil
}
newState := "closed"
_, _, err := githubClient.PullRequests.Edit(context.Background(), owner, repo, prnum, &github.PullRequest{
State: &newState,
})
if err != nil {
return makeErr("failed to close pull request", err)
}
return SendPrStateComment(owner, repo, prnum, message, state, is_first)
}
func SendPrStateComment(owner, repo string, prnum int, message, state string, is_first bool) error {
if isDryRun {
log.Printf("Skipping pr comment for '%s' due to dry run.", state)
return nil
}
var messageBody string
if is_first {
messageBody += "Thanks for the pull request!! To ensure quality review, Couchbase employs"
messageBody += " a [code review system](http://review.couchbase.org/) based on"
messageBody += " [Gerrit](https://www.gerritcodereview.com/) to manage the workflow of changes"
messageBody += " in addition to tracking our contributor agreements.\n\n"
}
messageBody += strings.TrimSpace(message)
messageBody += "\n\n" + BOT_IDENT_TAG + ":" + state
_, _, err := githubClient.Issues.CreateComment(context.Background(), owner, repo, prnum, &github.IssueComment{
Body: &messageBody,
})
if err != nil {
return makeErr("failed to comment on pull request", err)
}
return nil
}
func SendClaText(owner, repo string, prnum int, state *PrStateInfo, emails []string) error {
log.Printf("Sending no_cla for %s/%s/%d", owner, repo, prnum)
message := "To get this change in and collaborate in code review, please register on Gerrit"
message += " and accept our CLA. The easiest way to do this is to follow the link below,"
message += " sign in with your GitHub account and then follow through the steps provided"
message += " on that page to sign an 'Individual' agreement:"
message += " http://review.couchbase.org/#/settings/new-agreement."
message += "\n\n"
message += "Keep in mind that the emails we are seeing on the commits are: "
for i, email := range emails {
if i != 0 {
message += ", "
}
message += "`" + RedactEmail(email) + "`"
}
message += "\n\n"
message += "Note: Please contact us if you have any issues registering with Gerrit!"
message += " If you have not signed our CLA within 7 days, the Pull Request will be"
message += " automatically closed."
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_NO_CLA, state.CurrentState == BOTSTATE_NEW)
}
func SendPushedText(owner, repo string, prnum int, state *PrStateInfo, changeNum int) error {
log.Printf("Sending pushed for %s/%s/%d", owner, repo, prnum)
message := "Your changes (commit: " + state.CurrentSha1 + ") have been pushed to the Couchbase Review Site:\n"
message += "http://review.couchbase.org/" + strconv.FormatInt(int64(changeNum), 10)
if state.NumOfCommits > 1 {
message += "\n\n"
message += "Note: As your pull request contains multiple commits, we have"
message += " performed an automatic squash of these commits into a single"
message += " change-set. If this is not the desired behaviour, please"
message += " consider submitting a pull request per discrete feature."
}
if state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_UPDATED, false)
} else {
return SendPrStateComment(owner, repo, prnum, message, BOTSTATE_CREATED, state.CurrentState == BOTSTATE_NEW)
}
}
func ClosePrForTimeout(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to timeout for %s/%s/%d", owner, repo, prnum)
message := "Unfortunately it has been 7 days and we are still unable to confirm that you"
message += " have signed our CLA. We sincerely appreciate your submission and hope that"
message += " you will register and resubmit this Pull Request in the future!"
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_TIMEOUT, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForMerge(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit merge for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was merged."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_MERGED, state.CurrentState == BOTSTATE_NEW)
}
func ClosePrForAbandon(owner, repo string, prnum int, state *PrStateInfo) error {
log.Printf("Closing due to gerrit abandon for %s/%s/%d", owner, repo, prnum)
message := "This Pull Request has been closed as the associated Gerrit change was abandoned."
return SendPrStateCommentAndClose(owner, repo, prnum, message, BOTSTATE_ABANDONED, state.CurrentState == BOTSTATE_NEW)
}
func GetGerritRepo(owner, repo string) string {
return repo
}
func TransferPrToGerrit(owner, repo string, prnum int, prstate *PrStateInfo) error {
log.Printf("Attempting to gerrit transfer PR %s/%s/%d %v", owner, repo, prnum, prstate)
csstate, err := GetChangesetState(owner, repo, prnum)
if err != nil {
return err
}
if csstate != nil {
if csstate.Status == "ABANDONED" {
return ClosePrForAbandon(owner, repo, prnum, prstate)
}
if csstate.Status == "MERGED" {
return ClosePrForMerge(owner, repo, prnum, prstate)
}
if csstate.CurrentSha1 == prstate.CurrentSha1 {
// Already up to date!
log.Printf("Nothing to do, already up to date.")
return nil
}
}
thisChangeId := RandomChangeId()
if csstate != nil {
thisChangeId = csstate.ChangeId
}
pr, _, err := githubClient.PullRequests.Get(context.Background(), owner, repo, prnum)
if err != nil {
return makeErr("failed to request pull request data", err)
}
gitRepoPath := "/tmp/gtest"
os.RemoveAll(gitRepoPath)
gitRepo, err := git.Clone(*pr.Head.Repo.CloneURL, gitRepoPath, &git.CloneOptions{
CheckoutBranch: *pr.Head.Ref,
})
if err != nil {
return makeErr("failed to clone repository head", err)
}
err = SquashHead(gitRepo, *pr.Commits, *pr.Title, thisChangeId)
if err != nil {
return err
}
log.Printf("Generated squash commit with ChangeId `%s`", thisChangeId)
if isDryRun {
log.Printf("Skipping remote push and comment due to dry run.")
return nil
}
reviewRemote, err := gitRepo.Remotes.Create("review",
"ssh://"+gerritUser+"@"+gerritHost+":29418/"+GetGerritRepo(owner, repo))
if err != nil {
return makeErr("failed to add gerrit as a remote", err)
}
log.Printf("Assigned remote.")
statusText := ""
err = reviewRemote.Push([]string{"HEAD:refs/for/master"}, &git.PushOptions{
RemoteCallbacks: git.RemoteCallbacks{
PushUpdateReferenceCallback: func(refname, status string) git.ErrorCode {
statusText = status
return 0
},
CredentialsCallback: gerritGitCredentialsHandler,
CertificateCheckCallback: func(cert *git.Certificate, valid bool, hostname string) git.ErrorCode {
return 0
},
},
})
if err != nil {
return makeErr("failed to push to gerrit", err)
}
log.Printf("Successfully pushed to Gerrit with status `%s`", statusText)
if statusText != "" {
if statusText == "no new changes" && prstate != nil &&
(prstate.CurrentState == BOTSTATE_CREATED || prstate.CurrentState == BOTSTATE_UPDATED) {
// Nothing changed
return nil
}
return makeErr("failed to upload to gerrit", errors.New(statusText))
}
var reviewMessage string
reviewMessage += fmt.Sprintf("Change-Set generated from https://github.com/%s/%s/pull/%d (commit:%s).",
owner, repo, prnum, *pr.Head.SHA)
reviewMessage += "\n" + BOT_IDENT_TAG
_, _, err = gerritClient.Changes.SetReview(thisChangeId, "current", &gerrit.ReviewInput{
Message: reviewMessage,
})
if err != nil {
return makeErr("failed to publish comment to gerrit", err)
}
if csstate == nil {
csstate, err = GetChangesetState(owner, repo, prnum)
if err != nil {
return makeErr("failed to retrieve updated change from gerrit", err)
}
if csstate == nil {
return makeErr("could not locate pushed change on gerrit", err)
}
}
err = SendPushedText(owner, repo, prnum, prstate, csstate.ChangeNum)
if err != nil {
return err
}
return nil
}
func ProcessPullRequest(owner, repo string, prnum int, noCheckCla bool) error {
state, err := GetPullRequestState(owner, repo, prnum)
if err != nil {
return err
}
if state.CurrentState == BOTSTATE_ABANDONED || state.CurrentState == BOTSTATE_TIMEOUT ||
state.CurrentState == BOTSTATE_MERGED {
// That's odd... This ticket should not even be open...
// Let's do nothing in case someone intentionally reopened it.
return nil
}
if state.CurrentState == BOTSTATE_NEW || state.CurrentState == BOTSTATE_NO_CLA ||
state.CurrentState == BOTSTATE_CREATED || state.CurrentState == BOTSTATE_UPDATED {
// Check CLA
allSigned, authorEmails, err := VerifyPrAuthorClas(owner, repo, prnum)
if err != nil {
return err
}
if noCheckCla {
log.Printf("Skipping no_cla warning for this pull request.")
allSigned = true
}
if !allSigned {
if state.CurrentState == BOTSTATE_NO_CLA {
// If we already sent the no_cla message, don't send it again;
// instead, check whether this pull request has now timed out...
if time.Now().After(state.LastUpdatedTime.Add(10 * 24 * time.Hour)) {
return ClosePrForTimeout(owner, repo, prnum, state)
}
log.Printf("Skipping this pull request as no_cla was already sent.")
return nil
}
return SendClaText(owner, repo, prnum, state, authorEmails)
} else {
// Need to do normal process
return TransferPrToGerrit(owner, repo, prnum, state)
}
}
return makeErr("unexpected pull request state", nil)
}
func ProcessProject(owner, repo string) error {
log.Printf("Processing project %s/%s", owner, repo)
prs, _, err := githubClient.PullRequests.List(context.Background(), owner, repo, &github.PullRequestListOptions{
State: "open",
})
if err != nil {
return makeErr("failed to list all pull requests", err)
}
for i := 0; i < len(prs); i++ {
prNum := *prs[i].Number
log.Printf("Processing pull request %d", prNum)
err := ProcessPullRequest(owner, repo, prNum, false)
if err != nil {
return err
}
}
log.Printf("Processed project %s/%s", owner, repo)
return nil
}
func ProcessAllProjects() error {
log.Printf("Processing all projects")
for i := 0; i < len(allRepos); i++ {
thisRepo := allRepos[i]
err := ProcessProject(thisRepo.Owner, thisRepo.Name)
if err != nil {
return err
}
}
log.Printf("Processed all projects")
return nil
}
func initClients() error {
_, err := os.Stat(gerritPrivateKey)
if err != nil {
return makeErr("failed to locate gerrit private key", err)
}
_, err = os.Stat(gerritPublicKey)
if err != nil {
return makeErr("failed to locate gerrit public key", err)
}
err = initGerritClient()
if err != nil {
return err
}
err = initGitHubClient()
if err != nil {
return err
}
return nil
}
func readConfig() error {
configBytes, err := ioutil.ReadFile("./config.json")
if err != nil {
return makeErr("failed to read config file at `./config.json`", err)
}
var configData struct {
DryRun bool
GitHub struct {
User string
Token string
Owners []string
}
Gerrit struct {
Host string
User string
Pass string
Keys struct {
Public string
Private string
}
ClaGroupName string
}
Repos []struct {
Owner string
Name string
Repo string
}
}
err = json.Unmarshal(configBytes, &configData)
if err != nil {
return makeErr("failed to parse config file", err)
}
isDryRun = configData.DryRun
botOwners = configData.GitHub.Owners
githubUser = configData.GitHub.User
githubToken = configData.GitHub.Token
gerritHost = configData.Gerrit.Host
gerritUser = configData.Gerrit.User
gerritPass = configData.Gerrit.Pass
gerritPublicKey = configData.Gerrit.Keys.Public
gerritPrivateKey = configData.Gerrit.Keys.Private
gerritClaGroupName = configData.Gerrit.ClaGroupName
allRepos = nil
for i := 0; i < len(configData.Repos); i++ {
allRepos = append(allRepos, RepoInfo{
Owner: configData.Repos[i].Owner,
Name: configData.Repos[i].Name,
Repo: configData.Repos[i].Repo,
})
}
return nil
}
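// The struct in readConfig above implies a config.json of roughly the shape
// sketched below. This is only an illustration: every value is a
// placeholder/assumption, not a real host, token, or path.
//
//	{
//	  "DryRun": true,
//	  "GitHub": { "User": "bot-user", "Token": "<token>", "Owners": ["some-admin"] },
//	  "Gerrit": {
//	    "Host": "gerrit.example.com",
//	    "User": "bot-user",
//	    "Pass": "<password>",
//	    "Keys": { "Public": "/path/to/id_rsa.pub", "Private": "/path/to/id_rsa" },
//	    "ClaGroupName": "cla-signed-group"
//	  },
//	  "Repos": [ { "Owner": "example-org", "Name": "example-repo", "Repo": "example-gerrit-project" } ]
//	}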
func githubHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received GitHub webhook")
var data github.WebHookPayload
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&data)
if err != nil {
log.Printf("Failed to parse GitHub data %+v", err)
return
}
fmt.Fprintf(w, "success")
if data.Repo == nil || data.Repo.Owner == nil {
// No repository data in the webhook
return
}
if data.Sender != nil && IsGitHubUserBot(data.Sender) {
// Ignore hooks triggered by the bot itself.
return
}
ownerName := *data.Repo.Owner.Login
repoName := *data.Repo.Name
go func() {
err := ProcessProject(ownerName, repoName)
if err != nil {
log.Printf("githubHttpHandler error: %+v\n", err)
}
}()
}
func gerritHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Gerrit webhook")
fmt.Fprintf(w, "success")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("gerritHttpHandler error: %+v\n", err)
}
}()
}
func forceCheckHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Check request")
fmt.Fprintf(w, "Running!")
go func() {
err := ProcessAllProjects()
if err != nil {
log.Printf("forceCheckHttpHandler error: %+v\n", err)
}
}()
}
func forceTransferHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Force Transfer request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
prnumParsed, err := strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
err = ProcessPullRequest(owner, repo, prnumParsed, true)
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
fmt.Fprintf(w, "success")
}
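// Usage sketch for the /forcetransfer endpoint registered in main below (port
// 4455 comes from main); the owner/repo/prnum values are purely illustrative:
//
//	curl "http://localhost:4455/forcetransfer?owner=example-org&repo=example-repo&prnum=42"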
func checkClaHttpHandler(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Check CLA Request")
var target string
var err error
var res bool
email := r.FormValue("email")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
prnum := r.FormValue("prnum")
if email != "" {
target = email
res, err = VerifyEmailCla(email)
} else if owner != "" && repo != "" {
// Assign to the outer err here; `:=` would shadow it and the error check below would never see the failure.
var prnumParsed int
prnumParsed, err = strconv.Atoi(prnum)
if err != nil {
err = makeErr("You specified an invalid numeric `prnum` value", err)
} else {
target = fmt.Sprintf("github.com/%s/%s/%d", owner, repo, prnumParsed)
res, _, err = VerifyPrAuthorClas(owner, repo, prnumParsed)
}
} else {
fmt.Fprintf(w, "You must specify either an email or owner/repo/prnum.")
return
}
if err != nil {
fmt.Fprintf(w, "Error: %s\n", err)
return
}
resText := "NOT SIGNED"
if res {
resText = "signed"
}
fmt.Fprintf(w, "CLA Status for `%s` is: %s\n", target, resText)
}
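// Usage sketch for the /checkcla endpoint registered in main below; the email
// and repository values are placeholders:
//
//	curl "http://localhost:4455/checkcla?email=dev@example.com"
//	curl "http://localhost:4455/checkcla?owner=example-org&repo=example-repo&prnum=42"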
func proxyRepoStats(w http.ResponseWriter, r *http.Request) {
log.Printf("Received Proxy Repo Stats Request")
owner := r.FormValue("owner")
repo := r.FormValue("repo")
clones, _, err := githubClient.Repositories.ListTrafficClones(context.Background(), owner, repo, &github.TrafficBreakdownOptions{
Per: "day",
})
if err != nil {
fmt.Fprintf(w, "Error: %s", err)
return
}
jsonWriter := json.NewEncoder(w)
jsonWriter.Encode(clones)
}
func rootHttpHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "This is just a bot...")
}
func main() {
log.Printf("Reading configuration...")
err := readConfig()
if err != nil {
log.Printf("Failed to initalize configuration:")
log.Printf("%+v", err)
return
}
log.Printf("Initializing API clients...")
err = initClients()
if err != nil {
log.Printf("Failed to initalize clients:")
log.Printf("%+v", err)
return
}
log.Printf("Starting web server on :4455...")
http.HandleFunc("/", rootHttpHandler)
http.HandleFunc("/github", githubHttpHandler)
http.HandleFunc("/gerrit", gerritHttpHandler)
http.HandleFunc("/forcecheck", forceCheckHttpHandler)
http.HandleFunc("/forcetransfer", forceTransferHandler)
http.HandleFunc("/checkcla", checkClaHttpHandler)
http.HandleFunc("/repostats", proxyRepoStats)
err = http.ListenAndServe(":4455", nil)
if err != nil {
log.Printf("Failed to start http listening.")
log.Printf("%+v", err)
return
}
/*
ownerName := "couchbase"
repoName := "couchnode"
prNum := 0
if prNum > 0 {
err = ProcessPullRequest(ownerName, repoName, prNum)
} else {
err = ProcessProject(ownerName, repoName)
}
if err != nil {
log.Printf("An error occured during processing:")
log.Printf("%+v", err)
}
*/
}
| IsGitHubUserBotOwner | identifier_name |
tvlist.js | 'use strict';
/**
* @class TVList
* @constructor
* @author Roman Stoian
*/
function TVList(parent) {
| // extending
TVList.prototype = Object.create(CScrollList.prototype);
TVList.prototype.constructor = TVList;
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetBreadCrumb = function ( component ) {
this.bcrumb = component;
};
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetSearchBar = function ( component ) {
this.sbar = component;
};
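// Wiring sketch for the two setters above. The page object and component
// names here are assumptions for illustration, not part of this file:
//   var list = new TVList(somePage);
//   list.SetBreadCrumb(somePage.BCrumb);
//   list.SetSearchBar(somePage.SBar);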
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function(item){
// check file name if regular file
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
// check file type if regular file
var type_ok = item.data.type === self.filterType || self.filterType === MEDIA_TYPE_NONE || item.data.type === MEDIA_TYPE_BACK;
// hide not matching items
self.Hidden(item, !(text_ok && type_ok));
});
};
/**
* Finds the first appropriate item
* @param {string} value
* @return {Node}
*/
TVList.prototype.FirstMatch = function ( value ) {
// preparing
var items;
if ( value === '' ) {
return null;
}
items = this.handleInner.children; // all list items
// iterate over the items until the first match is found
for ( var i = 0; i < items.length; i++ ) {
// current candidate item
var item = items[i];
// check file name if regular file
if ( item.data.type !== MEDIA_TYPE_BACK && item.data.name && item.data.name.toLowerCase().indexOf(value.toLowerCase()) !== -1 ) {
return item;
}
}
return null;
};
/**
* Create new item and put it in the list
* @param {Object} obj item label data (name, number, tsOn)
* @param {Object} attrs set of item data parameters
* @param {Object} states set of additional parameters (stared)
* @return {Node}
*/
TVList.prototype.Add = function (obj, attrs, states) {
var self = this, number;
// is it necessary to filter
if ( this.filterText) { // || this.filterType !== MEDIA_TYPE_NONE
// check file name if regular file
var text_ok = attrs.type === MEDIA_TYPE_BACK || (obj.name && obj.name.toLowerCase().indexOf(this.filterText.toLowerCase()) !== -1);
// check file type if regular file
var type_ok = attrs.type === this.filterType || this.filterType === MEDIA_TYPE_NONE || attrs.type === MEDIA_TYPE_BACK;
// hide not matching items
if ( !(text_ok && type_ok) ) {
return null;
}
}
if (this.mtypes.indexOf(attrs.type) === -1) {
this.mtypes.push(attrs.type);
}
// html prepare
var body = element('div', {className: 'data'}, obj.name);
var star = element('div', {className: 'star'});
if (obj.number) {
number = element('div', {className: 'number'}, obj.number);
} else {
number = element('div', {className: 'number'});
number.style.background = 'url("' + PATH_IMG_PUBLIC + 'media/type_' + attrs.type + '.png") no-repeat center';
}
var timeshift = element('div', {className: obj.tsOn? 'timeshift tsOn' : 'timeshift'});
// decoration
// make sure name is set
if (!attrs.name) {
attrs.name = obj.name;
}
// actual filling
var item = CScrollList.prototype.Add.call(this, [number, body, timeshift, star], {
star: star,
data: attrs,
// handlers
onclick: function () {
// open or enter the item
this.self.Open(this.data);
return false;
},
oncontextmenu: EMULATION ? null : function () {
// mark/unmark the item
self.parent.actionF2(false);
return false;
}
});
if(obj.number){
item.domNumber = number;
}
// mark as favourite
if (states && states.stared) {
item.self.SetStar(item, true);
}
return item;
};
/**
* Set inner item flags and decoration
* @param {Node} item the element to be processed
* @param {boolean} state flag of the operation (true if change is made)
*/
TVList.prototype.SetStar = function (item, state) {
if (item.stared === state) {
return;
}
this.SetState(item, 'stared', state);
if (state !== false) {
item.star.style.background = 'url("' + PATH_IMG_PUBLIC + 'ico_fav_s.png") no-repeat right';
} else {
item.star.style.background = 'none';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, state !== false ? true : false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, state !== false ? false : true);
};
/**
* Hook method on focus item change
* @param {Node} item the new focused item
*/
TVList.prototype.onFocus = function (item) {
var self = this;
if ( MediaPlayer.ts_inProgress ) {
if (MediaPlayer.tsExitCheck('focus', item)) {
return true;
}
}
clearTimeout(this.timer.OnFocusPlay);
if ( item.data.markable ) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
}
if (item.data.type === MEDIA_TYPE_STREAM) {
if (!this.states.marked || this.states.marked.length === 0) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, item.stared);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, !item.stared);
} else {
echo(this.states.marked[0].data,'this.states.marked');
}
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
self.parent.clearEPG();
this.timer.OnFocusPlay = setTimeout(function () {
if ( item.data.type === MEDIA_TYPE_BACK ){
if(self.filterText){
self.parent.domInfoTitle.innerHTML = _('Contains the list of items corresponding to the given filter request');
} else {
self.parent.domInfoTitle.innerHTML = self.parentItem.name?self.parentItem.name:'';
}
} else {
self.parent.initEPGNow();
self.parent.domInfoTitle.innerHTML = item.data.name?item.data.name:'';
}
if (item.data.type === MEDIA_TYPE_STREAM) {
self.prevChannel = self.lastChannel;
self.lastChannel = self.parentItem.data[item.data.index];
self.parent.domURL.innerHTML = (self.parentItem.data[item.data.index].sol && self.parentItem.data[item.data.index].sol !==''?self.parentItem.data[item.data.index].sol+' ':'')+self.parentItem.data[item.data.index].url;
if ( MediaPlayer.obj !== self.parentItem.data[item.data.index] ) {
MediaPlayer.preparePlayer(self.parentItem.data[item.data.index], self.parent, true, false, true);
}
if ( self.parent.pvr.arr.length ) {
self.parent.pvr.check(true);
}
} else {
self.lastChannel = null;
self.parent.domURL.innerHTML = '';
MediaPlayer.end();
}
}, 500);
return false;
};
/**
* Reset and clear all items
* This makes the component ready to be filled again.
*/
TVList.prototype.Clear = function () {
CScrollList.prototype.Clear.call(this);
this.parent.domURL.innerHTML = '';
this.parent.domInfoTitle.innerHTML = '';
this.filterType = MEDIA_TYPE_NONE;
this.mtypes = [];
};
/**
* Move one level up
*/
TVList.prototype.Back = function () {
var self = this;
// there are some levels
if ( this.path.length > 1 ) {
// exiting from favs and there are some changes
// normal exit
this.path.pop();
self.lastChannel = null;
if ( this.bcrumb ) {
this.bcrumb.Pop();
}
// render the previous level
this.Build(this.path[this.path.length-1]);
// apply specific button visibility
setTimeout(function(){
self.onFocus(self.Current());
}, 0);
// go up
return this.LEVEL_CHANGE_UP;
}
// stay here
return this.LEVEL_CHANGE_NONE;
};
/**
* Go to channel by number
* @param {number} number
*/
TVList.prototype.goToChannel = function (number) {
if (this.handle.children.length > number && number > this.channelStart) {
this.Focused(this.handle.children[number+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
this.SetPosition(this.activeItem);
}
};
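// Usage sketch: jump to a channel by its on-screen number, e.g. after the user
// types it on the remote (the caller below is an assumption, not part of this file):
//   list.goToChannel(5);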
/**
* Enter the item or open it
* @param {Object} data media item inner data
*/
TVList.prototype.Open = function (data, noPlay) {
// render the list
echo(data,'data Open');
var levelChange = this.Build(data, noPlay);
// level changed
if ( levelChange !== this.LEVEL_CHANGE_NONE ) {
// reset tray filter icon
if ( this.parent.Tray.iconFilter.parentNode === this.parent.Tray.handleInner ) {
this.parent.Tray.handleInner.removeChild(this.parent.Tray.iconFilter);
}
// and hide at all if not necessary
this.parent.Tray.Show(globalBuffer.size() > 0, false);
// particular direction
if ( levelChange === this.LEVEL_CHANGE_DOWN ) {
// build breadcrumbs
if ( this.filterText ) {
// filter
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/ico_filter.png', this.filterText);
}
} else {
// default
// build breadcrumbs
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/type_'+data.type+'.png', data.name ? data.name : '');
}
}
// save this step
this.path.push(data);
// set focus to the first item
this.Activate(true);
if( data.data && data.data.length ){ this.onFocus(this.activeItem); }
} else {
// go up
if ( !this.Reposition(this.parentItem) ){
this.Activate(true);
}
}
// current level item
this.parentItem = this.path[this.path.length-1];
}
return levelChange;
};
/**
* Open root, clear all breadcrumbs, search options
*/
TVList.prototype.Reset = function () {
this.parentItem = null;
this.path = [];
this.Clear();
// linked components
if ( this.bcrumb ) {
this.bcrumb.Reset();
}
if ( this.sbar ) {
this.sbar.Reset();
}
};
/**
* Renders the given media item by executing associated action
* @param {Object} data media item inner data
*/
TVList.prototype.Build = function (data, noPlay) {
var levelChange = this.LEVEL_CHANGE_NONE;
// apply filter parameter from the current node
this.filterText = data.filterText ? data.filterText : '';
// get item associated open action and execute
if ( data && data.type && typeof this.openAction[data.type] === 'function' ) {
levelChange = this.openAction[data.type].call(this, data, noPlay);
} else {
// wrong item type
new CModalAlert(this.parent, _('Error'), _('Unknown type of selected item'), _('Close'));
}
return levelChange;
};
/**
* Clear the list and fill it again (will try to refocus)
* @param {boolean} [refocus=true] if true then try to set focus to the previous focused element
*/
TVList.prototype.Refresh = function (refocus) {
var data = {data : null};
// some media item is opened at the moment
if ( this.parentItem !== null ) {
// get current focused item
this.Build(this.parentItem);
if ( refocus !== false) {
if ( this.activeItem ) {
data = this.activeItem;
} else {
data = this.FirstMatch(this.filterText);
}
}
// refill
// find it in the new list if necessary
if ( data && data.data ) {
this.Reposition(data.data);
} else {
this.Reposition(data);
}
}
};
/**
* Refresh item indexes and channel numbers in the DOM objects
*/
TVList.prototype.RefreshIndex = function () {
this.channelStart = this.parentItem.type === MEDIA_TYPE_GROUP? 0 : -1;
var i = 0,
j = 1,
items = this.handleInner.children,
delta = 0;
if ( this.parentItem !== null && items.length ) {
delta = items[0].data.type === MEDIA_TYPE_BACK? 1: 0;
for(i = delta; i < items.length; i++){
items[i].data.index = i - delta;
if(items[i].data.type === MEDIA_TYPE_GROUP){
this.channelStart = i;
} else {
items[i].data.number = j;
items[i].domNumber.innerHTML = j;
j++;
}
}
} else {
this.Refresh(true);
}
};
/**
* Moves the cursor to the given element
* @param {Object} data
* @return {boolean} operation status
*/
TVList.prototype.Reposition = function (data) {
var change = false;
if ( data ) {
for ( var item, i = 0, l = this.Length(); i < l; i++ ) {
item = this.handleInner.children[i];
// url and type match
if (data.index === item.data.index || (data.data && !data.index && data.data.index === item.data.index)) {
change = this.Focused(item, true);
this.SetPosition(this.activeItem); // focus handle bug fix (even items per page problem)
if(!change){
this.onFocus(item);
}
return change;
}
}
}
return false;
};
/**
* Handle checked state for the given item according to the file type.
* Mark only items available for marking.
* @param {Node} item the element to be processed
* @param {boolean} state flag of the state
* @return {boolean} operation status
*/
TVList.prototype.Marked = function (item, state) {
// item exists and only allowed types
if (item && item.data && item.data.markable) {
// parent marking
return CScrollList.prototype.Marked.call(this, item, state);
}
return false;
};
/**
* Show/hide file items according to the specified filter options
* @param {string} text filter file name option
*/
TVList.prototype.SetFilterText = function (text) {
echo('enter to SetFilterText : ' + this.filterText);
// set global (case conversion for future string comparison speedup)
this.filterText = text.toLowerCase();
// apply filter
this.Filter();
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function (item) {
// check file name if regular file
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
self.Hidden(item, !text_ok);
});
};
/**
* Return all appropriate items available for actions (either marked or current with suitable type)
* @return {Array} list of found Nodes
*/
TVList.prototype.ActiveItems = function () {
// get all marked items
var items = this.states.marked ? this.states.marked.slice() : [];
// no marked, check current and its type
if (items.length === 0 && this.activeItem && this.activeItem.data.markable) {
items.push(this.activeItem);
}
return items;
};
/**
* Create group of channels
*/
TVList.prototype.createGroup = function (name, data, toDelete, deleteList) {
var obj = {}, dataMap = [];
var map = this.parentItem.data.map(function(item){return item.type === MEDIA_TYPE_GROUP?item.name:null;});
var toAdd = map.indexOf(name);
echo(toDelete,'toDelete');
if(toAdd === -1){
obj.name = name;
obj.type = MEDIA_TYPE_GROUP;
obj.data = [];
this.parentItem.data.splice(this.channelStart + 1, 0, obj);
} else {
dataMap = data.map(function(item){if(item.type === MEDIA_TYPE_GROUP){return item.name;}});
for(var i=0; i<dataMap.length; i++){
if(map.indexOf(dataMap[i]) !== -1){
new CModalAlert(currCPage,_('Error'),_('Copying error'));
return false;
}
}
obj = this.parentItem.data[toAdd];
}
var ansvCode = this.addChannelsToList(obj.data, JSON.parse(JSON.stringify(data)), false, true);
if(ansvCode !== 0){
this.parent.actionFileDelete(toDelete,deleteList);
}
this.Refresh(false);
if(toAdd === -1){
this.Focused(this.handle.children[this.channelStart+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
} else {
this.Focused(this.handle.children[toAdd+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
}
if (this.isActive) {
this.Activate();
}
return true;
};
/**
* Add channels to group
* @param {Array} list - group to add
* @param {Array} data - channels array
* @param {boolean} [refocus] - need refocus
* @param {boolean} [noRefresh] - don't refresh list
* @return {boolean|number} true if channels were added and the list was re-checked (0 on copying error)
*/
TVList.prototype.addChannelsToList = function (list, data, refocus, noRefresh) {
echo(list,'ADD TO LIST');
echo(data,'ADD DATA');
var needCheck = false, tempList = [], channelStart = -1;
var map = this.path.map(function(item){return item.data;});
var mapList = list.map(function(item){return item.type;});
channelStart = mapList.lastIndexOf(MEDIA_TYPE_GROUP);
if (data){
for (var i = 0; i < data.length; i++) {
if ( data[i].type === MEDIA_TYPE_GROUP && map.indexOf(data[i].data) !== -1 ){
new CModalAlert(this.parent,_('Error'),_('Copying error'));
return 0;
}
}
for ( i = 0; i < data.length; i++ ) {
var needAdd = true;
for (var j = 0; j < list.length; j++) {
if (list[j].name === data[i].name && list[j].type === data[i].type && (list[j].url === data[i].url || list[j].sol +' '+ list[j].url === data[i].url || list[j].url === data[i].sol +' '+ data[i].url)) {
needAdd = false;
break;
}
}
if (needAdd) {
if(data[i].type === MEDIA_TYPE_GROUP){
channelStart++;
list.splice(channelStart, 0, data[i]);
} else {
list.push(data[i]);
}
needCheck = true;
}
}
}
if(needCheck){
tempList = list.slice();
tempList = IPTVChannels.checkTS_data(tempList,true);
list = tempList.a;
}
this.parent.needSave = true;
if( !noRefresh ){
this.Refresh(refocus);
}
return needCheck;
};
| // parent constructor
CScrollList.call(this, parent);
/**
* link to the object for limited scopes
* @type {TVList}
*/
var self = this;
/**
* link to the BreadCrumb component
* @type {CBreadCrumb}
*/
this.bcrumb = null;
/**
* link to the SearchBar component
* @type {CSearchBar}
*/
this.sbar = null;
/**
* type filter for file listing
* @type Number
*/
this.filterType = MEDIA_TYPE_NONE;
/**
* data filter for file listing
* @type String
*/
this.filterText = '';
/**
* hierarchy change flag: no change
* @type {number}
*/
this.LEVEL_CHANGE_NONE = 0;
/**
* hierarchy change flag: go level up
* @type {number}
*/
this.LEVEL_CHANGE_UP = -1;
/**
* hierarchy change flag: go level deeper
* @type {number}
*/
this.LEVEL_CHANGE_DOWN = 1;
/**
* list of all media types on the current level
* @type {Array}
*/
this.mtypes = [];
/**
* list of media objects data
* full chain from the root
* @type {[Object]}
*/
this.path = [];
/**
* current media object opened
* @type {Object}
*/
this.parentItem = {};
this.timer = {};
this.prevChannel = null;
this.lastChannel = null;
/**
* list of action mapped to the media types
* @type {[Function]}
*/
this.openAction = {};
this.openAction[MEDIA_TYPE_BACK] = function(){
var st = this.Back();
return st;
};
this.openAction[MEDIA_TYPE_TV_ROOT] = function () {
var j = 0;
var item = null;
this.channelStart = -1;
this.Clear();
if ( this.filterText) {
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
this.channelStart++;
}
if (this.data.length > 0) {
for (var i = 0; i < this.data.length; i++) {
if (this.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: this.data[i].name}, {name: this.data[i].name, index: i, markable: true, data: this.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: this.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && this.data[i].tsOn}, {name: this.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(this.data[i].sol? this.data[i].sol + ' ' : '') + this.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop';
} else {
if ( this.filterText) {
self.parent.domURL.innerHTML = '';
self.parent.domInfoTitle.innerHTML = '';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop defImage';
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_GROUP] = function (data) {
var item = null;
this.Clear();
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
var j = 0;
this.channelStart = -1;
if (data.data.length > 0) {
for (var i = 0; i < data.data.length; i++) {
if (data.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: data.data[i].name}, {name: data.data[i].name, index: i, markable: true, data: data.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: data.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && data.data[i].tsOn}, {name: data.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(data.data[i].sol? data.data[i].sol + ' ' : '') + data.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_STREAM] = function (data, noPlay) {
if (MediaPlayer.obj !== self.parentItem.data[data.index] && !noPlay) {
MediaPlayer.preparePlayer(self.parentItem.data[data.index], this.parent, true, true, true);
} else {
if (MediaPlayer.ts_inProgress) {
if ( environment.ts_icon ){
MediaPlayer.domTSIndicator.style.display = 'block';
}
MediaPlayer.runner.start();
}
MediaPlayer.Show(true, this.parent);
MediaPlayer.showInfo(true);
MediaPlayer.timer.showInfo = setTimeout(function () {
MediaPlayer.showInfo(false);
}, 3000);
}
return this.LEVEL_CHANGE_NONE;
};
}
| identifier_body |
tvlist.js | 'use strict';
/**
* @class TVList
* @constructor
* @author Roman Stoian
*/
function TVList(parent) {
// parent constructor
CScrollList.call(this, parent);
/**
* link to the object for limited scopes
* @type {TVList}
*/
var self = this;
/**
* link to the BreadCrumb component
* @type {CBreadCrumb}
*/
this.bcrumb = null;
/**
* link to the SearchBar component
* @type {CSearchBar}
*/
this.sbar = null;
/**
* type filter for file listing
* @type Number
*/
this.filterType = MEDIA_TYPE_NONE;
/**
* data filter for file listing
* @type String
*/
this.filterText = '';
/**
* hierarchy change flag: no change
* @type {number}
*/
this.LEVEL_CHANGE_NONE = 0;
/**
* hierarchy change flag: go level up
* @type {number}
*/
this.LEVEL_CHANGE_UP = -1;
/**
* hierarchy change flag: go level deeper
* @type {number}
*/
this.LEVEL_CHANGE_DOWN = 1;
/**
* list of all media types on the current level
* @type {Array}
*/
this.mtypes = [];
/**
* list of media objects data
* full chain from the root
* @type {[Object]}
*/
this.path = [];
/**
* current media object opened
* @type {Object}
*/
this.parentItem = {};
this.timer = {};
this.prevChannel = null;
this.lastChannel = null;
/**
* list of action mapped to the media types
* @type {[Function]}
*/
this.openAction = {};
this.openAction[MEDIA_TYPE_BACK] = function(){
var st = this.Back();
return st;
};
this.openAction[MEDIA_TYPE_TV_ROOT] = function () {
var j = 0;
var item = null;
this.channelStart = -1;
this.Clear();
if ( this.filterText) {
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
this.channelStart++;
}
if (this.data.length > 0) {
for (var i = 0; i < this.data.length; i++) {
if (this.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: this.data[i].name}, {name: this.data[i].name, index: i, markable: true, data: this.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: this.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && this.data[i].tsOn}, {name: this.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(this.data[i].sol? this.data[i].sol + ' ' : '') + this.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop';
} else {
if ( this.filterText) {
self.parent.domURL.innerHTML = '';
self.parent.domInfoTitle.innerHTML = '';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop defImage';
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_GROUP] = function (data) {
var item = null;
this.Clear();
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
var j = 0;
this.channelStart = -1;
if (data.data.length > 0) {
for (var i = 0; i < data.data.length; i++) {
if (data.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: data.data[i].name}, {name: data.data[i].name, index: i, markable: true, data: data.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: data.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && data.data[i].tsOn}, {name: data.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(data.data[i].sol? data.data[i].sol + ' ' : '') + data.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_STREAM] = function (data, noPlay) {
if (MediaPlayer.obj !== self.parentItem.data[data.index] && !noPlay) {
MediaPlayer.preparePlayer(self.parentItem.data[data.index], this.parent, true, true, true);
} else {
if (MediaPlayer.ts_inProgress) {
if ( environment.ts_icon ){
MediaPlayer.domTSIndicator.style.display = 'block';
}
MediaPlayer.runner.start();
}
MediaPlayer.Show(true, this.parent);
MediaPlayer.showInfo(true);
MediaPlayer.timer.showInfo = setTimeout(function () {
MediaPlayer.showInfo(false);
}, 3000);
}
return this.LEVEL_CHANGE_NONE;
};
}
// extending |
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetBreadCrumb = function ( component ) {
this.bcrumb = component;
};
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetSearchBar = function ( component ) {
this.sbar = component;
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function(item){
// check file name if regular file
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
// check file type if regular file
var type_ok = item.data.type === self.filterType || self.filterType === MEDIA_TYPE_NONE || item.data.type === MEDIA_TYPE_BACK;
// hide not matching items
self.Hidden(item, !(text_ok && type_ok));
});
};
/**
* Finds the first appropriate item
* @param {string} value
* @return {Node}
*/
TVList.prototype.FirstMatch = function ( value ) {
// preparing
var items;
if ( value === '' ) {
return null;
}
items = this.handleInner.children; // all list items
// iterate over the items until the first match is found
for ( var i = 0; i < items.length; i++ ) {
// current candidate item
var item = items[i];
// check file name if regular file
if ( item.data.type !== MEDIA_TYPE_BACK && item.data.name && item.data.name.toLowerCase().indexOf(value.toLowerCase()) !== -1 ) {
return item;
}
}
return null;
};
/**
* Create new item and put it in the list
* @param {Object} obj item label data (name, number, tsOn)
* @param {Object} attrs set of item data parameters
* @param {Object} states set of additional parameters (stared)
* @return {Node}
*/
TVList.prototype.Add = function (obj, attrs, states) {
var self = this, number;
// is it necessary to filter
if ( this.filterText) { // || this.filterType !== MEDIA_TYPE_NONE
// check file name if regular file
var text_ok = attrs.type === MEDIA_TYPE_BACK || (obj.name && obj.name.toLowerCase().indexOf(this.filterText.toLowerCase()) !== -1);
// check file type if regular file
var type_ok = attrs.type === this.filterType || this.filterType === MEDIA_TYPE_NONE || attrs.type === MEDIA_TYPE_BACK;
// hide not matching items
if ( !(text_ok && type_ok) ) {
return null;
}
}
if (this.mtypes.indexOf(attrs.type) === -1) {
this.mtypes.push(attrs.type);
}
// html prepare
var body = element('div', {className: 'data'}, obj.name);
var star = element('div', {className: 'star'});
if (obj.number) {
number = element('div', {className: 'number'}, obj.number);
} else {
number = element('div', {className: 'number'});
number.style.background = 'url("' + PATH_IMG_PUBLIC + 'media/type_' + attrs.type + '.png") no-repeat center';
}
var timeshift = element('div', {className: obj.tsOn? 'timeshift tsOn' : 'timeshift'});
// decoration
// make sure name is set
if (!attrs.name) {
attrs.name = obj.name;
}
// actual filling
var item = CScrollList.prototype.Add.call(this, [number, body, timeshift, star], {
star: star,
data: attrs,
// handlers
onclick: function () {
// open or enter the item
this.self.Open(this.data);
return false;
},
oncontextmenu: EMULATION ? null : function () {
// mark/unmark the item
self.parent.actionF2(false);
return false;
}
});
if(obj.number){
item.domNumber = number;
}
// mark as favourite
if (states && states.stared) {
item.self.SetStar(item, true);
}
return item;
};
/**
* Set inner item flags and decoration
* @param {Node} item the element to be processed
* @param {boolean} state flag of the operation (true if change is made)
*/
TVList.prototype.SetStar = function (item, state) {
if (item.stared === state) {
return;
}
this.SetState(item, 'stared', state);
if (state !== false) {
item.star.style.background = 'url("' + PATH_IMG_PUBLIC + 'ico_fav_s.png") no-repeat right';
} else {
item.star.style.background = 'none';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, state !== false ? true : false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, state !== false ? false : true);
};
/**
* Hook method on focus item change
* @param {Node} item the new focused item
*/
TVList.prototype.onFocus = function (item) {
var self = this;
if ( MediaPlayer.ts_inProgress ) {
if (MediaPlayer.tsExitCheck('focus', item)) {
return true;
}
}
clearTimeout(this.timer.OnFocusPlay);
if ( item.data.markable ) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
}
if (item.data.type === MEDIA_TYPE_STREAM) {
if (!this.states.marked || this.states.marked.length === 0) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, item.stared);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, !item.stared);
} else {
echo(this.states.marked[0].data,'this.states.marked');
}
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
self.parent.clearEPG();
this.timer.OnFocusPlay = setTimeout(function () {
if ( item.data.type === MEDIA_TYPE_BACK ){
if(self.filterText){
self.parent.domInfoTitle.innerHTML = _('Contains the list of items corresponding to the given filter request');
} else {
self.parent.domInfoTitle.innerHTML = self.parentItem.name?self.parentItem.name:'';
}
} else {
self.parent.initEPGNow();
self.parent.domInfoTitle.innerHTML = item.data.name?item.data.name:'';
}
if (item.data.type === MEDIA_TYPE_STREAM) {
self.prevChannel = self.lastChannel;
self.lastChannel = self.parentItem.data[item.data.index];
self.parent.domURL.innerHTML = (self.parentItem.data[item.data.index].sol && self.parentItem.data[item.data.index].sol !==''?self.parentItem.data[item.data.index].sol+' ':'')+self.parentItem.data[item.data.index].url;
if ( MediaPlayer.obj !== self.parentItem.data[item.data.index] ) {
MediaPlayer.preparePlayer(self.parentItem.data[item.data.index], self.parent, true, false, true);
}
if ( self.parent.pvr.arr.length ) {
self.parent.pvr.check(true);
}
} else {
self.lastChannel = null;
self.parent.domURL.innerHTML = '';
MediaPlayer.end();
}
}, 500);
return false;
};
/**
* Reset and clear all items
* This makes the component ready to be filled again.
*/
TVList.prototype.Clear = function () {
CScrollList.prototype.Clear.call(this);
this.parent.domURL.innerHTML = '';
this.parent.domInfoTitle.innerHTML = '';
this.filterType = MEDIA_TYPE_NONE;
this.mtypes = [];
};
/**
* Move one level up
*/
TVList.prototype.Back = function () {
var self = this;
// there are some levels
if ( this.path.length > 1 ) {
// exiting from favs and there are some changes
// normal exit
this.path.pop();
self.lastChannel = null;
if ( this.bcrumb ) {
this.bcrumb.Pop();
}
// render the previous level
this.Build(this.path[this.path.length-1]);
// apply specific button visibility
setTimeout(function(){
self.onFocus(self.Current());
}, 0);
// go up
return this.LEVEL_CHANGE_UP;
}
// stay here
return this.LEVEL_CHANGE_NONE;
};
/**
* Go to channel by number
* @param {number} number
*/
TVList.prototype.goToChannel = function (number) {
if (this.handle.children.length > number && number > this.channelStart) {
this.Focused(this.handle.children[number+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
this.SetPosition(this.activeItem);
}
};
/**
* Enter the item or open it
* @param {Object} data media item inner data
*/
TVList.prototype.Open = function (data, noPlay) {
// render the list
echo(data,'data Open');
var levelChange = this.Build(data, noPlay);
// level changed
if ( levelChange !== this.LEVEL_CHANGE_NONE ) {
// reset tray filter icon
if ( this.parent.Tray.iconFilter.parentNode === this.parent.Tray.handleInner ) {
this.parent.Tray.handleInner.removeChild(this.parent.Tray.iconFilter);
}
// and hide at all if not necessary
this.parent.Tray.Show(globalBuffer.size() > 0, false);
// particular direction
if ( levelChange === this.LEVEL_CHANGE_DOWN ) {
// build breadcrumbs
if ( this.filterText ) {
// filter
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/ico_filter.png', this.filterText);
}
} else {
// default
// build breadcrumbs
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/type_'+data.type+'.png', data.name ? data.name : '');
}
}
// save this step
this.path.push(data);
// set focus to the first item
this.Activate(true);
if( data.data && data.data.length ){ this.onFocus(this.activeItem); }
} else {
// go up
if ( !this.Reposition(this.parentItem) ){
this.Activate(true);
}
}
// current level item
this.parentItem = this.path[this.path.length-1];
}
return levelChange;
};
/**
* Open root, clear all breadcrumbs, search options
*/
TVList.prototype.Reset = function () {
this.parentItem = null;
this.path = [];
this.Clear();
// linked components
if ( this.bcrumb ) {
this.bcrumb.Reset();
}
if ( this.sbar ) {
this.sbar.Reset();
}
};
/**
* Renders the given media item by executing associated action
* @param {Object} data media item inner data
*/
TVList.prototype.Build = function (data, noPlay) {
var levelChange = this.LEVEL_CHANGE_NONE;
// apply filter parameter from the current node
this.filterText = data.filterText ? data.filterText : '';
// get item associated open action and execute
if ( data && data.type && typeof this.openAction[data.type] === 'function' ) {
levelChange = this.openAction[data.type].call(this, data, noPlay);
} else {
// wrong item type
new CModalAlert(this.parent, _('Error'), _('Unknown type of selected item'), _('Close'));
}
return levelChange;
};
/**
* Clear the list and fill it again (will try to refocus)
* @param {boolean} [refocus=true] if true then try to set focus to the previous focused element
*/
TVList.prototype.Refresh = function (refocus) {
var data = {data : null};
// some media item is opened at the moment
if ( this.parentItem !== null ) {
// get current focused item
this.Build(this.parentItem);
if ( refocus !== false) {
if ( this.activeItem ) {
data = this.activeItem;
} else {
data = this.FirstMatch(this.filterText);
}
}
// refill
// find it in the new list if necessary
if ( data && data.data ) {
this.Reposition(data.data);
} else {
this.Reposition(data);
}
}
};
/**
* Refresh item indexes and channel numbers in the DOM objects
*/
TVList.prototype.RefreshIndex = function () {
this.channelStart = this.parentItem.type === MEDIA_TYPE_GROUP? 0 : -1;
var i = 0,
j = 1,
items = this.handleInner.children,
delta = 0;
if ( this.parentItem !== null && items.length ) {
delta = items[0].data.type === MEDIA_TYPE_BACK? 1: 0;
for(i = delta; i < items.length; i++){
items[i].data.index = i - delta;
if(items[i].data.type === MEDIA_TYPE_GROUP){
this.channelStart = i;
} else {
items[i].data.number = j;
items[i].domNumber.innerHTML = j;
j++;
}
}
} else {
this.Refresh(true);
}
};
/**
* Moves the cursor to the given element
* @param {Object} data
* @return {boolean} operation status
*/
TVList.prototype.Reposition = function (data) {
var change = false;
if ( data ) {
for ( var item, i = 0, l = this.Length(); i < l; i++ ) {
item = this.handleInner.children[i];
// url and type match
if (data.index === item.data.index || (data.data && !data.index && data.data.index === item.data.index)) {
change = this.Focused(item, true);
this.SetPosition(this.activeItem); // focus handle bug fix (even items per page problem)
if(!change){
this.onFocus(item);
}
return change;
}
}
}
return false;
};
/**
* Handle checked state for the given item according to the file type.
* Mark only items available for marking.
* @param {Node} item the element to be processed
* @param {boolean} state flag of the state
* @return {boolean} operation status
*/
TVList.prototype.Marked = function (item, state) {
// item exists and only allowed types
if (item && item.data && item.data.markable) {
// parent marking
return CScrollList.prototype.Marked.call(this, item, state);
}
return false;
};
/**
* Show/hide file items according to the specified filter options
* @param {string} text filter file name option
*/
TVList.prototype.SetFilterText = function (text) {
echo('enter to SetFilterText : ' + this.filterText);
// set global (case conversion for future string comparison speedup)
this.filterText = text.toLowerCase();
// apply filter
this.Filter();
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function (item) {
// check file name if regular file
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
self.Hidden(item, !text_ok);
});
};
/**
* Return all appropriate items available for actions (either marked or current with suitable type)
* @return {Array} list of found Nodes
*/
TVList.prototype.ActiveItems = function () {
// get all marked items
var items = this.states.marked ? this.states.marked.slice() : [];
// no marked, check current and its type
if (items.length === 0 && this.activeItem && this.activeItem.data.markable) {
items.push(this.activeItem);
}
return items;
};
/**
* Create group of channels
*/
TVList.prototype.createGroup = function (name, data, toDelete, deleteList) {
var obj = {}, dataMap = [];
var map = this.parentItem.data.map(function(item){return item.type === MEDIA_TYPE_GROUP?item.name:null;});
var toAdd = map.indexOf(name);
echo(toDelete,'toDelete');
if(toAdd === -1){
obj.name = name;
obj.type = MEDIA_TYPE_GROUP;
obj.data = [];
this.parentItem.data.splice(this.channelStart + 1, 0, obj);
} else {
dataMap = data.map(function(item){if(item.type === MEDIA_TYPE_GROUP){return item.name;}});
for(var i=0; i<dataMap.length; i++){
if(map.indexOf(dataMap[i]) !== -1){
new CModalAlert(currCPage,_('Error'),_('Copying error'));
return false;
}
}
obj = this.parentItem.data[toAdd];
}
var ansvCode = this.addChannelsToList(obj.data, JSON.parse(JSON.stringify(data)), false, true);
if(ansvCode !== 0){
this.parent.actionFileDelete(toDelete,deleteList);
}
this.Refresh(false);
if(toAdd === -1){
this.Focused(this.handle.children[this.channelStart+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
} else {
this.Focused(this.handle.children[toAdd+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
}
if (this.isActive) {
this.Activate();
}
return true;
};
/**
* Add channels to group
* @param {Array} list - group to add
* @param {Array} data - channels array
* @param {boolean} [refocus] - need refocus
* @param {boolean} [noRefresh] - don't refresh list
* @return {boolean|number} true if channels were added and the list was re-checked (0 on copying error)
*/
TVList.prototype.addChannelsToList = function (list, data, refocus, noRefresh) {
echo(list,'ADD TO LIST');
echo(data,'ADD DATA');
var needCheck = false, tempList = [], channelStart = -1;
var map = this.path.map(function(item){return item.data;});
var mapList = list.map(function(item){return item.type;});
channelStart = mapList.lastIndexOf(MEDIA_TYPE_GROUP);
if (data){
for (var i = 0; i < data.length; i++) {
if ( data[i].type === MEDIA_TYPE_GROUP && map.indexOf(data[i].data) !== -1 ){
new CModalAlert(this.parent,_('Error'),_('Copying error'));
return 0;
}
}
for ( i = 0; i < data.length; i++ ) {
var needAdd = true;
for (var j = 0; j < list.length; j++) {
if (list[j].name === data[i].name && list[j].type === data[i].type && (list[j].url === data[i].url || list[j].sol +' '+ list[j].url === data[i].url || list[j].url === data[i].sol +' '+ data[i].url)) {
needAdd = false;
break;
}
}
if (needAdd) {
if(data[i].type === MEDIA_TYPE_GROUP){
channelStart++;
list.splice(channelStart, 0, data[i]);
} else {
list.push(data[i]);
}
needCheck = true;
}
}
}
if(needCheck){
tempList = list.slice();
tempList = IPTVChannels.checkTS_data(tempList,true);
list = tempList.a;
}
this.parent.needSave = true;
if( !noRefresh ){
this.Refresh(refocus);
}
return needCheck;
}; | TVList.prototype = Object.create(CScrollList.prototype);
TVList.prototype.constructor = TVList; | random_line_split |
tvlist.js | 'use strict';
/**
* @class TVList
* @constructor
* @author Roman Stoian
*/
function TV | arent) {
// parent constructor
CScrollList.call(this, parent);
/**
* link to the object for limited scopes
* @type {TVList}
*/
var self = this;
/**
* link to the BreadCrumb component
* @type {CBreadCrumb}
*/
this.bcrumb = null;
/**
* link to the SearchBar component
* @type {CSearchBar}
*/
this.sbar = null;
/**
* type filter for file listing
* @type Number
*/
this.filterType = MEDIA_TYPE_NONE;
/**
* data filter for file listing
* @type String
*/
this.filterText = '';
/**
* hierarchy change flag: no change
* @type {number}
*/
this.LEVEL_CHANGE_NONE = 0;
/**
* hierarchy change flag: go level up
* @type {number}
*/
this.LEVEL_CHANGE_UP = -1;
/**
* hierarchy change flag: go level deeper
* @type {number}
*/
this.LEVEL_CHANGE_DOWN = 1;
/**
* list of all media types on the current level
* @type {Array}
*/
this.mtypes = [];
/**
* list of media objects data
* full chain from the root
* @type {[Object]}
*/
this.path = [];
/**
* current media object opened
* @type {Object}
*/
this.parentItem = {};
this.timer = {};
this.prevChannel = null;
this.lastChannel = null;
/**
* list of action mapped to the media types
* @type {[Function]}
*/
this.openAction = {};
this.openAction[MEDIA_TYPE_BACK] = function(){
var st = this.Back();
return st;
};
this.openAction[MEDIA_TYPE_TV_ROOT] = function () {
var j = 0;
var item = null;
this.channelStart = -1;
this.Clear();
if ( this.filterText) {
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
this.channelStart++;
}
if (this.data.length > 0) {
for (var i = 0; i < this.data.length; i++) {
if (this.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: this.data[i].name}, {name: this.data[i].name, index: i, markable: true, data: this.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: this.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && this.data[i].tsOn}, {name: this.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(this.data[i].sol? this.data[i].sol + ' ' : '') + this.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop';
} else {
if ( this.filterText) {
self.parent.domURL.innerHTML = '';
self.parent.domInfoTitle.innerHTML = '';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop defImage';
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_GROUP] = function (data) {
var item = null;
this.Clear();
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
var j = 0;
this.channelStart = -1;
if (data.data.length > 0) {
for (var i = 0; i < data.data.length; i++) {
if (data.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: data.data[i].name}, {name: data.data[i].name, index: i, markable: true, data: data.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: data.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && data.data[i].tsOn}, {name: data.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(data.data[i].sol? data.data[i].sol + ' ' : '') + data.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_STREAM] = function (data, noPlay) {
if (MediaPlayer.obj !== self.parentItem.data[data.index] && !noPlay) {
MediaPlayer.preparePlayer(self.parentItem.data[data.index], this.parent, true, true, true);
} else {
if (MediaPlayer.ts_inProgress) {
if ( environment.ts_icon ){
MediaPlayer.domTSIndicator.style.display = 'block';
}
MediaPlayer.runner.start();
}
MediaPlayer.Show(true, this.parent);
MediaPlayer.showInfo(true);
MediaPlayer.timer.showInfo = setTimeout(function () {
MediaPlayer.showInfo(false);
}, 3000);
}
return this.LEVEL_CHANGE_NONE;
};
}
// extending
TVList.prototype = Object.create(CScrollList.prototype);
TVList.prototype.constructor = TVList;
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetBreadCrumb = function ( component ) {
this.bcrumb = component;
};
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetSearchBar = function ( component ) {
this.sbar = component;
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function(item){
// check file name if regular file
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
// check file type if regular file
var type_ok = item.data.type === self.filterType || self.filterType === MEDIA_TYPE_NONE || item.data.type === MEDIA_TYPE_BACK;
// hide not matching items
self.Hidden(item, !(text_ok && type_ok));
});
};
/**
* Finds the first appropriate item
* @param {string} value
* @return {Node}
*/
TVList.prototype.FirstMatch = function ( value ) {
// preparing
var items;
if ( value === '' ) {
return null;
}
items = this.handleInner.children; // all list items
// iterate over the items until the first match is found
for ( var i = 0; i < items.length; i++ ) {
// current candidate item
var item = items[i];
// match by item name (the '..' back item is skipped)
if ( item.data.type !== MEDIA_TYPE_BACK && item.data.name && item.data.name.toLowerCase().indexOf(value.toLowerCase()) !== -1 ) {
return item;
}
}
return null;
};
/**
* Create new item and put it in the list
* @param {Object} obj item display data (name, number, tsOn)
* @param {Object} attrs set of item data parameters
* @param {Object} states set of additional parameters (stared)
* @return {Node}
*/
TVList.prototype.Add = function (obj, attrs, states) {
var self = this, number;
// is it necessary to filter
if ( this.filterText) { // || this.filterType !== MEDIA_TYPE_NONE
// check the item name against the current filter text ('..' always passes)
var text_ok = attrs.type === MEDIA_TYPE_BACK || (obj.name && obj.name.toLowerCase().indexOf(this.filterText.toLowerCase()) !== -1);
// check the item type against the current filter type ('..' always passes)
var type_ok = attrs.type === this.filterType || this.filterType === MEDIA_TYPE_NONE || attrs.type === MEDIA_TYPE_BACK;
// hide not matching items
if ( !(text_ok && type_ok) ) {
return null;
}
}
if (this.mtypes.indexOf(attrs.type) === -1) {
this.mtypes.push(attrs.type);
}
// html prepare
var body = element('div', {className: 'data'}, obj.name);
var star = element('div', {className: 'star'});
if (obj.number) {
number = element('div', {className: 'number'}, obj.number);
} else {
number = element('div', {className: 'number'});
number.style.background = 'url("' + PATH_IMG_PUBLIC + 'media/type_' + attrs.type + '.png") no-repeat center';
}
var timeshift = element('div', {className: obj.tsOn? 'timeshift tsOn' : 'timeshift'});
// decoration
// make sure name is set
if (!attrs.name) {
attrs.name = obj.name;
}
// actual filling
var item = CScrollList.prototype.Add.call(this, [number, body, timeshift, star], {
star: star,
data: attrs,
// handlers
onclick: function () {
// open or enter the item
this.self.Open(this.data);
return false;
},
oncontextmenu: EMULATION ? null : function () {
// mark/unmark the item
self.parent.actionF2(false);
return false;
}
});
if(obj.number){
item.domNumber = number;
}
// mark as favourite
if (states && states.stared) {
item.self.SetStar(item, true);
}
return item;
};
/**
* Set inner item flags and decoration
* @param {Node} item the element to be processed
* @param {boolean} state new favourite (stared) state
*/
TVList.prototype.SetStar = function (item, state) {
if (item.stared === state) {
return;
}
this.SetState(item, 'stared', state);
if (state !== false) {
item.star.style.background = 'url("' + PATH_IMG_PUBLIC + 'ico_fav_s.png") no-repeat right';
} else {
item.star.style.background = 'none';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, state !== false ? true : false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, state !== false ? false : true);
};
/**
* Hook method on focus item change
* @param {Node} item the new focused item
*/
TVList.prototype.onFocus = function (item) {
var self = this;
if ( MediaPlayer.ts_inProgress ) {
if (MediaPlayer.tsExitCheck('focus', item)) {
return true;
}
}
clearTimeout(this.timer.OnFocusPlay);
if ( item.data.markable ) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
}
if (item.data.type === MEDIA_TYPE_STREAM) {
if (!this.states.marked || this.states.marked.length === 0) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, item.stared);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, !item.stared);
} else {
echo(this.states.marked[0].data,'this.states.marked');
}
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
self.parent.clearEPG();
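// delay the EPG/preview update by 500 ms so that fast scrolling does not start playback for every focused channel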
this.timer.OnFocusPlay = setTimeout(function () {
if ( item.data.type === MEDIA_TYPE_BACK ){
if(self.filterText){
self.parent.domInfoTitle.innerHTML = _('Contains the list of items corresponding to the given filter request');
} else {
self.parent.domInfoTitle.innerHTML = self.parentItem.name?self.parentItem.name:'';
}
} else {
self.parent.initEPGNow();
self.parent.domInfoTitle.innerHTML = item.data.name?item.data.name:'';
}
if (item.data.type === MEDIA_TYPE_STREAM) {
self.prevChannel = self.lastChannel;
self.lastChannel = self.parentItem.data[item.data.index];
self.parent.domURL.innerHTML = (self.parentItem.data[item.data.index].sol && self.parentItem.data[item.data.index].sol !==''?self.parentItem.data[item.data.index].sol+' ':'')+self.parentItem.data[item.data.index].url;
if ( MediaPlayer.obj !== self.parentItem.data[item.data.index] ) {
MediaPlayer.preparePlayer(self.parentItem.data[item.data.index], self.parent, true, false, true);
}
if ( self.parent.pvr.arr.length ) {
self.parent.pvr.check(true);
}
} else {
self.lastChannel = null;
self.parent.domURL.innerHTML = '';
MediaPlayer.end();
}
}, 500);
return false;
};
/**
* Reset and clear all items
* This will make the component ready for a new filling.
*/
TVList.prototype.Clear = function () {
CScrollList.prototype.Clear.call(this);
this.parent.domURL.innerHTML = '';
this.parent.domInfoTitle.innerHTML = '';
this.filterType = MEDIA_TYPE_NONE;
this.mtypes = [];
};
/**
* Move one level up
*/
TVList.prototype.Back = function () {
var self = this;
// there are some levels
if ( this.path.length > 1 ) {
// exiting from favs and there are some changes
// normal exit
this.path.pop();
self.lastChannel = null;
if ( this.bcrumb ) {
this.bcrumb.Pop();
}
// render the previous level
this.Build(this.path[this.path.length-1]);
// apply specific button visibility
setTimeout(function(){
self.onFocus(self.Current());
}, 0);
// go up
return this.LEVEL_CHANGE_UP;
}
// stay here
return this.LEVEL_CHANGE_NONE;
};
/**
* Go to channel by number
* @param {number} number
*/
TVList.prototype.goToChannel = function (number) {
if (this.handle.children.length > number && number > this.channelStart) {
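// shift the index by one when the first visible list item is the '..' back entry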
this.Focused(this.handle.children[number+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
this.SetPosition(this.activeItem);
}
};
/**
* Enter the item or open it
* @param {Object} data media item inner data
* @param {boolean} [noPlay] if true, do not start playback when opening a stream
*/
TVList.prototype.Open = function (data, noPlay) {
// render the list
echo(data,'data Open');
var levelChange = this.Build(data, noPlay);
// level changed
if ( levelChange !== this.LEVEL_CHANGE_NONE ) {
// reset tray filter icon
if ( this.parent.Tray.iconFilter.parentNode === this.parent.Tray.handleInner ) {
this.parent.Tray.handleInner.removeChild(this.parent.Tray.iconFilter);
}
// and hide at all if not necessary
this.parent.Tray.Show(globalBuffer.size() > 0, false);
// particular direction
if ( levelChange === this.LEVEL_CHANGE_DOWN ) {
// build breadcrumbs
if ( this.filterText ) {
// filter
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/ico_filter.png', this.filterText);
}
} else {
// default
// build breadcrumbs
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/type_'+data.type+'.png', data.name ? data.name : '');
}
}
// save this step
this.path.push(data);
// set focus to the first item
this.Activate(true);
if( data.data && data.data.length ){ this.onFocus(this.activeItem); }
} else {
// go up
if ( !this.Reposition(this.parentItem) ){
this.Activate(true);
}
}
// current level item
this.parentItem = this.path[this.path.length-1];
}
return levelChange;
};
/**
* Open root, clear all breadcrumbs, search options
*/
TVList.prototype.Reset = function () {
this.parentItem = null;
this.path = [];
this.Clear();
// linked components
if ( this.bcrumb ) {
this.bcrumb.Reset();
}
if ( this.sbar ) {
this.sbar.Reset();
}
};
/**
* Renders the given media item by executing associated action
* @param {Object} data media item inner data
* @param {boolean} [noPlay] passed through to the open action (prevents playback start for streams)
*/
TVList.prototype.Build = function (data, noPlay) {
var levelChange = this.LEVEL_CHANGE_NONE;
// apply filter parameter from the current node
this.filterText = data.filterText ? data.filterText : '';
// get item associated open action and execute
if ( data && data.type && typeof this.openAction[data.type] === 'function' ) {
levelChange = this.openAction[data.type].call(this, data, noPlay);
} else {
// wrong item type
new CModalAlert(this.parent, _('Error'), _('Unknown type of selected item'), _('Close'));
}
return levelChange;
};
/**
* Clear the list and fill it again (will try to refocus)
* @param {boolean} [refocus=true] if true then try to set focus to the previous focused element
*/
TVList.prototype.Refresh = function (refocus) {
var data = {data : null};
// some media item is opened at the moment
if ( this.parentItem !== null ) {
// get current focused item
this.Build(this.parentItem);
if ( refocus !== false) {
if ( this.activeItem ) {
data = this.activeItem;
} else {
data = this.FirstMatch(this.filterText);
}
}
// refill
// find it in the new list if necessary
if ( data && data.data ) {
this.Reposition(data.data);
} else {
this.Reposition(data);
}
}
};
/**
* refresh list index in dom objects
*/
TVList.prototype.RefreshIndex = function () {
this.channelStart = this.parentItem.type === MEDIA_TYPE_GROUP? 0 : -1;
var i = 0,
j = 1,
items = this.handleInner.children,
delta = 0;
if ( this.parentItem !== null && items.length ) {
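// the leading '..' back item (if present) is excluded from renumbering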
delta = items[0].data.type === MEDIA_TYPE_BACK? 1: 0;
for(i = delta; i < items.length; i++){
items[i].data.index = i - delta;
if(items[i].data.type === MEDIA_TYPE_GROUP){
this.channelStart = i;
} else {
items[i].data.number = j;
items[i].domNumber.innerHTML = j;
j++;
}
}
} else {
this.Refresh(true);
}
};
/**
* Moves the cursor to the given element
* @param {Object} data
* @return {boolean} operation status
*/
TVList.prototype.Reposition = function (data) {
var change = false;
if ( data ) {
for ( var item, i = 0, l = this.Length(); i < l; i++ ) {
item = this.handleInner.children[i];
// match by the stored item index (directly or via nested data)
if (data.index === item.data.index || (data.data && !data.index && data.data.index === item.data.index)) {
change = this.Focused(item, true);
this.SetPosition(this.activeItem); // focus handle bug fix (even items per page problem)
if(!change){
this.onFocus(item);
}
return change;
}
}
}
return false;
};
/**
* Handle checked state for the given item according to the file type.
* Mark only items available for marking.
* @param {Node} item the element to be processed
* @param {boolean} state flag of the state
* @return {boolean} operation status
*/
TVList.prototype.Marked = function (item, state) {
// item exists and only allowed types
if (item && item.data && item.data.markable) {
// parent marking
return CScrollList.prototype.Marked.call(this, item, state);
}
return false;
};
/**
* Show/hide file items according to the specified filter options
* @param {string} text filter file name option
*/
TVList.prototype.SetFilterText = function (text) {
echo('enter to SetFilterText : ' + this.filterText);
// set global (case conversion for future string comparison speedup)
this.filterText = text.toLowerCase();
// apply filter
this.Filter();
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function (item) {
// check the item name against the current filter text ('..' always passes)
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
self.Hidden(item, !text_ok);
});
};
/**
* Return all appropriate items available for actions (either marked or current with suitable type)
* @return {Array} list of found Nodes
*/
TVList.prototype.ActiveItems = function () {
// get all marked items
var items = this.states.marked ? this.states.marked.slice() : [];
// no marked, check current and its type
if (items.length === 0 && this.activeItem && this.activeItem.data.markable) {
items.push(this.activeItem);
}
return items;
};
/**
* Create group of channels
*/
TVList.prototype.createGroup = function (name, data, toDelete, deleteList) {
var obj = {}, dataMap = [];
var map = this.parentItem.data.map(function(item){return item.type === MEDIA_TYPE_GROUP?item.name:null;});
var toAdd = map.indexOf(name);
echo(toDelete,'toDelete');
if(toAdd === -1){
obj.name = name;
obj.type = MEDIA_TYPE_GROUP;
obj.data = [];
this.parentItem.data.splice(this.channelStart + 1, 0, obj);
} else {
dataMap = data.map(function(item){if(item.type === MEDIA_TYPE_GROUP){return item.name;}});
for(var i=0; i<dataMap.length; i++){
if(map.indexOf(dataMap[i]) !== -1){
new CModalAlert(currCPage,_('Error'),_('Copying error'));
return false;
}
}
obj = this.parentItem.data[toAdd];
}
var ansvCode = this.addChannelsToList(obj.data, JSON.parse(JSON.stringify(data)), false, true);
if(ansvCode !== 0){
this.parent.actionFileDelete(toDelete,deleteList);
}
this.Refresh(false);
if(toAdd === -1){
this.Focused(this.handle.children[this.channelStart+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
} else {
this.Focused(this.handle.children[toAdd+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
}
if (this.isActive) {
this.Activate();
}
return true;
};
/**
* Add channels to group
* @param {Array} list - group to add
* @param {Array} data - channels array
* @param {boolean} [refocus] - need refocus
* @param {boolean} [noRefresh] - don't refresh list
* @return {boolean|number} true if any channel was added, 0 if a group copy error occurred
*/
TVList.prototype.addChannelsToList = function (list, data, refocus, noRefresh) {
echo(list,'ADD TO LIST');
echo(data,'ADD DATA');
var needCheck = false, tempList = [], channelStart = -1;
var map = this.path.map(function(item){return item.data;});
var mapList = list.map(function(item){return item.type;});
channelStart = mapList.lastIndexOf(MEDIA_TYPE_GROUP);
if (data){
for (var i = 0; i < data.length; i++) {
if ( data[i].type === MEDIA_TYPE_GROUP && map.indexOf(data[i].data) !== -1 ){
new CModalAlert(this.parent,_('Error'),_('Copying error'));
return 0;
}
}
for ( i = 0; i < data.length; i++ ) {
var needAdd = true;
for (var j = 0; j < list.length; j++) {
if (list[j].name === data[i].name && list[j].type === data[i].type && (list[j].url === data[i].url || list[j].sol +' '+ list[j].url === data[i].url || list[j].url === data[i].sol +' '+ data[i].url)) {
needAdd = false;
break;
}
}
if (needAdd) {
if(data[i].type === MEDIA_TYPE_GROUP){
channelStart++;
list.splice(channelStart, 0, data[i]);
} else {
list.push(data[i]);
}
needCheck = true;
}
}
}
if(needCheck){
tempList = list.slice();
tempList = IPTVChannels.checkTS_data(tempList,true);
list = tempList.a;
}
this.parent.needSave = true;
if( !noRefresh ){
this.Refresh(refocus);
}
return needCheck;
};
| List(p | identifier_name |
tvlist.js | 'use strict';
/**
* @class TVList
* @constructor
* @author Roman Stoian
*/
function TVList(parent) {
// parent constructor
CScrollList.call(this, parent);
/**
* link to the object for limited scopes
* @type {TVList}
*/
var self = this;
/**
* link to the BreadCrumb component
* @type {CBreadCrumb}
*/
this.bcrumb = null;
/**
* link to the SearchBar component
* @type {CSearchBar}
*/
this.sbar = null;
/**
* type filter for file listing
* @type Number
*/
this.filterType = MEDIA_TYPE_NONE;
/**
* data filter for file listing
* @type String
*/
this.filterText = '';
/**
* hierarchy change flag: no change
* @type {number}
*/
this.LEVEL_CHANGE_NONE = 0;
/**
* hierarchy change flag: go level up
* @type {number}
*/
this.LEVEL_CHANGE_UP = -1;
/**
* hierarchy change flag: go level deeper
* @type {number}
*/
this.LEVEL_CHANGE_DOWN = 1;
/**
* list of all media types on the current level
* @type {Array}
*/
this.mtypes = [];
/**
* list of media objects data
* full chain from the root
* @type {[Object]}
*/
this.path = [];
/**
* current media object opened
* @type {Object}
*/
this.parentItem = {};
this.timer = {};
this.prevChannel = null;
this.lastChannel = null;
/**
* list of action mapped to the media types
* @type {[Function]}
*/
this.openAction = {};
this.openAction[MEDIA_TYPE_BACK] = function(){
var st = this.Back();
return st;
};
this.openAction[MEDIA_TYPE_TV_ROOT] = function () {
var j = 0;
var item = null;
this.channelStart = -1;
this.Clear();
if ( this.filterText) {
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
this.channelStart++;
}
if (this.data.length > 0) {
for (var i = 0; i < this.data.length; i++) {
if (this.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: this.data[i].name}, {name: this.data[i].name, index: i, markable: true, data: this.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: this.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && this.data[i].tsOn}, {name: this.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(this.data[i].sol? this.data[i].sol + ' ' : '') + this.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop';
} else {
if ( this.filterText) {
self.parent.domURL.innerHTML = '';
self.parent.domInfoTitle.innerHTML = '';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
this.parent.handle.querySelector('.content').querySelector('.crop').className = 'crop defImage';
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_GROUP] = function (data) {
var item = null;
this.Clear();
this.Add({name: '..'}, {type: MEDIA_TYPE_BACK});
var j = 0;
this.channelStart = -1;
if (data.data.length > 0) {
for (var i = 0; i < data.data.length; i++) {
if (data.data[i].type === MEDIA_TYPE_GROUP) {
item = this.Add({name: data.data[i].name}, {name: data.data[i].name, index: i, markable: true, data: data.data[i].data, type: MEDIA_TYPE_GROUP});
if(item){
this.channelStart = i;
}
} else {
item = this.Add({name: data.data[i].name, number: j+1, tsOn: configuration.mayTimeShift && data.data[i].tsOn}, {name: data.data[i].name, markable: true, index: i, number: j+1, type: MEDIA_TYPE_STREAM}, {stared: FAVORITES_NEW[(data.data[i].sol? data.data[i].sol + ' ' : '') + data.data[i].url] ? true : false});
if(item){
j++;
}
}
}
self.parent.domInfoTitle.innerHTML = self.data[0].name?self.data[0].name:'';
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF1, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
return this.LEVEL_CHANGE_DOWN;
};
this.openAction[MEDIA_TYPE_STREAM] = function (data, noPlay) {
if (MediaPlayer.obj !== self.parentItem.data[data.index] && !noPlay) {
MediaPlayer.preparePlayer(self.parentItem.data[data.index], this.parent, true, true, true);
} else {
if (MediaPlayer.ts_inProgress) {
if ( environment.ts_icon ){
MediaPlayer.domTSIndicator.style.display = 'block';
}
MediaPlayer.runner.start();
}
MediaPlayer.Show(true, this.parent);
MediaPlayer.showInfo(true);
MediaPlayer.timer.showInfo = setTimeout(function () {
MediaPlayer.showInfo(false);
}, 3000);
}
return this.LEVEL_CHANGE_NONE;
};
}
// extending
TVList.prototype = Object.create(CScrollList.prototype);
TVList.prototype.constructor = TVList;
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetBreadCrumb = function ( component ) {
this.bcrumb = component;
};
/**
* Setter for linked component
* @param {CBase} component associated object
*/
TVList.prototype.SetSearchBar = function ( component ) {
this.sbar = component;
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function(item){
// check the item name against the current filter text ('..' always passes)
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
// check the item type against the current filter type ('..' always passes)
var type_ok = item.data.type === self.filterType || self.filterType === MEDIA_TYPE_NONE || item.data.type === MEDIA_TYPE_BACK;
// hide not matching items
self.Hidden(item, !(text_ok && type_ok));
});
};
/**
* Finds the first appropriate item
* @param {string} value
* @return {Node}
*/
TVList.prototype.FirstMatch = function ( value ) {
// preparing
var items;
if ( value === '' ) {
return null;
}
items = this.handleInner.children; // all list items
// iterate over all items until the first match is found
for ( var i = 0; i < items.length; i++ ) {
// candidate item to check
var item = items[i];
// match by item name (the '..' back item is skipped)
if ( item.data.type !== MEDIA_TYPE_BACK && item.data.name && item.data.name.toLowerCase().indexOf(value.toLowerCase()) !== -1 ) {
return item;
}
}
return null;
};
/**
* Create new item and put it in the list
* @param {Object} obj item display data (name, number, tsOn)
* @param {Object} attrs set of item data parameters
* @param {Object} states set of additional parameters (stared)
* @return {Node}
*/
TVList.prototype.Add = function (obj, attrs, states) {
var self = this, number;
// is it necessary to filter
if ( this.filterText) { // || this.filterType !== MEDIA_TYPE_NONE
// check the item name against the current filter text ('..' always passes)
var text_ok = attrs.type === MEDIA_TYPE_BACK || (obj.name && obj.name.toLowerCase().indexOf(this.filterText.toLowerCase()) !== -1);
// check the item type against the current filter type ('..' always passes)
var type_ok = attrs.type === this.filterType || this.filterType === MEDIA_TYPE_NONE || attrs.type === MEDIA_TYPE_BACK;
// hide not matching items
if ( !(text_ok && type_ok) ) {
return null;
}
}
if (this.mtypes.indexOf(attrs.type) === -1) {
this.mtypes.push(attrs.type);
}
// html prepare
var body = element('div', {className: 'data'}, obj.name);
var star = element('div', {className: 'star'});
if (obj.number) {
number = element('div', {className: 'number'}, obj.number);
} else {
number = element('div', {className: 'number'});
number.style.background = 'url("' + PATH_IMG_PUBLIC + 'media/type_' + attrs.type + '.png") no-repeat center';
}
var timeshift = element('div', {className: obj.tsOn? 'timeshift tsOn' : 'timeshift'});
// decoration
// make sure name is set
if (!attrs.name) {
attrs.name = obj.name;
}
// actual filling
var item = CScrollList.prototype.Add.call(this, [number, body, timeshift, star], {
star: star,
data: attrs,
// handlers
onclick: function () {
// open or enter the item
this.self.Open(this.data);
return false;
},
oncontextmenu: EMULATION ? null : function () {
// mark/unmark the item
self.parent.actionF2(false);
return false;
}
});
if(obj.number){
item.domNumber = number;
}
// mark as favourite
if (states && states.stared) {
item.self.SetStar(item, true);
}
return item;
};
/**
* Set inner item flags and decoration
* @param {Node} item the element to be processed
* @param {boolean} state new favourite (stared) state
*/
TVList.prototype.SetStar = function (item, state) {
if (item.stared === state) {
return;
}
this.SetState(item, 'stared', state);
if (state !== false) {
item.star.style.background = 'url("' + PATH_IMG_PUBLIC + 'ico_fav_s.png") no-repeat right';
} else {
item.star.style.background = 'none';
}
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, state !== false ? true : false);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, state !== false ? false : true);
};
/**
* Hook method on focus item change
* @param {Node} item the new focused item
*/
TVList.prototype.onFocus = function (item) {
var self = this;
if ( MediaPlayer.ts_inProgress ) {
if (MediaPlayer.tsExitCheck('focus', item)) {
return true;
}
}
clearTimeout(this.timer.OnFocusPlay);
if ( item.data.markable ) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, false);
} else {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF2, true);
}
if (item.data.type === MEDIA_TYPE_STREAM) {
if (!this.states.marked || this.states.marked.length === 0) {
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, item.stared);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, !item.stared);
} else {
echo(this.states.marked[0].data,'this.states.marked');
}
} else {
| self.parent.clearEPG();
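// delay the EPG/preview update by 500 ms so that fast scrolling does not start playback for every focused channel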
this.timer.OnFocusPlay = setTimeout(function () {
if ( item.data.type === MEDIA_TYPE_BACK ){
if(self.filterText){
self.parent.domInfoTitle.innerHTML = _('Contains the list of items corresponding to the given filter request');
} else {
self.parent.domInfoTitle.innerHTML = self.parentItem.name?self.parentItem.name:'';
}
} else {
self.parent.initEPGNow();
self.parent.domInfoTitle.innerHTML = item.data.name?item.data.name:'';
}
if (item.data.type === MEDIA_TYPE_STREAM) {
self.prevChannel = self.lastChannel;
self.lastChannel = self.parentItem.data[item.data.index];
self.parent.domURL.innerHTML = (self.parentItem.data[item.data.index].sol && self.parentItem.data[item.data.index].sol !==''?self.parentItem.data[item.data.index].sol+' ':'')+self.parentItem.data[item.data.index].url;
if ( MediaPlayer.obj !== self.parentItem.data[item.data.index] ) {
MediaPlayer.preparePlayer(self.parentItem.data[item.data.index], self.parent, true, false, true);
}
if ( self.parent.pvr.arr.length ) {
self.parent.pvr.check(true);
}
} else {
self.lastChannel = null;
self.parent.domURL.innerHTML = '';
MediaPlayer.end();
}
}, 500);
return false;
};
/**
* Reset and clear all items
* This will make the component ready for a new filling.
*/
TVList.prototype.Clear = function () {
CScrollList.prototype.Clear.call(this);
this.parent.domURL.innerHTML = '';
this.parent.domInfoTitle.innerHTML = '';
this.filterType = MEDIA_TYPE_NONE;
this.mtypes = [];
};
/**
* Move one level up
*/
TVList.prototype.Back = function () {
var self = this;
// there are some levels
if ( this.path.length > 1 ) {
// exiting from favs and there are some changes
// normal exit
this.path.pop();
self.lastChannel = null;
if ( this.bcrumb ) {
this.bcrumb.Pop();
}
// render the previous level
this.Build(this.path[this.path.length-1]);
// apply specific button visibility
setTimeout(function(){
self.onFocus(self.Current());
}, 0);
// go up
return this.LEVEL_CHANGE_UP;
}
// stay here
return this.LEVEL_CHANGE_NONE;
};
/**
* Go to channel by number
* @param {number} number
*/
TVList.prototype.goToChannel = function (number) {
if (this.handle.children.length > number && number > this.channelStart) {
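// shift the index by one when the first visible list item is the '..' back entry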
this.Focused(this.handle.children[number+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
this.SetPosition(this.activeItem);
}
};
/**
* Enter the item or open it
* @param {Object} data media item inner data
* @param {boolean} [noPlay] if true, do not start playback when opening a stream
*/
TVList.prototype.Open = function (data, noPlay) {
// render the list
echo(data,'data Open');
var levelChange = this.Build(data, noPlay);
// level changed
if ( levelChange !== this.LEVEL_CHANGE_NONE ) {
// reset tray filter icon
if ( this.parent.Tray.iconFilter.parentNode === this.parent.Tray.handleInner ) {
this.parent.Tray.handleInner.removeChild(this.parent.Tray.iconFilter);
}
// and hide at all if not necessary
this.parent.Tray.Show(globalBuffer.size() > 0, false);
// particular direction
if ( levelChange === this.LEVEL_CHANGE_DOWN ) {
// build breadcrumbs
if ( this.filterText ) {
// filter
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/ico_filter.png', this.filterText);
}
} else {
// default
// build breadcrumbs
if ( this.bcrumb ) {
this.bcrumb.Push('/', 'media/type_'+data.type+'.png', data.name ? data.name : '');
}
}
// save this step
this.path.push(data);
// set focus to the first item
this.Activate(true);
if( data.data && data.data.length ){ this.onFocus(this.activeItem); }
} else {
// go up
if ( !this.Reposition(this.parentItem) ){
this.Activate(true);
}
}
// current level item
this.parentItem = this.path[this.path.length-1];
}
return levelChange;
};
/**
* Open root, clear all breadcrumbs, search options
*/
TVList.prototype.Reset = function () {
this.parentItem = null;
this.path = [];
this.Clear();
// linked components
if ( this.bcrumb ) {
this.bcrumb.Reset();
}
if ( this.sbar ) {
this.sbar.Reset();
}
};
/**
* Renders the given media item by executing associated action
* @param {Object} data media item inner data
* @param {boolean} [noPlay] passed through to the open action (prevents playback start for streams)
*/
TVList.prototype.Build = function (data, noPlay) {
var levelChange = this.LEVEL_CHANGE_NONE;
// apply filter parameter from the current node
this.filterText = data.filterText ? data.filterText : '';
// get item associated open action and execute
if ( data && data.type && typeof this.openAction[data.type] === 'function' ) {
levelChange = this.openAction[data.type].call(this, data, noPlay);
} else {
// wrong item type
new CModalAlert(this.parent, _('Error'), _('Unknown type of selected item'), _('Close'));
}
return levelChange;
};
/**
* Clear the list and fill it again (will try to refocus)
* @param {boolean} [refocus=true] if true then try to set focus to the previous focused element
*/
TVList.prototype.Refresh = function (refocus) {
var data = {data : null};
// some media item is opened at the moment
if ( this.parentItem !== null ) {
// get current focused item
this.Build(this.parentItem);
if ( refocus !== false) {
if ( this.activeItem ) {
data = this.activeItem;
} else {
data = this.FirstMatch(this.filterText);
}
}
// refill
// find it in the new list if necessary
if ( data && data.data ) {
this.Reposition(data.data);
} else {
this.Reposition(data);
}
}
};
/**
* refresh list index in dom objects
*/
TVList.prototype.RefreshIndex = function () {
this.channelStart = this.parentItem.type === MEDIA_TYPE_GROUP? 0 : -1;
var i = 0,
j = 1,
items = this.handleInner.children,
delta = 0;
if ( this.parentItem !== null && items.length ) {
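// the leading '..' back item (if present) is excluded from renumbering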
delta = items[0].data.type === MEDIA_TYPE_BACK? 1: 0;
for(i = delta; i < items.length; i++){
items[i].data.index = i - delta;
if(items[i].data.type === MEDIA_TYPE_GROUP){
this.channelStart = i;
} else {
items[i].data.number = j;
items[i].domNumber.innerHTML = j;
j++;
}
}
} else {
this.Refresh(true);
}
};
/**
* Moves the cursor to the given element
* @param {Object} data
* @return {boolean} operation status
*/
TVList.prototype.Reposition = function (data) {
var change = false;
if ( data ) {
for ( var item, i = 0, l = this.Length(); i < l; i++ ) {
item = this.handleInner.children[i];
// match by the stored item index (directly or via nested data)
if (data.index === item.data.index || (data.data && !data.index && data.data.index === item.data.index)) {
change = this.Focused(item, true);
this.SetPosition(this.activeItem); // focus handle bug fix (even items per page problem)
if(!change){
this.onFocus(item);
}
return change;
}
}
}
return false;
};
/**
* Handle checked state for the given item according to the file type.
* Mark only items available for marking.
* @param {Node} item the element to be processed
* @param {boolean} state flag of the state
* @return {boolean} operation status
*/
TVList.prototype.Marked = function (item, state) {
// item exists and only allowed types
if (item && item.data && item.data.markable) {
// parent marking
return CScrollList.prototype.Marked.call(this, item, state);
}
return false;
};
/**
* Show/hide file items according to the specified filter options
* @param {string} text filter file name option
*/
TVList.prototype.SetFilterText = function (text) {
echo('enter to SetFilterText : ' + this.filterText);
// set global (case conversion for future string comparison speedup)
this.filterText = text.toLowerCase();
// apply filter
this.Filter();
};
/**
* Shows/hides items depending on the given filter string match
* unmarks all hidden items
*/
TVList.prototype.Filter = function () {
// link to the object for limited scopes
var self = this;
// check all items
this.Each(function (item) {
// check the item name against the current filter text ('..' always passes)
var text_ok = item.data.type === MEDIA_TYPE_BACK || (item.data.name && item.data.name.toLowerCase().indexOf(self.filterText) !== -1);
self.Hidden(item, !text_ok);
});
};
/**
* Return all appropriate items available for actions (either marked or current with suitable type)
* @return {Array} list of found Nodes
*/
TVList.prototype.ActiveItems = function () {
// get all marked items
var items = this.states.marked ? this.states.marked.slice() : [];
// no marked, check current and its type
if (items.length === 0 && this.activeItem && this.activeItem.data.markable) {
items.push(this.activeItem);
}
return items;
};
/**
* Create group of channels
*/
TVList.prototype.createGroup = function (name, data, toDelete, deleteList) {
var obj = {}, dataMap = [];
var map = this.parentItem.data.map(function(item){return item.type === MEDIA_TYPE_GROUP?item.name:null;});
var toAdd = map.indexOf(name);
echo(toDelete,'toDelete');
if(toAdd === -1){
obj.name = name;
obj.type = MEDIA_TYPE_GROUP;
obj.data = [];
this.parentItem.data.splice(this.channelStart + 1, 0, obj);
} else {
dataMap = data.map(function(item){if(item.type === MEDIA_TYPE_GROUP){return item.name;}});
for(var i=0; i<dataMap.length; i++){
if(map.indexOf(dataMap[i]) !== -1){
new CModalAlert(currCPage,_('Error'),_('Copying error'));
return false;
}
}
obj = this.parentItem.data[toAdd];
}
var ansvCode = this.addChannelsToList(obj.data, JSON.parse(JSON.stringify(data)), false, true);
if(ansvCode !== 0){
this.parent.actionFileDelete(toDelete,deleteList);
}
this.Refresh(false);
if(toAdd === -1){
this.Focused(this.handle.children[this.channelStart+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
} else {
this.Focused(this.handle.children[toAdd+(this.handle.children[0].data.type === MEDIA_TYPE_BACK? 1:0)], true);
}
if (this.isActive) {
this.Activate();
}
return true;
};
/**
* Add channels to group
* @param {Array} list - group to add
* @param {Array} data - channels array
* @param {boolean} [refocus] - need refocus
* @param {boolean} [noRefresh] - don't refresh list
* @return {boolean|number} true if any channel was added, 0 if a group copy error occurred
*/
TVList.prototype.addChannelsToList = function (list, data, refocus, noRefresh) {
echo(list,'ADD TO LIST');
echo(data,'ADD DATA');
var needCheck = false, tempList = [], channelStart = -1;
var map = this.path.map(function(item){return item.data;});
var mapList = list.map(function(item){return item.type;});
channelStart = mapList.lastIndexOf(MEDIA_TYPE_GROUP);
if (data){
for (var i = 0; i < data.length; i++) {
if ( data[i].type === MEDIA_TYPE_GROUP && map.indexOf(data[i].data) !== -1 ){
new CModalAlert(this.parent,_('Error'),_('Copying error'));
return 0;
}
}
for ( i = 0; i < data.length; i++ ) {
var needAdd = true;
for (var j = 0; j < list.length; j++) {
if (list[j].name === data[i].name && list[j].type === data[i].type && (list[j].url === data[i].url || list[j].sol +' '+ list[j].url === data[i].url || list[j].url === data[i].sol +' '+ data[i].url)) {
needAdd = false;
break;
}
}
if (needAdd) {
if(data[i].type === MEDIA_TYPE_GROUP){
channelStart++;
list.splice(channelStart, 0, data[i]);
} else {
list.push(data[i]);
}
needCheck = true;
}
}
}
if(needCheck){
tempList = list.slice();
tempList = IPTVChannels.checkTS_data(tempList,true);
list = tempList.a;
}
this.parent.needSave = true;
if( !noRefresh ){
this.Refresh(refocus);
}
return needCheck;
};
| this.parent.BPanel.Hidden(this.parent.BPanel.btnF3add, true);
this.parent.BPanel.Hidden(this.parent.BPanel.btnF3del, true);
}
| conditional_block |
server.rs | // Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to an 8 KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct Server<'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ {
self.extensions.drain(..)
}
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
// We don't expect a body, so we can search for the CRLF header terminator
// starting from the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
// Give up if we've reached the limit. We could emit a specific error here,
// but httparse will produce a meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T { | let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method != Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version != Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) {
protocols.push(p)
}
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
]; | self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> { | random_line_split |
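// ---------------------------------------------------------------------------
// Editor's sketch (not part of server.rs): how the handshake API above is
// typically driven end to end. It uses only items defined in this file plus
// the futures AsyncRead/AsyncWrite traits it already imports; the function
// name `accept_websocket` and the "chat" protocol string are illustrative
// assumptions, not soketto API.
async fn accept_websocket<T: AsyncRead + AsyncWrite + Unpin>(socket: T) -> Result<(), Error> {
    // Set up the handshake state machine over the raw socket.
    let mut server = Server::new(socket);
    server.add_protocol("chat");
    // Read and parse the client's upgrade request; copy out what we need so the
    // borrow of the internal buffer ends before we respond.
    let (key, protocol) = {
        let req = server.receive_request().await?;
        (req.key(), req.protocols().next().map(|p| p.to_string()))
    };
    // Accept the upgrade, echoing the first matching protocol (if any).
    server.send_response(&Response::Accept { key, protocol: protocol.as_deref() }).await?;
    // Hand the socket over to the frame-level connection builder (see the connection module).
    let _builder = server.into_builder();
    Ok(())
}
// --------------------------- end of editor's sketch -------------------------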
server.rs | // Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to an 8 KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct Server<'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ {
self.extensions.drain(..)
}
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
// We don't expect a body, so we can search for the CRLF header terminator
// starting from the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
// Give up if we've reached the limit. We could emit a specific error here,
// but httparse will produce a meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T {
self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method != Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version != Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) |
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
];
| {
protocols.push(p)
} | conditional_block |
server.rs | // Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to an 8 KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct Server<'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ |
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
// We don't expect a body, so we can search for the CRLF header terminator
// starting from the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
			// Give up if we've reached the limit. We could emit a specific error here,
			// but httparse will produce a meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T {
self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method != Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version != Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) {
protocols.push(p)
}
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
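// A minimal usage sketch (not part of the original file): the flow assumed here is
// `new` -> `receive_request` -> `send_response` -> `into_builder`; the accept helper
// below is illustrative only and is kept commented out.
//
// async fn accept<T: AsyncRead + AsyncWrite + Unpin>(socket: T) -> Result<connection::Builder<T>, Error> {
//     let mut server = Server::new(socket);
//     let key = server.receive_request().await?.key();
//     server.send_response(&Response::Accept { key, protocol: None }).await?;
//     Ok(server.into_builder())
// }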
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
];
| {
self.extensions.drain(..)
} | identifier_body |
server.rs | // Copyright (c) 2019 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Websocket server [handshake].
//!
//! [handshake]: https://tools.ietf.org/html/rfc6455#section-4
use super::{
append_extensions, configure_extensions, expect_ascii_header, with_first_header, Error, WebSocketKey,
MAX_NUM_HEADERS, SEC_WEBSOCKET_EXTENSIONS, SEC_WEBSOCKET_PROTOCOL,
};
use crate::connection::{self, Mode};
use crate::extension::Extension;
use bytes::BytesMut;
use futures::prelude::*;
use std::{mem, str};
// Most HTTP servers default to an 8 KB limit on headers
const MAX_HEADERS_SIZE: usize = 8 * 1024;
const BLOCK_SIZE: usize = 8 * 1024;
/// Websocket handshake server.
#[derive(Debug)]
pub struct | <'a, T> {
socket: T,
/// Protocols the server supports.
protocols: Vec<&'a str>,
/// Extensions the server supports.
extensions: Vec<Box<dyn Extension + Send>>,
/// Encoding/decoding buffer.
buffer: BytesMut,
}
impl<'a, T: AsyncRead + AsyncWrite + Unpin> Server<'a, T> {
/// Create a new server handshake.
pub fn new(socket: T) -> Self {
Server { socket, protocols: Vec::new(), extensions: Vec::new(), buffer: BytesMut::new() }
}
/// Override the buffer to use for request/response handling.
pub fn set_buffer(&mut self, b: BytesMut) -> &mut Self {
self.buffer = b;
self
}
/// Extract the buffer.
pub fn take_buffer(&mut self) -> BytesMut {
mem::take(&mut self.buffer)
}
/// Add a protocol the server supports.
pub fn add_protocol(&mut self, p: &'a str) -> &mut Self {
self.protocols.push(p);
self
}
/// Add an extension the server supports.
pub fn add_extension(&mut self, e: Box<dyn Extension + Send>) -> &mut Self {
self.extensions.push(e);
self
}
/// Get back all extensions.
pub fn drain_extensions(&mut self) -> impl Iterator<Item = Box<dyn Extension + Send>> + '_ {
self.extensions.drain(..)
}
/// Await an incoming client handshake request.
pub async fn receive_request(&mut self) -> Result<ClientRequest<'_>, Error> {
self.buffer.clear();
let mut skip = 0;
loop {
crate::read(&mut self.socket, &mut self.buffer, BLOCK_SIZE).await?;
let limit = std::cmp::min(self.buffer.len(), MAX_HEADERS_SIZE);
			// We don't expect a body, so we can search for the CRLF end-of-headers
			// marker from the end of the buffer.
if self.buffer[skip..limit].windows(4).rev().any(|w| w == b"\r\n\r\n") {
break;
}
			// Give up if we've reached the limit. We could emit a specific error here,
			// but httparse will produce a meaningful error for us regardless.
if limit == MAX_HEADERS_SIZE {
break;
}
// Skip bytes that did not contain CRLF in the next iteration.
// If we only read a partial CRLF sequence, we would miss it if we skipped the full buffer
// length, hence backing off the full 4 bytes.
skip = self.buffer.len().saturating_sub(4);
}
self.decode_request()
}
/// Respond to the client.
pub async fn send_response(&mut self, r: &Response<'_>) -> Result<(), Error> {
self.buffer.clear();
self.encode_response(r);
self.socket.write_all(&self.buffer).await?;
self.socket.flush().await?;
self.buffer.clear();
Ok(())
}
/// Turn this handshake into a [`connection::Builder`].
pub fn into_builder(mut self) -> connection::Builder<T> {
let mut builder = connection::Builder::new(self.socket, Mode::Server);
builder.set_buffer(self.buffer);
builder.add_extensions(self.extensions.drain(..));
builder
}
/// Get out the inner socket of the server.
pub fn into_inner(self) -> T {
self.socket
}
// Decode client handshake request.
fn decode_request(&mut self) -> Result<ClientRequest, Error> {
let mut header_buf = [httparse::EMPTY_HEADER; MAX_NUM_HEADERS];
let mut request = httparse::Request::new(&mut header_buf);
match request.parse(self.buffer.as_ref()) {
Ok(httparse::Status::Complete(_)) => (),
Ok(httparse::Status::Partial) => return Err(Error::IncompleteHttpRequest),
Err(e) => return Err(Error::Http(Box::new(e))),
};
if request.method != Some("GET") {
return Err(Error::InvalidRequestMethod);
}
if request.version != Some(1) {
return Err(Error::UnsupportedHttpVersion);
}
let host = with_first_header(&request.headers, "Host", Ok)?;
expect_ascii_header(request.headers, "Upgrade", "websocket")?;
expect_ascii_header(request.headers, "Connection", "upgrade")?;
expect_ascii_header(request.headers, "Sec-WebSocket-Version", "13")?;
let origin =
request.headers.iter().find_map(
|h| {
if h.name.eq_ignore_ascii_case("Origin") {
Some(h.value)
} else {
None
}
},
);
let headers = RequestHeaders { host, origin };
let ws_key = with_first_header(&request.headers, "Sec-WebSocket-Key", |k| {
WebSocketKey::try_from(k).map_err(|_| Error::SecWebSocketKeyInvalidLength(k.len()))
})?;
for h in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_EXTENSIONS)) {
configure_extensions(&mut self.extensions, std::str::from_utf8(h.value)?)?
}
let mut protocols = Vec::new();
for p in request.headers.iter().filter(|h| h.name.eq_ignore_ascii_case(SEC_WEBSOCKET_PROTOCOL)) {
if let Some(&p) = self.protocols.iter().find(|x| x.as_bytes() == p.value) {
protocols.push(p)
}
}
let path = request.path.unwrap_or("/");
Ok(ClientRequest { ws_key, protocols, path, headers })
}
// Encode server handshake response.
fn encode_response(&mut self, response: &Response<'_>) {
match response {
Response::Accept { key, protocol } => {
let accept_value = super::generate_accept_key(&key);
self.buffer.extend_from_slice(
concat![
"HTTP/1.1 101 Switching Protocols",
"\r\nServer: soketto-",
env!("CARGO_PKG_VERSION"),
"\r\nUpgrade: websocket",
"\r\nConnection: upgrade",
"\r\nSec-WebSocket-Accept: ",
]
.as_bytes(),
);
self.buffer.extend_from_slice(&accept_value);
if let Some(p) = protocol {
self.buffer.extend_from_slice(b"\r\nSec-WebSocket-Protocol: ");
self.buffer.extend_from_slice(p.as_bytes())
}
append_extensions(self.extensions.iter().filter(|e| e.is_enabled()), &mut self.buffer);
self.buffer.extend_from_slice(b"\r\n\r\n")
}
Response::Reject { status_code } => {
self.buffer.extend_from_slice(b"HTTP/1.1 ");
let (_, reason) = if let Ok(i) = STATUSCODES.binary_search_by_key(status_code, |(n, _)| *n) {
STATUSCODES[i]
} else {
(500, "500 Internal Server Error")
};
self.buffer.extend_from_slice(reason.as_bytes());
self.buffer.extend_from_slice(b"\r\n\r\n")
}
}
}
}
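// A minimal usage sketch (not part of the original file): the flow assumed here is
// `new` -> `receive_request` -> `send_response` -> `into_builder`; the accept helper
// below is illustrative only and is kept commented out.
//
// async fn accept<T: AsyncRead + AsyncWrite + Unpin>(socket: T) -> Result<connection::Builder<T>, Error> {
//     let mut server = Server::new(socket);
//     let key = server.receive_request().await?.key();
//     server.send_response(&Response::Accept { key, protocol: None }).await?;
//     Ok(server.into_builder())
// }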
/// Handshake request received from the client.
#[derive(Debug)]
pub struct ClientRequest<'a> {
ws_key: WebSocketKey,
protocols: Vec<&'a str>,
path: &'a str,
headers: RequestHeaders<'a>,
}
/// Select HTTP headers sent by the client.
#[derive(Debug, Copy, Clone)]
pub struct RequestHeaders<'a> {
/// The [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) header.
pub host: &'a [u8],
/// The [`Origin`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin) header, if provided.
pub origin: Option<&'a [u8]>,
}
impl<'a> ClientRequest<'a> {
/// The `Sec-WebSocket-Key` header nonce value.
pub fn key(&self) -> WebSocketKey {
self.ws_key
}
/// The protocols the client is proposing.
pub fn protocols(&self) -> impl Iterator<Item = &str> {
self.protocols.iter().cloned()
}
/// The path the client is requesting.
pub fn path(&self) -> &str {
self.path
}
/// Select HTTP headers sent by the client.
pub fn headers(&self) -> RequestHeaders {
self.headers
}
}
/// Handshake response the server sends back to the client.
#[derive(Debug)]
pub enum Response<'a> {
/// The server accepts the handshake request.
Accept { key: WebSocketKey, protocol: Option<&'a str> },
/// The server rejects the handshake request.
Reject { status_code: u16 },
}
/// Known status codes and their reason phrases.
const STATUSCODES: &[(u16, &str)] = &[
(100, "100 Continue"),
(101, "101 Switching Protocols"),
(102, "102 Processing"),
(200, "200 OK"),
(201, "201 Created"),
(202, "202 Accepted"),
(203, "203 Non Authoritative Information"),
(204, "204 No Content"),
(205, "205 Reset Content"),
(206, "206 Partial Content"),
(207, "207 Multi-Status"),
(208, "208 Already Reported"),
(226, "226 IM Used"),
(300, "300 Multiple Choices"),
(301, "301 Moved Permanently"),
(302, "302 Found"),
(303, "303 See Other"),
(304, "304 Not Modified"),
(305, "305 Use Proxy"),
(307, "307 Temporary Redirect"),
(308, "308 Permanent Redirect"),
(400, "400 Bad Request"),
(401, "401 Unauthorized"),
(402, "402 Payment Required"),
(403, "403 Forbidden"),
(404, "404 Not Found"),
(405, "405 Method Not Allowed"),
(406, "406 Not Acceptable"),
(407, "407 Proxy Authentication Required"),
(408, "408 Request Timeout"),
(409, "409 Conflict"),
(410, "410 Gone"),
(411, "411 Length Required"),
(412, "412 Precondition Failed"),
(413, "413 Payload Too Large"),
(414, "414 URI Too Long"),
(415, "415 Unsupported Media Type"),
(416, "416 Range Not Satisfiable"),
(417, "417 Expectation Failed"),
(418, "418 I'm a teapot"),
(421, "421 Misdirected Request"),
(422, "422 Unprocessable Entity"),
(423, "423 Locked"),
(424, "424 Failed Dependency"),
(426, "426 Upgrade Required"),
(428, "428 Precondition Required"),
(429, "429 Too Many Requests"),
(431, "431 Request Header Fields Too Large"),
(451, "451 Unavailable For Legal Reasons"),
(500, "500 Internal Server Error"),
(501, "501 Not Implemented"),
(502, "502 Bad Gateway"),
(503, "503 Service Unavailable"),
(504, "504 Gateway Timeout"),
(505, "505 HTTP Version Not Supported"),
(506, "506 Variant Also Negotiates"),
(507, "507 Insufficient Storage"),
(508, "508 Loop Detected"),
(510, "510 Not Extended"),
(511, "511 Network Authentication Required"),
];
| Server | identifier_name |
utils.py | import datetime
import functools
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import pymongo
import pandas as pd
import pymysql
import schedule
from raven import Client
from sqlalchemy import create_engine
from config import MONGOURL, MYSQLHOST, MYSQLUSER, MYSQLPASSWORD, MYSQLPORT, MYSQLDB, MYSQLTABLE, SENTRY_DSN
logger = logging.getLogger("main_log")
db = pymongo.MongoClient(MONGOURL)
coll = db.stock.mins
sentry = Client(SENTRY_DSN)
def all_codes_now():
codes = coll.find().distinct("code")
return codes # 12332 12359 果然每天的 codes 数量不一样
def write_codes_to_file(codes):
"""如果codes不是经常有新增的 查询一次写入文件
下次需要的时候直接从文件中读取要比数据库 distinct 查询要快"""
with open("codes.json", "w") as f:
json.dump(codes, f)
def wirte_code_date_to_file(dt1, dt2, date_int_str):
"""
    Write each specified code's increment within the given time window to a file.
:param dt1:
:param dt2:
:param date_int_str:
:return:
"""
f = open("codes.json", "r")
codes = json.load(f)
f.close()
file_path = os.path.join(os.getcwd(), "exportdir/" + date_int_str)
os.makedirs(file_path, exist_ok=True)
for code in codes:
q = '{{code:"{0}",time: {{$gte:ISODate("{1}"), $lte:ISODate("{2}")}}}}'.format(code, dt1, dt2)
file_name = os.path.join(file_path, code)
command = "mongoexport -d stock -c mins -q '{}' --fieldFile mins_fields.txt --type=csv --out {}.csv".format(q, file_name)
# print(command)
log_file = open("export_log.log", "a+")
subprocess.call(command, shell=True, stderr=log_file)
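# Example of a generated mongoexport command (the code and dates below are illustrative only):
# mongoexport -d stock -c mins \
#   -q '{code:"SH600000",time: {$gte:ISODate("2019-07-17T00:00:00Z"), $lte:ISODate("2019-07-18T00:00:00Z")}}' \
#   --fieldFile mins_fields.txt --type=csv --out exportdir/20190717/SH600000.csv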
def merge_csv(folder_path, savefile_path, savefile_name):
"""
    Merge multiple CSV files into one.
:param folder_path:
:param savefile_path:
:param savefile_name:
:return:
"""
    # change the current working directory
os.chdir(folder_path)
    # collect all file names under this folder into a list
file_list = os.listdir()
    # read the first CSV file, including its header
df = pd.read_csv(os.path.join(folder_path, file_list[0]))
    # create the folder the merged file will be saved into
os.makedirs(savefile_path, exist_ok=True)
    # write the first CSV file out as the start of the merged file
save_file = os.path.join(savefile_path, savefile_name)
df.to_csv(save_file)
    # loop over the remaining CSV files and append each one to the merged file
count = 0
try:
for i in range(1, len(file_list)):
# print(os.path.join(Folder_Path, file_list[i]))
df = pd.read_csv(os.path.join(folder_path, file_list[i]))
# print(df)
# print(df.shape[0])
count += df.shape[0]
# print()
# print()
df.to_csv(save_file, encoding="utf-8", index=False, header=False, mode='a+')
except Exception:
pass
return count
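# A possible alternative sketch (not in the original code): concatenate in memory with
# pandas and write once; this assumes every per-code CSV fits in RAM at the same time.
# frames = [pd.read_csv(os.path.join(folder_path, name)) for name in os.listdir(folder_path)]
# pd.concat(frames, ignore_index=True).to_csv(save_file, index=False)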
def csv_to_mysql(load_sql, host, user, password):
"""
This function load a csv file to MySQL table according to
the load_sql statement.
:param load_sql:
:param host:
:param user:
:param password:
:return:
"""
try:
con = pymysql.connect(host=host,
user=user,
password=password,
autocommit=True,
local_infile=1)
print('Connected to DB: {}'.format(host))
# Create cursor and execute Load SQL
cursor = con.cursor()
cursor.execute(load_sql)
        print('Successfully loaded the table from csv.')
con.close()
except Exception as e:
print('Error: {}'.format(str(e)))
sys.exit(1)
def gen_times():
"""
    Determine the boundary times for the pull.
    This is the normal (daily) case.
:return:
"""
dt1 = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(datetime.date.today(),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y%m%d")
return dt1, dt2, date_int_str
def gen_temp_times(start, end):
"""
    Special case used to backfill missing data.
    `end` must be a time that has already passed relative to now.
:param start:
:param end:
:return:
"""
while start.date() <= end.date():
dt1 = datetime.datetime.combine(start, datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(start + datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(start, datetime.time.min).strftime("%Y%m%d")
yield dt1, dt2, date_int_str
start += datetime.timedelta(days=1)
# def gen_mongo_count(dt1, dt2):
# """
# 计算在dt1 和 dt2之间的增量数量 理论上是一天的增量
# :param dt1:
# :param dt2:
# :return:
# """
# # {
# # "_id": ObjectId("59ce1e1d6e6dc7768c7140dc"),
# # "code": "SH900955",
# # "time": ISODate("1999-07-26T09:59:00Z"),
# # "open": 0.462,
# # "close": 0.462,
# # "low": 0.462,
# # "high": 0.462,
# # "volume": 0,
# # "amount": 0
# # }
# # 现将 dt1 和 dt2 进行转换
# ret = coll.find({"time": {"$gte": dt1, "$lte": dt2}}).count_documents
# return ret
def gene(dt1, dt2, date_int_str):
"""整个生成逻辑"""
logger.info(f"dt1:{dt1}")
logger.info(f"dt2:{dt2}")
mysqlhost = MYSQLHOST
user = MYSQLUSER
password = MYSQLPASSWORD
mysqlport = MYSQLPORT
mysqldb = MYSQLDB
mysqltable = MYSQLTABLE
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
save_file_path = os.path.join(os.getcwd(), "savedir")
save_file_name = date_int_str + ".csv"
    # build the full list of codes as of now
codes = all_codes_now()
    # overwrite codes.json with that list
write_codes_to_file(codes)
    # read the codes back into memory and export each code's increment to a file
wirte_code_date_to_file(dt1, dt2, date_int_str)
    # merge the CSV files and count the rows to be imported
count = merge_csv(folder_path, save_file_path, save_file_name)
logger.info(f"由csv文件计算出的当天需要进行增量的数据量为 {count}")
sentry.captureMessage(f"需要进行增量的数据量为 {count}")
    # check that this matches the increment counted in mongo
    # that query is also fairly expensive, so skip the check for now
# mongo_count = gen_mongo_count(dt1, dt2)
if count:
        # load the merged csv into mysql
save_file = os.path.join(save_file_path, save_file_name)
load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
REPLACE INTO TABLE {mysqldb}.{mysqltable} \
FIELDS TERMINATED BY ',' \
ENCLOSED BY '"' \
IGNORE 1 LINES;"""
csv_to_mysql(load_sql, mysqlhost, user, password)
        # check that the csv row count matches the count queried from the database
query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= {date_int_str}"""
mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
{mysqlport}/{mysqldb}?charset=gbk"
DATACENTER = create_engine(mysql_string)
sql_count = DATACENTER.execute(query_sql).first()[0]
if sql_count != count:
raise RuntimeError("数据量不一致,请检查!")
# 合并后删除原始的 csv 文件
shutil.rmtree(folder_path, ignore_errors=True)
logger.info(f"任务完成 删除当日过程 csv 文件 ")
def catch_exceptions(cancel_on_failure=False):
def catch_exceptions_decorator(job_func):
@functools.wraps(job_func)
def wrapper(*args, **kwargs):
try:
return job_func(*args, **kwargs)
except:
import traceback
logger.warning(traceback.format_exc())
sentry.captureException(exc_info=True)
if cancel_on_failure:
# print(schedule.CancelJob)
# schedule.cancel_job()
return schedule.CancelJob
return wrapper
return catch_exceptions_decorator
@catch_exceptions()
def main():
import_date_str = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
logger.info(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
sentry.captureMessage(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
dt1, dt2, date_int_str = gen_times()
gene(dt1, dt2, date_int_str)
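# Illustrative scheduling sketch (the run time is an assumption, not from this file):
# schedule.every().day.at("01:00").do(main)
# while True:
#     schedule.run_pending()
#     time.sleep(60)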
if __name__ == "__main__":
# test gen times
# res = gen_times()
# print(res)
# test gen temp times
# start = datetime.datetime(2019, 7, 8, 12, 34, 56)
# end = datetime.datetime(2019, 7, 18, 12, 34, 56)
# generator = gen_temp_times(start, end)
# for data in generator:
# print(data)
# """
# ('2019-07-08T00:00:00Z', '2019-07-09T00:00:00Z', '20190708')
# ('2019-07-09T00:00:00Z', '2019-07-10T00:00:00Z', '20190709')
# ('2019-07-10T00:00:00Z', '2019-07-11T00:00:00Z', '20190710')
# ('2019-07-11T00:00:00Z', '2019-07-12T00:00:00Z', '20190711')
# ('2019-07-12T00:00:00Z', '2019-07-13T00:00:00Z', '20190712')
# ('2019-07-13T00:00:00Z', '2019-07-14T00:00:00Z', '20190713')
# ('2019-07-14T00:00:00Z', '2019-07-15T00:00:00Z', '20190714')
# ('2019-07-15T00:00:00Z', '2019-07-16T00:00:00Z', '20190715')
# ('2019-07-16T00:00:00Z', '2019-07-17T00:00:00Z', '20190716')
# ('2019-07-17T00:00:00Z', '2019-07-18T00:00:00Z', '20190717') | # """
# test gen all codes from mongo today
# t1 = time.time()
# all_codes_now()
# print(time.time() - t1) # 61s
# test wirte code to a file
# t1 = time.time()
# now_codes = all_codes_now()
# write_codes_to_file(now_codes)
# print(time.time() - t1) # 55s
# 写入分散的 csv 文件
dt1, dt2, date_int_str = gen_times()
# t1 = time.time()
# wirte_code_date_to_file(dt1, dt2, date_int_str)
# t2 = time.time()
# print((t2 - t1)/60, "min") # 79min
# test gen momngo count
# mongo_count = gen_mongo_count(dt1, dt2)
# print(mongo_count)
# test merge csv
# dt1, dt2, date_int_str = gen_times()
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
# save_file_path = os.path.join(os.getcwd(), "savedir")
# save_file_name = date_int_str + ".csv"
#
# t1 = time.time()
# count = merge_csv(folder_path, save_file_path, save_file_name)
# t2 = time.time()
# print(t2 - t1) # 时间在 1 min 左右
# print(count) # 1309440
# test csv to mysql
# 将合并后的 csv 导入 mysql
# t1 = time.time()
# mysqlhost = MYSQLHOST
# user = MYSQLUSER
# password = MYSQLPASSWORD
# mysqlport = MYSQLPORT
# mysqldb = MYSQLDB
# mysqltable = MYSQLTABLE
# save_file = os.path.join(save_file_path, save_file_name)
# load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
# REPLACE INTO TABLE {mysqldb}.{mysqltable} \
# FIELDS TERMINATED BY ',' \
# ENCLOSED BY '"' \
# IGNORE 1 LINES;"""
#
# csv_to_mysql(load_sql, mysqlhost, user, password)
# print(time.time() - t1) # 97s
#
# # 检查 csv 中的数目和数据库中查询出的数量是否一致
# query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= '{dt1[:10]}' and time <= '{dt2[:10]}';"""
# mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
# {mysqlport}/{mysqldb}?charset=gbk"
# print(query_sql)
# DATACENTER = create_engine(mysql_string)
# sql_count = DATACENTER.execute(query_sql).first()[0]
# print(sql_count) # 1309440
# 测试删除合并后的文件夹
# shutil.rmtree(folder_path, ignore_errors=True)
pass | # ('2019-07-18T00:00:00Z', '2019-07-19T00:00:00Z', '20190718') | random_line_split |
utils.py | import datetime
import functools
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import pymongo
import pandas as pd
import pymysql
import schedule
from raven import Client
from sqlalchemy import create_engine
from config import MONGOURL, MYSQLHOST, MYSQLUSER, MYSQLPASSWORD, MYSQLPORT, MYSQLDB, MYSQLTABLE, SENTRY_DSN
logger = logging.getLogger("main_log")
db = pymongo.MongoClient(MONGOURL)
coll = db.stock.mins
sentry = Client(SENTRY_DSN)
def all_codes_now():
codes = coll.find().distinct("code")
return codes # 12332 12359 果然每天的 codes 数量不一样
def write_codes_to_file(codes):
"""如果codes不是经常有新增的 查询一次写入文件
下次需要的时候直接从文件中读取要比数据库 distinct 查询要快"""
with open("codes.json", "w") as f:
json.dump(codes, f)
def wirte_code_date_to_file(dt1, dt2, date_int_str):
"""
将指定 code 制定时间内的增量 写入文件
:param dt1:
:param dt2:
:param date_int_str:
:return:
"""
f = open("codes.json", "r")
codes = json.load(f)
f.close()
file_path = os.path.join(os.getcwd(), "exportdir/" + date_int_str)
os.makedirs(file_path, exist_ok=True)
for code in codes:
q = '{{code:"{0}",time: {{$gte:ISODate("{1}"), $lte:ISODate("{2}")}}}}'.format(code, dt1, dt2)
file_name = os.path.join(file_path, code)
command = "mongoexport -d stock -c mins -q '{}' --fieldFile mins_fields.txt --type=csv --out {}.csv".format(q, file_name)
# print(command)
log_file = open("export_log.log", "a+")
subprocess.call(command, shell=True, stderr=log_file)
def merge_csv(folder_path, savefile_path, savefile_name):
"""
将多个 csv 文件合并成一个
:param folder_path:
:param savefile_path:
:param savefile_name:
:return:
"""
# 修改当前工作目录
os.chdir(folder_path)
# 将该文件夹下的所有文件名存入一个列表
file_list = os.listdir()
# 读取第一个CSV文件并包含表头
df = pd.read_csv(os.path.join(folder_path, file_list[0]))
# 创建要保存的文件夹
os.makedirs(savefile_path, exist_ok=True)
# 将读取的第一个CSV文件写入合并后的文件保存
save_file = os.path.join(savefile_path, savefile_name)
df.to_csv(save_file)
# 循环遍历列表中各个CSV文件名,并追加到合并后的文件
count = 0
try:
for i in range(1, len(file_list)):
# print(os.path.join(Folder_Path, file_list[i]))
df = pd.read_csv(os.path.join(folder_path, file_list[i]))
# print(df)
# print(df.shape[0])
count += df.shape[0]
# print()
# print()
df.to_csv(save_file, encoding="utf-8", index=False, header=False, mode='a+')
except Exception:
pass
return count
def csv_to_mysql(load_sql, host, user, password):
"""
This function load a csv file to MySQL table according to
the load_sql statement.
:param load_sql:
:param host:
:param user:
:param password:
:return:
"""
try:
con = pymysql.connect(host=host,
user=user,
password=password,
| datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y%m%d")
return dt1, dt2, date_int_str
def gen_temp_times(start, end):
"""
需要补充数据的特殊情况
end 是先对当前已经过去的时间
:param start:
:param end:
:return:
"""
while start.date() <= end.date():
dt1 = datetime.datetime.combine(start, datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(start + datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(start, datetime.time.min).strftime("%Y%m%d")
yield dt1, dt2, date_int_str
start += datetime.timedelta(days=1)
# def gen_mongo_count(dt1, dt2):
# """
# 计算在dt1 和 dt2之间的增量数量 理论上是一天的增量
# :param dt1:
# :param dt2:
# :return:
# """
# # {
# # "_id": ObjectId("59ce1e1d6e6dc7768c7140dc"),
# # "code": "SH900955",
# # "time": ISODate("1999-07-26T09:59:00Z"),
# # "open": 0.462,
# # "close": 0.462,
# # "low": 0.462,
# # "high": 0.462,
# # "volume": 0,
# # "amount": 0
# # }
# # 现将 dt1 和 dt2 进行转换
# ret = coll.find({"time": {"$gte": dt1, "$lte": dt2}}).count_documents
# return ret
def gene(dt1, dt2, date_int_str):
"""整个生成逻辑"""
logger.info(f"dt1:{dt1}")
logger.info(f"dt2:{dt2}")
mysqlhost = MYSQLHOST
user = MYSQLUSER
password = MYSQLPASSWORD
mysqlport = MYSQLPORT
mysqldb = MYSQLDB
mysqltable = MYSQLTABLE
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
save_file_path = os.path.join(os.getcwd(), "savedir")
save_file_name = date_int_str + ".csv"
# 生成截止到当前的全种类列表
codes = all_codes_now()
# 将其以重写的方式存入 codes.json 文件
write_codes_to_file(codes)
# 将 codes 读出到内存 同时将每一个code的增量写入文件
wirte_code_date_to_file(dt1, dt2, date_int_str)
# 将 csv 文件进行合并 并且计算被导入的增量数量
count = merge_csv(folder_path, save_file_path, save_file_name)
logger.info(f"由csv文件计算出的当天需要进行增量的数据量为 {count}")
sentry.captureMessage(f"需要进行增量的数据量为 {count}")
# 检查与 mongo 中的增量结果是否一致
# 这个查询也比较耗时 先不检查了
# mongo_count = gen_mongo_count(dt1, dt2)
if count:
# 将合并后的 csv 导入 mysql
save_file = os.path.join(save_file_path, save_file_name)
load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
REPLACE INTO TABLE {mysqldb}.{mysqltable} \
FIELDS TERMINATED BY ',' \
ENCLOSED BY '"' \
IGNORE 1 LINES;"""
csv_to_mysql(load_sql, mysqlhost, user, password)
# 检查 csv 中的数目和数据库中查询出的数量是否一致
query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= {date_int_str}"""
mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
{mysqlport}/{mysqldb}?charset=gbk"
DATACENTER = create_engine(mysql_string)
sql_count = DATACENTER.execute(query_sql).first()[0]
if sql_count != count:
raise RuntimeError("数据量不一致,请检查!")
# 合并后删除原始的 csv 文件
shutil.rmtree(folder_path, ignore_errors=True)
logger.info(f"任务完成 删除当日过程 csv 文件 ")
def catch_exceptions(cancel_on_failure=False):
def catch_exceptions_decorator(job_func):
@functools.wraps(job_func)
def wrapper(*args, **kwargs):
try:
return job_func(*args, **kwargs)
except:
import traceback
logger.warning(traceback.format_exc())
sentry.captureException(exc_info=True)
if cancel_on_failure:
# print(schedule.CancelJob)
# schedule.cancel_job()
return schedule.CancelJob
return wrapper
return catch_exceptions_decorator
@catch_exceptions()
def main():
import_date_str = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
logger.info(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
sentry.captureMessage(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
dt1, dt2, date_int_str = gen_times()
gene(dt1, dt2, date_int_str)
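# A possible scheduling sketch (the daily run time is an assumption, not from this file):
# schedule.every().day.at("01:00").do(main)
# while True:
#     schedule.run_pending()
#     time.sleep(60)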
if __name__ == "__main__":
# test gen times
# res = gen_times()
# print(res)
# test gen temp times
# start = datetime.datetime(2019, 7, 8, 12, 34, 56)
# end = datetime.datetime(2019, 7, 18, 12, 34, 56)
# generator = gen_temp_times(start, end)
# for data in generator:
# print(data)
# """
# ('2019-07-08T00:00:00Z', '2019-07-09T00:00:00Z', '20190708')
# ('2019-07-09T00:00:00Z', '2019-07-10T00:00:00Z', '20190709')
# ('2019-07-10T00:00:00Z', '2019-07-11T00:00:00Z', '20190710')
# ('2019-07-11T00:00:00Z', '2019-07-12T00:00:00Z', '20190711')
# ('2019-07-12T00:00:00Z', '2019-07-13T00:00:00Z', '20190712')
# ('2019-07-13T00:00:00Z', '2019-07-14T00:00:00Z', '20190713')
# ('2019-07-14T00:00:00Z', '2019-07-15T00:00:00Z', '20190714')
# ('2019-07-15T00:00:00Z', '2019-07-16T00:00:00Z', '20190715')
# ('2019-07-16T00:00:00Z', '2019-07-17T00:00:00Z', '20190716')
# ('2019-07-17T00:00:00Z', '2019-07-18T00:00:00Z', '20190717')
# ('2019-07-18T00:00:00Z', '2019-07-19T00:00:00Z', '20190718')
# """
# test gen all codes from mongo today
# t1 = time.time()
# all_codes_now()
# print(time.time() - t1) # 61s
# test wirte code to a file
# t1 = time.time()
# now_codes = all_codes_now()
# write_codes_to_file(now_codes)
# print(time.time() - t1) # 55s
# 写入分散的 csv 文件
dt1, dt2, date_int_str = gen_times()
# t1 = time.time()
# wirte_code_date_to_file(dt1, dt2, date_int_str)
# t2 = time.time()
# print((t2 - t1)/60, "min") # 79min
# test gen momngo count
# mongo_count = gen_mongo_count(dt1, dt2)
# print(mongo_count)
# test merge csv
# dt1, dt2, date_int_str = gen_times()
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
# save_file_path = os.path.join(os.getcwd(), "savedir")
# save_file_name = date_int_str + ".csv"
#
# t1 = time.time()
# count = merge_csv(folder_path, save_file_path, save_file_name)
# t2 = time.time()
# print(t2 - t1) # 时间在 1 min 左右
# print(count) # 1309440
# test csv to mysql
# 将合并后的 csv 导入 mysql
# t1 = time.time()
# mysqlhost = MYSQLHOST
# user = MYSQLUSER
# password = MYSQLPASSWORD
# mysqlport = MYSQLPORT
# mysqldb = MYSQLDB
# mysqltable = MYSQLTABLE
# save_file = os.path.join(save_file_path, save_file_name)
# load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
# REPLACE INTO TABLE {mysqldb}.{mysqltable} \
# FIELDS TERMINATED BY ',' \
# ENCLOSED BY '"' \
# IGNORE 1 LINES;"""
#
# csv_to_mysql(load_sql, mysqlhost, user, password)
# print(time.time() - t1) # 97s
#
# # 检查 csv 中的数目和数据库中查询出的数量是否一致
# query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= '{dt1[:10]}' and time <= '{dt2[:10]}';"""
# mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
# {mysqlport}/{mysqldb}?charset=gbk"
# print(query_sql)
# DATACENTER = create_engine(mysql_string)
# sql_count = DATACENTER.execute(query_sql).first()[0]
# print(sql_count) # 1309440
# 测试删除合并后的文件夹
# shutil.rmtree(folder_path, ignore_errors=True)
pass
| autocommit=True,
local_infile=1)
print('Connected to DB: {}'.format(host))
# Create cursor and execute Load SQL
cursor = con.cursor()
cursor.execute(load_sql)
        print('Successfully loaded the table from csv.')
con.close()
except Exception as e:
print('Error: {}'.format(str(e)))
sys.exit(1)
def gen_times():
"""
确定拉取的边界时间
一般情况
:return:
"""
dt1 = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(datetime.date.today(),
| identifier_body |
utils.py | import datetime
import functools
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import pymongo
import pandas as pd
import pymysql
import schedule
from raven import Client
from sqlalchemy import create_engine
from config import MONGOURL, MYSQLHOST, MYSQLUSER, MYSQLPASSWORD, MYSQLPORT, MYSQLDB, MYSQLTABLE, SENTRY_DSN
logger = logging.getLogger("main_log")
db = pymongo.MongoClient(MONGOURL)
coll = db.stock.mins
sentry = Client(SENTRY_DSN)
def all_codes_now():
codes = coll.find().distinct("code")
return codes # 12332 12359 果然每天的 codes 数量不一样
def write_codes_to_file(codes):
"""如果codes不是经常有新增的 查询一次写入文件
下次需要的时候直接从文件中读取要比数据库 distinct 查询要快"""
with open("codes.json", "w") as f:
json.dump(codes, f)
def wirte_code_date_to_file(dt1, dt2, date_int_str):
"""
将指定 code 制定时间内的增量 写入文件
:param dt1:
:param dt2:
:param date_int_str:
:return:
"""
f = open("codes.json", "r")
codes = json.load(f)
f.close()
file_path = os.path.join(os.getcwd(), "exportdir/" + date_int_str)
os.makedirs(file_path, exist_ok=True)
for code in codes:
q = '{{code:"{0}",time: {{$gte:ISODate("{1}"), $lte:ISODate("{2}")}}}}'.format(code, dt1, dt2)
file_name = os.path.join(file_path, code)
command = "mongoexport -d stock -c mins -q '{}' --fieldFile mins_fields.txt --type=csv --out {}.csv".format(q, file_name)
# print(command)
log_file = open("export_log.log", "a+")
subprocess.call(command, shell=True, stderr=log_file)
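# For illustration only (code and dates are assumed values), a generated command looks like:
# mongoexport -d stock -c mins \
#   -q '{code:"SH600000",time: {$gte:ISODate("2019-07-17T00:00:00Z"), $lte:ISODate("2019-07-18T00:00:00Z")}}' \
#   --fieldFile mins_fields.txt --type=csv --out exportdir/20190717/SH600000.csv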
def merge_csv(folder_path, savefile_path, savefile_name):
"""
将多个 csv 文件合并成一个
:param folder_path:
:param savefile_path:
:param savefile_name:
:return:
"""
# 修改当前工作目录
os.chdir(folder_path)
# 将该文件夹下的所有文件名存入一个列表
file_list = os.listdir()
# 读取第一个CSV文件并包含表头
df = pd.read_csv(os.path.join(folder_path, file_list[0]))
# 创建要保存的文件夹
os.makedirs(savefile_path, exist_ok=True)
# 将读取的第一个CSV文件写入合并后的文件保存
save_file = os.path.join(savefile_path, savefile_name)
df.to_csv(save_file)
# 循环遍历列表中各个CSV文件名,并追加到合并后的文件
count = 0
try:
for i in range(1, len(file_list)):
# print(os.path.join(Folder_Path, file_list[i]))
df = pd.read_csv(os.path.join(folder_path, file_list[i]))
# print(df)
# print(df.shape[0])
count += df.shape[0]
# print()
# print()
df.to_csv(save_file, encoding="utf-8", index=False, header=False, mode='a+')
except Exception:
pass
return count
def csv_to_mysql(load_sql, host, user, password):
"""
This function load a csv file to MySQL table according to
the load_sql statement.
:param load_sql:
:param host:
:param user:
:param password:
:return:
"""
try:
con = pymysql.connect(host=host,
user=user,
password=password,
autocommit=True,
local_infile=1)
print('Connected to DB: {}'.format(host))
# Create cursor and execute Load SQL
cursor = con.cursor()
cursor.execute(load_sql)
        print('Successfully loaded the table from csv.')
con.close()
except Exception as e:
print('Error: {}'.format(str(e)))
sys.exit(1)
def gen_times():
"""
确定拉取的边界时间
一般情况
:return:
"""
dt1 = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(datetime.date.today(),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y%m%d")
return dt1, dt2, date_int_str
def gen_temp_times(start, end):
"""
需要补充数据的特殊情况
end 是先对当前已经过去的时间
:param start:
:param end:
:return:
"""
while start.date() <= end.date():
dt1 = datetime.datetime.combine(start, datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(start + datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(start, datetime.time.min).strftime("%Y%m%d")
yield dt1, dt2, date_int_str
start += d | # "volume": 0,
# # "amount": 0
# # }
# # 现将 dt1 和 dt2 进行转换
# ret = coll.find({"time": {"$gte": dt1, "$lte": dt2}}).count_documents
# return ret
def gene(dt1, dt2, date_int_str):
"""整个生成逻辑"""
logger.info(f"dt1:{dt1}")
logger.info(f"dt2:{dt2}")
mysqlhost = MYSQLHOST
user = MYSQLUSER
password = MYSQLPASSWORD
mysqlport = MYSQLPORT
mysqldb = MYSQLDB
mysqltable = MYSQLTABLE
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
save_file_path = os.path.join(os.getcwd(), "savedir")
save_file_name = date_int_str + ".csv"
# 生成截止到当前的全种类列表
codes = all_codes_now()
# 将其以重写的方式存入 codes.json 文件
write_codes_to_file(codes)
# 将 codes 读出到内存 同时将每一个code的增量写入文件
wirte_code_date_to_file(dt1, dt2, date_int_str)
# 将 csv 文件进行合并 并且计算被导入的增量数量
count = merge_csv(folder_path, save_file_path, save_file_name)
logger.info(f"由csv文件计算出的当天需要进行增量的数据量为 {count}")
sentry.captureMessage(f"需要进行增量的数据量为 {count}")
# 检查与 mongo 中的增量结果是否一致
# 这个查询也比较耗时 先不检查了
# mongo_count = gen_mongo_count(dt1, dt2)
if count:
# 将合并后的 csv 导入 mysql
save_file = os.path.join(save_file_path, save_file_name)
load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
REPLACE INTO TABLE {mysqldb}.{mysqltable} \
FIELDS TERMINATED BY ',' \
ENCLOSED BY '"' \
IGNORE 1 LINES;"""
csv_to_mysql(load_sql, mysqlhost, user, password)
# 检查 csv 中的数目和数据库中查询出的数量是否一致
query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= {date_int_str}"""
mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
{mysqlport}/{mysqldb}?charset=gbk"
DATACENTER = create_engine(mysql_string)
sql_count = DATACENTER.execute(query_sql).first()[0]
if sql_count != count:
raise RuntimeError("数据量不一致,请检查!")
# 合并后删除原始的 csv 文件
shutil.rmtree(folder_path, ignore_errors=True)
logger.info(f"任务完成 删除当日过程 csv 文件 ")
def catch_exceptions(cancel_on_failure=False):
def catch_exceptions_decorator(job_func):
@functools.wraps(job_func)
def wrapper(*args, **kwargs):
try:
return job_func(*args, **kwargs)
except:
import traceback
logger.warning(traceback.format_exc())
sentry.captureException(exc_info=True)
if cancel_on_failure:
# print(schedule.CancelJob)
# schedule.cancel_job()
return schedule.CancelJob
return wrapper
return catch_exceptions_decorator
@catch_exceptions()
def main():
import_date_str = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
logger.info(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
sentry.captureMessage(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
dt1, dt2, date_int_str = gen_times()
gene(dt1, dt2, date_int_str)
if __name__ == "__main__":
# test gen times
# res = gen_times()
# print(res)
# test gen temp times
# start = datetime.datetime(2019, 7, 8, 12, 34, 56)
# end = datetime.datetime(2019, 7, 18, 12, 34, 56)
# generator = gen_temp_times(start, end)
# for data in generator:
# print(data)
# """
# ('2019-07-08T00:00:00Z', '2019-07-09T00:00:00Z', '20190708')
# ('2019-07-09T00:00:00Z', '2019-07-10T00:00:00Z', '20190709')
# ('2019-07-10T00:00:00Z', '2019-07-11T00:00:00Z', '20190710')
# ('2019-07-11T00:00:00Z', '2019-07-12T00:00:00Z', '20190711')
# ('2019-07-12T00:00:00Z', '2019-07-13T00:00:00Z', '20190712')
# ('2019-07-13T00:00:00Z', '2019-07-14T00:00:00Z', '20190713')
# ('2019-07-14T00:00:00Z', '2019-07-15T00:00:00Z', '20190714')
# ('2019-07-15T00:00:00Z', '2019-07-16T00:00:00Z', '20190715')
# ('2019-07-16T00:00:00Z', '2019-07-17T00:00:00Z', '20190716')
# ('2019-07-17T00:00:00Z', '2019-07-18T00:00:00Z', '20190717')
# ('2019-07-18T00:00:00Z', '2019-07-19T00:00:00Z', '20190718')
# """
# test gen all codes from mongo today
# t1 = time.time()
# all_codes_now()
# print(time.time() - t1) # 61s
# test wirte code to a file
# t1 = time.time()
# now_codes = all_codes_now()
# write_codes_to_file(now_codes)
# print(time.time() - t1) # 55s
# 写入分散的 csv 文件
dt1, dt2, date_int_str = gen_times()
# t1 = time.time()
# wirte_code_date_to_file(dt1, dt2, date_int_str)
# t2 = time.time()
# print((t2 - t1)/60, "min") # 79min
# test gen momngo count
# mongo_count = gen_mongo_count(dt1, dt2)
# print(mongo_count)
# test merge csv
# dt1, dt2, date_int_str = gen_times()
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
# save_file_path = os.path.join(os.getcwd(), "savedir")
# save_file_name = date_int_str + ".csv"
#
# t1 = time.time()
# count = merge_csv(folder_path, save_file_path, save_file_name)
# t2 = time.time()
# print(t2 - t1) # 时间在 1 min 左右
# print(count) # 1309440
# test csv to mysql
# 将合并后的 csv 导入 mysql
# t1 = time.time()
# mysqlhost = MYSQLHOST
# user = MYSQLUSER
# password = MYSQLPASSWORD
# mysqlport = MYSQLPORT
# mysqldb = MYSQLDB
# mysqltable = MYSQLTABLE
# save_file = os.path.join(save_file_path, save_file_name)
# load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
# REPLACE INTO TABLE {mysqldb}.{mysqltable} \
# FIELDS TERMINATED BY ',' \
# ENCLOSED BY '"' \
# IGNORE 1 LINES;"""
#
# csv_to_mysql(load_sql, mysqlhost, user, password)
# print(time.time() - t1) # 97s
#
# # 检查 csv 中的数目和数据库中查询出的数量是否一致
# query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= '{dt1[:10]}' and time <= '{dt2[:10]}';"""
# mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
# {mysqlport}/{mysqldb}?charset=gbk"
# print(query_sql)
# DATACENTER = create_engine(mysql_string)
# sql_count = DATACENTER.execute(query_sql).first()[0]
# print(sql_count) # 1309440
# 测试删除合并后的文件夹
# shutil.rmtree(folder_path, ignore_errors=True)
pass
| atetime.timedelta(days=1)
# def gen_mongo_count(dt1, dt2):
# """
# 计算在dt1 和 dt2之间的增量数量 理论上是一天的增量
# :param dt1:
# :param dt2:
# :return:
# """
# # {
# # "_id": ObjectId("59ce1e1d6e6dc7768c7140dc"),
# # "code": "SH900955",
# # "time": ISODate("1999-07-26T09:59:00Z"),
# # "open": 0.462,
# # "close": 0.462,
# # "low": 0.462,
# # "high": 0.462,
# | conditional_block |
utils.py | import datetime
import functools
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import pymongo
import pandas as pd
import pymysql
import schedule
from raven import Client
from sqlalchemy import create_engine
from config import MONGOURL, MYSQLHOST, MYSQLUSER, MYSQLPASSWORD, MYSQLPORT, MYSQLDB, MYSQLTABLE, SENTRY_DSN
logger = logging.getLogger("main_log")
db = pymongo.MongoClient(MONGOURL)
coll = db.stock.mins
sentry = Client(SENTRY_DSN)
def all_codes_now():
codes = coll.find().distinct("code")
return codes # 12332 12359 果然每天的 codes 数量不一样
def write_codes_to_file(codes):
"""如果codes不是经常有新增的 查询一次写入文件
下次需要的时候直接从文件中读取要比数据库 distinct 查询要快"""
with open("codes.json", "w") as f:
json.dump(codes, f)
def wirte_code_date_to_file(dt1, dt2, date_int_str):
"""
将指定 code 制定时间内的增量 写入文件
:param dt1:
:param dt2:
:param date_int_str:
:return:
"""
f = open("codes.json", "r")
codes = json.load(f)
f.close()
file_path = os.path.join(os.getcwd(), "exportdir/" + date_int_str)
os.makedirs(file_path, exist_ok=True)
for code in codes:
q = '{{code:"{0}",time: {{$gte:ISODate("{1}"), $lte:ISODate("{2}")}}}}'.format(code, dt1, dt2)
file_name = os.path.join(file_path, code)
command = "mongoexport -d stock -c mins -q '{}' --fieldFile mins_fields.txt --type=csv --out {}.csv".format(q, file_name)
# print(command)
log_file = open("export_log.log", "a+")
subprocess.call(command, shell=True, stderr=log_file)
def merge_csv(folder_path, savefile_path, savefile_name):
"""
将多个 csv 文件合并成一个
:param folder_path:
:param savefile_path:
| savefile_name:
:return:
"""
# 修改当前工作目录
os.chdir(folder_path)
# 将该文件夹下的所有文件名存入一个列表
file_list = os.listdir()
# 读取第一个CSV文件并包含表头
df = pd.read_csv(os.path.join(folder_path, file_list[0]))
# 创建要保存的文件夹
os.makedirs(savefile_path, exist_ok=True)
# 将读取的第一个CSV文件写入合并后的文件保存
save_file = os.path.join(savefile_path, savefile_name)
df.to_csv(save_file)
# 循环遍历列表中各个CSV文件名,并追加到合并后的文件
count = 0
try:
for i in range(1, len(file_list)):
# print(os.path.join(Folder_Path, file_list[i]))
df = pd.read_csv(os.path.join(folder_path, file_list[i]))
# print(df)
# print(df.shape[0])
count += df.shape[0]
# print()
# print()
df.to_csv(save_file, encoding="utf-8", index=False, header=False, mode='a+')
except Exception:
pass
return count
def csv_to_mysql(load_sql, host, user, password):
"""
This function load a csv file to MySQL table according to
the load_sql statement.
:param load_sql:
:param host:
:param user:
:param password:
:return:
"""
try:
con = pymysql.connect(host=host,
user=user,
password=password,
autocommit=True,
local_infile=1)
print('Connected to DB: {}'.format(host))
# Create cursor and execute Load SQL
cursor = con.cursor()
cursor.execute(load_sql)
        print('Successfully loaded the table from csv.')
con.close()
except Exception as e:
print('Error: {}'.format(str(e)))
sys.exit(1)
def gen_times():
"""
确定拉取的边界时间
一般情况
:return:
"""
dt1 = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(datetime.date.today(),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(datetime.date.today() - datetime.timedelta(days=1),
datetime.time.min).strftime("%Y%m%d")
return dt1, dt2, date_int_str
def gen_temp_times(start, end):
"""
需要补充数据的特殊情况
end 是先对当前已经过去的时间
:param start:
:param end:
:return:
"""
while start.date() <= end.date():
dt1 = datetime.datetime.combine(start, datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
dt2 = datetime.datetime.combine(start + datetime.timedelta(days=1),
datetime.time.min).strftime("%Y-%m-%dT%H:%M:%SZ")
date_int_str = datetime.datetime.combine(start, datetime.time.min).strftime("%Y%m%d")
yield dt1, dt2, date_int_str
start += datetime.timedelta(days=1)
# def gen_mongo_count(dt1, dt2):
# """
# 计算在dt1 和 dt2之间的增量数量 理论上是一天的增量
# :param dt1:
# :param dt2:
# :return:
# """
# # {
# # "_id": ObjectId("59ce1e1d6e6dc7768c7140dc"),
# # "code": "SH900955",
# # "time": ISODate("1999-07-26T09:59:00Z"),
# # "open": 0.462,
# # "close": 0.462,
# # "low": 0.462,
# # "high": 0.462,
# # "volume": 0,
# # "amount": 0
# # }
# # 现将 dt1 和 dt2 进行转换
# ret = coll.find({"time": {"$gte": dt1, "$lte": dt2}}).count_documents
# return ret
def gene(dt1, dt2, date_int_str):
"""整个生成逻辑"""
logger.info(f"dt1:{dt1}")
logger.info(f"dt2:{dt2}")
mysqlhost = MYSQLHOST
user = MYSQLUSER
password = MYSQLPASSWORD
mysqlport = MYSQLPORT
mysqldb = MYSQLDB
mysqltable = MYSQLTABLE
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
save_file_path = os.path.join(os.getcwd(), "savedir")
save_file_name = date_int_str + ".csv"
# 生成截止到当前的全种类列表
codes = all_codes_now()
# 将其以重写的方式存入 codes.json 文件
write_codes_to_file(codes)
# 将 codes 读出到内存 同时将每一个code的增量写入文件
wirte_code_date_to_file(dt1, dt2, date_int_str)
# 将 csv 文件进行合并 并且计算被导入的增量数量
count = merge_csv(folder_path, save_file_path, save_file_name)
logger.info(f"由csv文件计算出的当天需要进行增量的数据量为 {count}")
sentry.captureMessage(f"需要进行增量的数据量为 {count}")
# 检查与 mongo 中的增量结果是否一致
# 这个查询也比较耗时 先不检查了
# mongo_count = gen_mongo_count(dt1, dt2)
if count:
# 将合并后的 csv 导入 mysql
save_file = os.path.join(save_file_path, save_file_name)
load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
REPLACE INTO TABLE {mysqldb}.{mysqltable} \
FIELDS TERMINATED BY ',' \
ENCLOSED BY '"' \
IGNORE 1 LINES;"""
csv_to_mysql(load_sql, mysqlhost, user, password)
# 检查 csv 中的数目和数据库中查询出的数量是否一致
query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= {date_int_str}"""
mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
{mysqlport}/{mysqldb}?charset=gbk"
DATACENTER = create_engine(mysql_string)
sql_count = DATACENTER.execute(query_sql).first()[0]
if sql_count != count:
raise RuntimeError("数据量不一致,请检查!")
# 合并后删除原始的 csv 文件
shutil.rmtree(folder_path, ignore_errors=True)
logger.info(f"任务完成 删除当日过程 csv 文件 ")
def catch_exceptions(cancel_on_failure=False):
def catch_exceptions_decorator(job_func):
@functools.wraps(job_func)
def wrapper(*args, **kwargs):
try:
return job_func(*args, **kwargs)
except:
import traceback
logger.warning(traceback.format_exc())
sentry.captureException(exc_info=True)
if cancel_on_failure:
# print(schedule.CancelJob)
# schedule.cancel_job()
return schedule.CancelJob
return wrapper
return catch_exceptions_decorator
@catch_exceptions()
def main():
import_date_str = (datetime.datetime.today() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
logger.info(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
sentry.captureMessage(f"现在是 {datetime.datetime.today()}, 开始增量 stock.mins 在 "
f"{import_date_str} 全天的增量数据到 mysql 数据库")
dt1, dt2, date_int_str = gen_times()
gene(dt1, dt2, date_int_str)
if __name__ == "__main__":
# test gen times
# res = gen_times()
# print(res)
# test gen temp times
# start = datetime.datetime(2019, 7, 8, 12, 34, 56)
# end = datetime.datetime(2019, 7, 18, 12, 34, 56)
# generator = gen_temp_times(start, end)
# for data in generator:
# print(data)
# """
# ('2019-07-08T00:00:00Z', '2019-07-09T00:00:00Z', '20190708')
# ('2019-07-09T00:00:00Z', '2019-07-10T00:00:00Z', '20190709')
# ('2019-07-10T00:00:00Z', '2019-07-11T00:00:00Z', '20190710')
# ('2019-07-11T00:00:00Z', '2019-07-12T00:00:00Z', '20190711')
# ('2019-07-12T00:00:00Z', '2019-07-13T00:00:00Z', '20190712')
# ('2019-07-13T00:00:00Z', '2019-07-14T00:00:00Z', '20190713')
# ('2019-07-14T00:00:00Z', '2019-07-15T00:00:00Z', '20190714')
# ('2019-07-15T00:00:00Z', '2019-07-16T00:00:00Z', '20190715')
# ('2019-07-16T00:00:00Z', '2019-07-17T00:00:00Z', '20190716')
# ('2019-07-17T00:00:00Z', '2019-07-18T00:00:00Z', '20190717')
# ('2019-07-18T00:00:00Z', '2019-07-19T00:00:00Z', '20190718')
# """
# test gen all codes from mongo today
# t1 = time.time()
# all_codes_now()
# print(time.time() - t1) # 61s
# test wirte code to a file
# t1 = time.time()
# now_codes = all_codes_now()
# write_codes_to_file(now_codes)
# print(time.time() - t1) # 55s
# 写入分散的 csv 文件
dt1, dt2, date_int_str = gen_times()
# t1 = time.time()
# wirte_code_date_to_file(dt1, dt2, date_int_str)
# t2 = time.time()
# print((t2 - t1)/60, "min") # 79min
# test gen momngo count
# mongo_count = gen_mongo_count(dt1, dt2)
# print(mongo_count)
# test merge csv
# dt1, dt2, date_int_str = gen_times()
export_path = os.path.join(os.getcwd(), "exportdir")
folder_path = os.path.join(export_path, date_int_str)
# save_file_path = os.path.join(os.getcwd(), "savedir")
# save_file_name = date_int_str + ".csv"
#
# t1 = time.time()
# count = merge_csv(folder_path, save_file_path, save_file_name)
# t2 = time.time()
# print(t2 - t1) # 时间在 1 min 左右
# print(count) # 1309440
# test csv to mysql
# 将合并后的 csv 导入 mysql
# t1 = time.time()
# mysqlhost = MYSQLHOST
# user = MYSQLUSER
# password = MYSQLPASSWORD
# mysqlport = MYSQLPORT
# mysqldb = MYSQLDB
# mysqltable = MYSQLTABLE
# save_file = os.path.join(save_file_path, save_file_name)
# load_sql = f"""LOAD DATA LOCAL INFILE '{save_file}' \
# REPLACE INTO TABLE {mysqldb}.{mysqltable} \
# FIELDS TERMINATED BY ',' \
# ENCLOSED BY '"' \
# IGNORE 1 LINES;"""
#
# csv_to_mysql(load_sql, mysqlhost, user, password)
# print(time.time() - t1) # 97s
#
# # 检查 csv 中的数目和数据库中查询出的数量是否一致
# query_sql = f"""select count(1) from {mysqldb}.{mysqltable} where time >= '{dt1[:10]}' and time <= '{dt2[:10]}';"""
# mysql_string = f"mysql+pymysql://{user}:{password}@{mysqlhost}:\
# {mysqlport}/{mysqldb}?charset=gbk"
# print(query_sql)
# DATACENTER = create_engine(mysql_string)
# sql_count = DATACENTER.execute(query_sql).first()[0]
# print(sql_count) # 1309440
# 测试删除合并后的文件夹
# shutil.rmtree(folder_path, ignore_errors=True)
pass
| :param | identifier_name |
vga_buffer.rs | use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9, | LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
// creates a single byte detailing the fore and background colors (based on VGA specifications)
fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
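// For example (illustrative, not part of the original): ColorCode::new(Color::Yellow, Color::Black)
// packs to 0x0E, i.e. background 0x0 in the high nibble and foreground 0xE in the low nibble.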
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures struct's field laid out exactly like a C struct since VGA depends on the order of the two bytes
#[repr(C)]
struct ScreenChar {
ascii_character: u8, // VGA byte representing ascii char
color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
struct Buffer {
// Volatile crate keeps rust compiler from optimizing and removing writes
// since writes are never read and are going to the VGA buffer memory (a side-effect)
// and not just writing to RAM
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
// To actually write to the screen: always write to the last line & shift lines up when a line is full (or on \n)
pub struct Writer {
column_position: usize, // keeps track of current position in last row
color_code: ColorCode, // current fore & background colors
buffer: &'static mut Buffer, // reference to VGA buffer: 'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
// writes a single byte to the screen at current location
pub fn write_byte(&mut self, byte: u8) {
match byte {
b'\n' => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line();
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
let color_code = self.color_code;
self.buffer.chars[row][col].write(ScreenChar {
ascii_character: byte,
color_code: color_code,
});
self.column_position += 1;
}
}
}
// accepts a string to be written only writing valid ascii chars
pub fn write_string(&mut self, s: &str) {
for byte in s.bytes() {
match byte {
// printable ASCII byte or newline
0x20..=0x7e | b'\n' => self.write_byte(byte),
// not part of printable ASCII range
_ => self.write_byte(0xfe),
}
}
}
fn new_line(&mut self) {
// range notation is exclusive of upper end.
// top line of screen is 0 and is shifted off screen
for row in 1..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH {
let char = self.buffer.chars[row][col].read();
self.buffer.chars[row-1][col].write(char);
}
}
// clears last line of output for new input, otherwise if string being written
// is not long enough all previous characters will not be overwritten
self.clear_row(BUFFER_HEIGHT - 1);
self.column_position = 0;
}
// clears row by overwriting characters with spaces
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: b' ',
color_code: self.color_code,
};
for col in 0..BUFFER_WIDTH {
self.buffer.chars[row][col].write(blank);
}
}
}
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: write! / writeln! macro support
impl fmt::Write for Writer {
// The only required method of the fmt::Write trait
fn write_str(&mut self, s: &str) -> fmt::Result {
self.write_string(s);
Ok(())
}
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but that mutability is not exposed to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
lazy_static! {
pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
column_position: 0,
color_code: ColorCode::new(Color::Yellow, Color::Black),
// provides a direct mutable reference to the VGA memory-mapped I/O address
// allowing reading and writing. We deem this safe as this address always corresponds to
// VGA, and therefore it is acceptable and required to wrap in an unsafe block
buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
});
}
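// Illustrative usage sketch (added; not part of the original module): shows how the
// global WRITER above can be used directly. Only items defined in this file are used.
#[allow(dead_code)]
fn example_direct_usage() {
    let mut writer = WRITER.lock();      // acquire the spinlock around the Writer
    writer.write_byte(b'H');             // raw byte lands on the last VGA row
    writer.write_string("ello world\n"); // non-printable bytes would be shown as 0xfe
}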
// Defines the print! macro
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro
#[macro_export]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write; // imports write_fmt method from the Write trait
WRITER.lock().write_fmt(args).unwrap();
}
#[cfg(test)]
mod test {
use super::*; // import all items of parent module: vga_buffer
// Specifies what char represents an empty cell in VGA buffer during testing
fn empty_char() -> ScreenChar {
ScreenChar {
ascii_character: b' ',
color_code: ColorCode::new(Color::Green, Color::Brown),
}
}
fn construct_buffer() -> Buffer {
// bypasses array construction requiring that contained type is Copy
// ScreenChar satisfies this, but the Volatile wrapper does not
use array_init::array_init;
Buffer {
// Provides array initialization without non-Copy types.
// parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
// otherwise it could be used to perform calculations on value before creating the array.
// array_init utilizes type's size to create the required number of indices. In this case
// the number of columns and rows are defined in the Buffer struct
// "The width & height are deduced by type inference"
chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
}
}
fn construct_writer() -> Writer {
use std::boxed::Box;
let buffer = construct_buffer();
Writer {
column_position: 0,
color_code: ColorCode::new(Color::Blue, Color::Magenta),
// transforms the created buffer into a &'static mut to satisfy buffer property's type
buffer: Box::leak(Box::new(buffer)),
}
}
#[test] // tells test framework this is a test function
fn write_byte() {
let mut writer = construct_writer();
writer.write_byte(b'X');
writer.write_byte(b'Y');
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 1 && j == 0 {
assert_eq!(screen_char.ascii_character, b'X');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 1 && j == 1 {
assert_eq!(screen_char.ascii_character, b'Y');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
#[test]
fn write_formatted() {
use core::fmt::Write;
let mut writer = construct_writer();
writeln!(&mut writer, "a").unwrap();
writeln!(&mut writer, "b{}", "c").unwrap();
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 3 && j == 0 {
assert_eq!(screen_char.ascii_character, b'a');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 0 {
assert_eq!(screen_char.ascii_character, b'b');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 1 {
assert_eq!(screen_char.ascii_character, b'c');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i >= BUFFER_HEIGHT - 2 { // ensures empty lines are shifted in on a new line and have correct color code
assert_eq!(screen_char.ascii_character, b' ');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
} | LightGreen = 10,
LightCyan = 11, | random_line_split |
vga_buffer.rs | use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
// creates a single byte detailing the fore and background colors (based on VGA specifications)
fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures the struct's fields are laid out exactly like a C struct, since VGA depends on the order of the two bytes
#[repr(C)]
struct ScreenChar {
ascii_character: u8, // VGA byte representing ascii char
color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
struct Buffer {
// Volatile crate keeps rust compiler from optimizing and removing writes
// since writes are never read and are going to the VGA buffer memory (a side-effect)
// and not just writing to RAM
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
// To actually write to the screen: always write to the last line & shift lines up when a line is full (or on \n)
pub struct Writer {
column_position: usize, // keeps track of current position in last row
color_code: ColorCode, // current fore & background colors
buffer: &'static mut Buffer, // reference to VGA buffer: 'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
// writes a single byte to the screen at current location
pub fn write_byte(&mut self, byte: u8) {
match byte {
b'\n' => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line();
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
let color_code = self.color_code;
self.buffer.chars[row][col].write(ScreenChar {
ascii_character: byte,
color_code: color_code,
});
self.column_position += 1;
}
}
}
// accepts a string to be written only writing valid ascii chars
pub fn write_string(&mut self, s: &str) {
for byte in s.bytes() {
match byte {
// printable ASCII byte or newline
0x20..=0x7e | b'\n' => self.write_byte(byte),
// not part of printable ASCII range
_ => self.write_byte(0xfe),
}
}
}
fn new_line(&mut self) {
// range notation is exclusive of upper end.
// top line of screen is 0 and is shifted off screen
for row in 1..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH {
let char = self.buffer.chars[row][col].read();
self.buffer.chars[row-1][col].write(char);
}
}
// clears last line of output for new input, otherwise if string being written
// is not long enough all previous characters will not be overwritten
self.clear_row(BUFFER_HEIGHT - 1);
self.column_position = 0;
}
// clears row by overwriting characters with spaces
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: b' ',
color_code: self.color_code,
};
for col in 0..BUFFER_WIDTH {
self.buffer.chars[row][col].write(blank);
}
}
}
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: write! / writeln! macro support
impl fmt::Write for Writer {
// The only required method of the fmt::Write trait
fn write_str(&mut self, s: &str) -> fmt::Result {
self.write_string(s);
Ok(())
}
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but that mutability is not exposed to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
lazy_static! {
pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
column_position: 0,
color_code: ColorCode::new(Color::Yellow, Color::Black),
// provides a direct mutable reference to the VGA memory-mapped I/O address
// allowing reading and writing. We deem this safe as this address always corresponds to
// VGA, and therefore it is acceptable and required to wrap in an unsafe block
buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
});
}
// Defines the print! macro
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro
#[macro_export]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write; // imports write_fmt method from the Write trait
WRITER.lock().write_fmt(args).unwrap();
}
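// Illustrative expansion (added note, assuming the macros above are used as-is):
//   println!("x = {}", 42);
// expands to print!(concat!("x = {}", "\n"), 42), which calls
// crate::vga_buffer::print(format_args!("x = {}\n", 42)) and locks WRITER exactly once.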
#[cfg(test)]
mod test {
use super::*; // import all items of parent module: vga_buffer
// Specifies what char represents an empty cell in VGA buffer during testing
fn empty_char() -> ScreenChar {
ScreenChar {
ascii_character: b' ',
color_code: ColorCode::new(Color::Green, Color::Brown),
}
}
fn construct_buffer() -> Buffer {
// bypasses array construction requiring that contained type is Copy
// ScreenChar satisfies this, but the Volatile wrapper does not
use array_init::array_init;
Buffer {
// Provides array initialization without non-Copy types.
// parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
// otherwise it could be used to perform calculations on value before creating the array.
// array_init utilizes type's size to create the required number of indices. In this case
// the number of columns and rows are defined in the Buffer struct
// "The width & height are deduced by type inference"
chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
}
}
fn construct_writer() -> Writer {
use std::boxed::Box;
let buffer = construct_buffer();
Writer {
column_position: 0,
color_code: ColorCode::new(Color::Blue, Color::Magenta),
// transforms the created buffer into a &'static mut to satisfy buffer property's type
buffer: Box::leak(Box::new(buffer)),
}
}
#[test] // tells test framework this is a test function
fn write_byte() {
let mut writer = construct_writer();
writer.write_byte(b'X');
writer.write_byte(b'Y');
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 1 && j == 0 {
assert_eq!(screen_char.ascii_character, b'X');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 1 && j == 1 {
assert_eq!(screen_char.ascii_character, b'Y');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
#[test]
fn write_formatted() {
use core::fmt::Write;
let mut writer = construct_writer();
writeln!(&mut writer, "a").unwrap();
writeln!(&mut writer, "b{}", "c").unwrap();
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 3 && j == 0 {
assert_eq!(screen_char.ascii_character, b'a');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 0 {
assert_eq!(screen_char.ascii_character, b'b');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 1 {
assert_eq!(screen_char.ascii_character, b'c');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i >= BUFFER_HEIGHT - 2 | else {
assert_eq!(screen_char, empty_char());
}
}
}
}
}
| { // ensures empty lines are shifted in on a new line and have correct color code
assert_eq!(screen_char.ascii_character, b' ');
assert_eq!(screen_char.color_code, writer.color_code);
} | conditional_block |
vga_buffer.rs | use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
pub enum | {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
// creates a single byte detailing the fore and background colors (based on VGA specifications)
fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures the struct's fields are laid out exactly like a C struct, since VGA depends on the order of the two bytes
#[repr(C)]
struct ScreenChar {
ascii_character: u8, // VGA byte representing ascii char
color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
struct Buffer {
// Volatile crate keeps rust compiler from optimizing and removing writes
// since writes are never read and are going to the VGA buffer memory (a side-effect)
// and not just writing to RAM
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
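// Size note (added): ScreenChar is #[repr(C)] with two u8 fields, so each cell is
// 2 bytes and the whole buffer spans 80 * 25 * 2 = 4000 bytes of VGA text memory.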
// To actually write to the screen: always write to the last line & shift lines up when a line is full (or on \n)
pub struct Writer {
column_position: usize, // keeps track of current position in last row
color_code: ColorCode, // current fore & background colors
buffer: &'static mut Buffer, // reference to VGA buffer: 'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
// writes a single byte to the screen at current location
pub fn write_byte(&mut self, byte: u8) {
match byte {
b'\n' => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line();
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
let color_code = self.color_code;
self.buffer.chars[row][col].write(ScreenChar {
ascii_character: byte,
color_code: color_code,
});
self.column_position += 1;
}
}
}
// accepts a string to be written only writing valid ascii chars
pub fn write_string(&mut self, s: &str) {
for byte in s.bytes() {
match byte {
// printable ASCII byte or newline
0x20..=0x7e | b'\n' => self.write_byte(byte),
// not part of printable ASCII range
_ => self.write_byte(0xfe),
}
}
}
fn new_line(&mut self) {
// range notation is exclusive of upper end.
// top line of screen is 0 and is shifted off screen
for row in 1..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH {
let char = self.buffer.chars[row][col].read();
self.buffer.chars[row-1][col].write(char);
}
}
// clears last line of output for new input, otherwise if string being written
// is not long enough all previous characters will not be overwritten
self.clear_row(BUFFER_HEIGHT - 1);
self.column_position = 0;
}
// clears row by overwriting characters with spaces
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: b' ',
color_code: self.color_code,
};
for col in 0..BUFFER_WIDTH {
self.buffer.chars[row][col].write(blank);
}
}
}
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: write! / writeln! macro support
impl fmt::Write for Writer {
// The only required method of the fmt::Write trait
fn write_str(&mut self, s: &str) -> fmt::Result {
self.write_string(s);
Ok(())
}
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but that mutability is not exposed to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
lazy_static! {
pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
column_position: 0,
color_code: ColorCode::new(Color::Yellow, Color::Black),
// provides a direct mutable reference to the VGA memory-mapped I/O address
// allowing reading and writing. We deem this safe as this address always corresponds to
// VGA, and therefore it is acceptable and required to wrap in an unsafe block
buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
});
}
// Defines the print! macro
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro
#[macro_export]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write; // imports write_fmt method from the Write trait
WRITER.lock().write_fmt(args).unwrap();
}
#[cfg(test)]
mod test {
use super::*; // import all items of parent module: vga_buffer
// Specifies what char represents an empty cell in VGA buffer during testing
fn empty_char() -> ScreenChar {
ScreenChar {
ascii_character: b' ',
color_code: ColorCode::new(Color::Green, Color::Brown),
}
}
fn construct_buffer() -> Buffer {
// bypasses array construction requiring that contained type is Copy
// ScreenChar satisfies this, but the Volatile wrapper does not
use array_init::array_init;
Buffer {
// Provides array initialization without non-Copy types.
// parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
// otherwise it could be used to perform calculations on value before creating the array.
// array_init utilizes type's size to create the required number of indices. In this case
// the number of columns and rows are defined in the Buffer struct
// "The width & height are deduced by type inference"
chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
}
}
fn construct_writer() -> Writer {
use std::boxed::Box;
let buffer = construct_buffer();
Writer {
column_position: 0,
color_code: ColorCode::new(Color::Blue, Color::Magenta),
// transforms the created buffer into a &'static mut to satisfy buffer property's type
buffer: Box::leak(Box::new(buffer)),
}
}
#[test] // tells test framework this is a test function
fn write_byte() {
let mut writer = construct_writer();
writer.write_byte(b'X');
writer.write_byte(b'Y');
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 1 && j == 0 {
assert_eq!(screen_char.ascii_character, b'X');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 1 && j == 1 {
assert_eq!(screen_char.ascii_character, b'Y');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
#[test]
fn write_formatted() {
use core::fmt::Write;
let mut writer = construct_writer();
writeln!(&mut writer, "a").unwrap();
writeln!(&mut writer, "b{}", "c").unwrap();
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 3 && j == 0 {
assert_eq!(screen_char.ascii_character, b'a');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 0 {
assert_eq!(screen_char.ascii_character, b'b');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 1 {
assert_eq!(screen_char.ascii_character, b'c');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i >= BUFFER_HEIGHT - 2 { // ensures empty lines are shifted in on a new line and have correct color code
assert_eq!(screen_char.ascii_character, b' ');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
}
| Color | identifier_name |
vga_buffer.rs | use core::fmt;
use volatile::Volatile;
use spin::Mutex;
#[allow(dead_code)] // prevents compiler warnings that some enumerations are never used
#[derive(Debug, Clone, Copy, PartialEq, Eq)] // enables copy semantics for the type: makes printable & comparable
#[repr(u8)] // makes each enum variant be stored as a u8
pub enum Color {
Black = 0,
Blue = 1,
Green = 2,
Cyan = 3,
Red = 4,
Magenta = 5,
Brown = 6,
LightGray = 7,
DarkGray = 8,
LightBlue = 9,
LightGreen = 10,
LightCyan = 11,
LightRed = 12,
Pink = 13,
Yellow = 14,
White = 15,
}
// used to represent a full VGA color code (foreground & background)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8); // creates a type which is essentially an alias for a single byte
impl ColorCode {
// creates a single byte detailing the fore and background colors (based on VGA specifications)
fn new(foreground: Color, background: Color) -> ColorCode {
ColorCode((background as u8) << 4 | (foreground as u8))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
// ensures the struct's fields are laid out exactly like a C struct, since VGA depends on the order of the two bytes
#[repr(C)]
struct ScreenChar {
ascii_character: u8, // VGA byte representing ascii char
color_code: ColorCode, // VGA byte representing char's color
}
// VGA typical buffer sizes
const BUFFER_HEIGHT: usize = 25; // number of lines
const BUFFER_WIDTH: usize = 80; // number of chars in line
struct Buffer {
// Volatile crate keeps rust compiler from optimizing and removing writes
// since writes are never read and are going to the VGA buffer memory (a side-effect)
// and not just writing to RAM
chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}
// To actually write to the screen: always write to the last line & shift lines up when a line is full (or on \n)
pub struct Writer {
column_position: usize, // keeps track of current position in last row
color_code: ColorCode, // current fore & background colors
buffer: &'static mut Buffer, // reference to VGA buffer: 'static lifetime specifies reference is valid for whole program run time (VGA buffer)
}
impl Writer {
// writes a single byte to the screen at current location
pub fn write_byte(&mut self, byte: u8) {
match byte {
b'\n' => self.new_line(),
byte => {
if self.column_position >= BUFFER_WIDTH {
self.new_line();
}
let row = BUFFER_HEIGHT - 1;
let col = self.column_position;
let color_code = self.color_code;
self.buffer.chars[row][col].write(ScreenChar {
ascii_character: byte,
color_code: color_code,
});
self.column_position += 1;
}
}
}
// accepts a string to be written only writing valid ascii chars
pub fn write_string(&mut self, s: &str) {
for byte in s.bytes() {
match byte {
// printable ASCII byte or newline
0x20..=0x7e | b'\n' => self.write_byte(byte),
// not part of printable ASCII range
_ => self.write_byte(0xfe),
}
}
}
fn new_line(&mut self) {
// range notation is exclusive of upper end.
// top line of screen is 0 and is shifted off screen
for row in 1..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH {
let char = self.buffer.chars[row][col].read();
self.buffer.chars[row-1][col].write(char);
}
}
// clears last line of output for new input, otherwise if string being written
// is not long enough all previous characters will not be overwritten
self.clear_row(BUFFER_HEIGHT - 1);
self.column_position = 0;
}
// clears row by overwriting characters with spaces
fn clear_row(&mut self, row: usize) {
let blank = ScreenChar {
ascii_character: b' ',
color_code: self.color_code,
};
for col in 0..BUFFER_WIDTH {
self.buffer.chars[row][col].write(blank);
}
}
}
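// Behaviour note (added): write_byte wraps automatically - once column_position reaches
// BUFFER_WIDTH the next byte triggers new_line(), which scrolls every row up by one and
// clears the bottom row before writing continues there.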
// Provides support for Rust's formatting macros allowing easy printing
// of different types like integers or floats.
// Results in: write! / writeln! macro support
impl fmt::Write for Writer {
// The only required method of the fmt::Write trait
fn write_str(&mut self, s: &str) -> fmt::Result |
}
// Provides a static Writer object which utilizes non-const functions
// Requires locking to provide interior mutability: since it utilizes &mut self for writing
// it requires mutability, but its mutibility is not provided to users, therefore it is interior
// mutability. The Mutex allows safe usage internally.
lazy_static! {
pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
column_position: 0,
color_code: ColorCode::new(Color::Yellow, Color::Black),
// provides a direct mutable reference to the VGA memory-mapped I/O address
// allowing reading and writing. We deem this safe as this address always corresponds to
// VGA, and therefore it is acceptable and required to wrap in an unsafe block
buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
});
}
// Defines the print! macro
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}
// Defines the println! macro
#[macro_export]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write; // imports write_fmt method from the Write trait
WRITER.lock().write_fmt(args).unwrap();
}
#[cfg(test)]
mod test {
use super::*; // import all items of parent module: vga_buffer
// Specifies what char represents an empty cell in VGA buffer during testing
fn empty_char() -> ScreenChar {
ScreenChar {
ascii_character: b' ',
color_code: ColorCode::new(Color::Green, Color::Brown),
}
}
fn construct_buffer() -> Buffer {
// bypasses array construction requiring that contained type is Copy
// ScreenChar satisfies this, but the Volatile wrapper does not
use array_init::array_init;
Buffer {
// Provides array initialization without non-Copy types.
// parameter of array_init is a closure. The single parameter to the closure is unused and therefore unimportant
// otherwise it could be used to perform calculations on value before creating the array.
// array_init utilizes type's size to create the required number of indices. In this case
// the number of columns and rows are defined in the Buffer struct
// "The width & height are deduced by type inference"
chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
}
}
fn construct_writer() -> Writer {
use std::boxed::Box;
let buffer = construct_buffer();
Writer {
column_position: 0,
color_code: ColorCode::new(Color::Blue, Color::Magenta),
// transforms the created buffer into a &'static mut to satisfy buffer property's type
buffer: Box::leak(Box::new(buffer)),
}
}
#[test] // tells test framework this is a test function
fn write_byte() {
let mut writer = construct_writer();
writer.write_byte(b'X');
writer.write_byte(b'Y');
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 1 && j == 0 {
assert_eq!(screen_char.ascii_character, b'X');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 1 && j == 1 {
assert_eq!(screen_char.ascii_character, b'Y');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
#[test]
fn write_formatted() {
use core::fmt::Write;
let mut writer = construct_writer();
writeln!(&mut writer, "a").unwrap();
writeln!(&mut writer, "b{}", "c").unwrap();
for (i, row) in writer.buffer.chars.iter().enumerate() {
for (j, screen_char) in row.iter().enumerate() {
let screen_char = screen_char.read();
if i == BUFFER_HEIGHT - 3 && j == 0 {
assert_eq!(screen_char.ascii_character, b'a');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 0 {
assert_eq!(screen_char.ascii_character, b'b');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i == BUFFER_HEIGHT - 2 && j == 1 {
assert_eq!(screen_char.ascii_character, b'c');
assert_eq!(screen_char.color_code, writer.color_code);
} else if i >= BUFFER_HEIGHT - 2 { // ensures empty lines are shifted in on a new line and have correct color code
assert_eq!(screen_char.ascii_character, b' ');
assert_eq!(screen_char.color_code, writer.color_code);
} else {
assert_eq!(screen_char, empty_char());
}
}
}
}
}
| {
self.write_string(s);
Ok(())
} | identifier_body |
059_Implementacion_plazos.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# execfile(PATH + "syntax/03_Deteccion_info/059_Implementacion_plazos.py")
if __name__ == "__main__":
guardaResultados = True
muestraHTML = True
guardaES = True
es_hdfs = False
import sys
import platform
if platform.system() == "Windows":
PATH = "D:/Judicial/"
sys.path.append('C:/Python27/Lib/site-packages')
eslocal = True
try:
pais
except NameError:
pais = "esp"
else:
PATH = "/home/dcortiada/"
eslocal = False
if len(sys.argv) < 7:
raise Exception("Tiene que haber como minimo 6 argumentos!!\nEjemplo llamada:\n python 019_Implementacion_leyes.py ip_donde_leer(172.22.248.206:9229) ip_donde_escribir(172.22.248.206:9229) indice_donde_leer indice_donde_escribir criterio(name) fichero_a_procesar1 fichero_a_procesar2")
else:
ipread = sys.argv[1]
ipwrite = sys.argv[2]
indiceread = sys.argv[3]
indicewrite = sys.argv[4]
criterion = sys.argv[5]
files2Eval = sys.argv[6].split("/")
# pais = sys.argv[7]
# idioma = sys.argv[8]
# es_hdfs = sys.argv[9]
pais = "esp"
idioma = "es"
es_hdfs = False
print "Los archivos a evaluar son los siguientes:\n '" + reduce(lambda x, y: x + "', '" + y, files2Eval) + "'."
if es_hdfs == "True":
es_hdfs = True
else:
es_hdfs = False
if es_hdfs:
PATH = "hdfs:///user/ijdocs/"
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext, HiveContext
# Create the Spark configuration
conf = (SparkConf()
.setAppName("ejemplo")
.set("spark.executor.memory", "1g")
.set("spark.yarn.appMasterEnv.PYSPARK_PYTHON", "/usr/bin/python")
.set("spark.yarn.appMasterEnv.PYSPARK_DRIVER_PYTHON", "/usr/bin/python"))
# Create the SparkContext with the configuration above
sc = SparkContext(conf=conf)
# SQL connectors to work with in our script
sqlContext = SQLContext(sc)
hiveContext = HiveContext(sc)
# Load libraries and functions
from os import listdir
import numpy as np
from os.path import exists
import pandas as pd
from math import factorial
import gensim
import json
from copy import deepcopy
if not es_hdfs:
execfile(PATH + "syntax/scripts/NER/carga_todas_funciones.py")
else:
filename = PATH + "syntax/scripts/NER/funciones_comparacion_texto.py"
sc.addPyFile(filename)
from funciones_comparacion_texto import *
filename = PATH + "syntax/scripts/NER/funciones_elastic.py"
sc.addPyFile(filename)
from funciones_elastic import *
filename = PATH + "syntax/scripts/NER/funciones_transforma_refs.py"
sc.addPyFile(filename)
from funciones_transforma_refs import *
filename = PATH + "syntax/scripts/NER/funciones_busqueda.py"
sc.addPyFile(filename)
from funciones_busqueda import *
filename = PATH + "syntax/scripts/NER/funciones_preprocesado.py"
sc.addPyFile(filename)
from funciones_preprocesado import *
filename = PATH + "syntax/scripts/NER/funciones_tratamiento_texto.py"
sc.addPyFile(filename)
from funciones_tratamiento_texto import *
filename = PATH + "syntax/scripts/NER/funciones_lectura_datos.py"
sc.addPyFile(filename)
from funciones_lectura_datos import *
# Define parameters
OUTPUTDIR = PATH + "output/resultados_plazos/"
if eslocal:
INPUTREAD = PATH + "input/documentos_expedientes/"
files2Eval = listdir(INPUTREAD)
else:
try:
files2Eval
except NameError:
files2Eval = list_docs(criterion = criterion, con = Elasticsearch([ipread]), indice = indiceread)
# Define the regular expressions for the quantities
DictPaisSearchMaxEnrere = dict(
esp = [
dict(contiene = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana'],
# inicios = ['[0-9]+', "\\buno", "\\bdos", "\\btres", "\\bcuatro", "\\bcinco", "\\bseis", "\\bsiete", "\\bocho", "\\bnueve", "\\bdiez", "\\bonce", "\\bdoce", "\\btrece", "\\bcatorce", "\\bquince", "\\bdieciseis", "\\bdiecisiete", "\\bdieciocho", "\\bdiecinueve", "\\bveinte", "\\bventi", "\\bveinti", "\\btreinta", "\\btrenta", "\\bcuarenta", "\\bcincuenta", "\\bsesenta", "\\bochenta", "\\bnoventa", "\\bcien", "\\bdoscientos", "\\btrescientos", "\\bcuatrocientos", "\\bquinientos", "\\bseiscientos", "\\bsetecientos", "\\bochocientos"],
inicios = ['pena', "plazo", "condena", "multa", "beneficio de la comunidad", "beneficio a la comunidad", "sancion"],
finales = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana']
)
],
bra = [
dict(contiene = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana'],
# inicios = ['[0-9]+', "\\buno", "\\bdos", "\\btres", "\\bcuatro", "\\bcinco", "\\bseis", "\\bsiete", "\\bocho", "\\bnueve", "\\bdiez", "\\bonce", "\\bdoce", "\\btrece", "\\bcatorce", "\\bquince", "\\bdieciseis", "\\bdiecisiete", "\\bdieciocho", "\\bdiecinueve", "\\bveinte", "\\bventi", "\\bveinti", "\\btreinta", "\\btrenta", "\\bcuarenta", "\\bcincuenta", "\\bsesenta", "\\bochenta", "\\bnoventa", "\\bcien", "\\bdoscientos", "\\btrescientos", "\\bcuatrocientos", "\\bquinientos", "\\bseiscientos", "\\bsetecientos", "\\bochocientos"],
inicios = ['pena', "prazo", "conviccao", "fino", "sancao", "sancoes"],
finales = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana']
)
]
)
SearchMaxEnrere = DictPaisSearchMaxEnrere[pais]
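# Descriptive note (added): with the patterns above, a candidate "plazo" span starts at a
# trigger word from `inicios` (pena, plazo, condena, multa, ...) found within ~50 characters
# before a time unit (dia/mes/ano/semana) and ends at the last time unit found within
# ~50 characters after the match.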
limSimFrases = 0.15 # Limit a partir del qual diem que 2 frases s'assemblen
guardaES = guardaES and not eslocal # Solo guardamos a elastic si no se ejecuta desde local
for filename in files2Eval:
print("Procesando " + filename + "...")
if eslocal:
RawReadedFile = pdf2txt(INPUTREAD + filename)
else:
RawReadedFile = import_doc(filename, criterion = criterion, con = Elasticsearch([ipread]), indice = indiceread)
readedFile = remove_accents(RawReadedFile)
readedFile = readedFile.lower()
# ================== #
# === Script 050 === #
# ================== #
# Find positions, taking into account the maximum look-back
index_points_max_enrere = []
for dictstr2search in SearchMaxEnrere:
idxAct = []
for str2search in dictstr2search['contiene']:
idxAct.extend(buscaPosicionRegexTexto(str2search, readedFile))
index_points_max_enrere.append(idxAct)
plazos = []
for i in range(len(SearchMaxEnrere)):
dictAct = SearchMaxEnrere[i]
for item in index_points_max_enrere[i]:
# Prepare the texts for starts and ends
aux_texto_ini = readedFile[:item[1]]
aux_texto_fin = readedFile[item[0]:]
# Search for starts and ends
listInicios = []
listFinal = []
for i_inicio in dictAct['inicios']:
listInicios.extend(buscaPosicionRegexTexto(i_inicio, aux_texto_ini))
for i_final in dictAct['finales']:
listFinal.extend(buscaPosicionRegexTexto(i_final, aux_texto_fin))
if len(listInicios) == 0 or len(listFinal) == 0:
continue
listInicios = np.array(listInicios)
listFinal = np.array(listFinal)
sel = abs(listInicios[:, 0] - len(aux_texto_ini)) < 50
if any(sel):
selFin = listFinal[:, 1] < 50
if any(selFin):
plazos.append((min(listInicios[sel, 0]), item[0] + max(listFinal[selFin, 1])))
else:
plazos.append((min(listInicios[sel, 0]), item[0] + min(listFinal[:, 1])))
# Keep only unique records
plazos = list(set(plazos))
if len(plazos) == 0:
print "No hemos encontrado plazos en este documento"
continue
# Make sure the text spans are not nested inside one another
plazos = pd.DataFrame(plazos)
plazos = plazos.sort(0)
plazos = plazos.values.tolist()
jresta = 0
for i in range(1, len(plazos)):
if plazos[i - jresta][0] < plazos[i - 1 - jresta][1]:
plazos[i - jresta] = [plazos[i - 1 - jresta][0], plazos[i - jresta][1]]
plazos.pop(i - 1 - jresta)
jresta += 1
# ================== #
# === Script 051 === #
# ================== #
# === #
# Change with respect to script 051!!
listRefs = deepcopy(plazos)
listRefs = pd.DataFrame(listRefs)
listRefs.columns = ["PosInicio", "PosFin"]
listRefs["Referencia"] = ""
for i in range(listRefs.shape[0]):
listRefs["Referencia"][i] = readedFile[listRefs["PosInicio"][i]:listRefs["PosFin"][i]]
# === #
listRefs["Ref_Orig"] = listRefs["Referencia"]
# Make modifications to the references
caracteres_elimina = [":", ".", "(", ")", ",", ";"]
caracteres_espacio = ["\n", "/", "-"]
for ce in caracteres_elimina:
listRefs["Referencia"] = listRefs["Referencia"].str.replace(ce, "")
for ce in caracteres_espacio:
listRefs["Referencia"] = listRefs["Referencia"].str.replace(ce, " ")
for i in range(listRefs.shape[0]):
listRefs["Referencia"][i] = quitaDoblesEspacios(listRefs["Referencia"][i])
listRefs["Referencia"] = listRefs["Referencia"].str.strip()
# Normalize the fields
listRefs["Referencia_Normalizada"] = ''
for i in range(listRefs.shape[0]):
normaliza = listRefs["Referencia"][i]
normaliza = normaliza.replace(" un ", " uno ")
aux_str = normaliza.split(" de ")
aux_norm = aux_str[len(aux_str) - 1]
aux_norm2 = aux_norm.split(" y ")
resNums = []
for aux_norm in aux_norm2:
aux_str = aux_norm.split(" ")
unidades = aux_str[len(aux_str) - 1]
number = reduce_concat(aux_str[:-1], sep = " ")
try:
number = transformaNumsString(number)
except:
pass
resNums.append(number + " " + unidades)
listRefs["Referencia_Normalizada"][i] = reduce_concat(resNums, sep = " y ")
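# Illustrative example (assumed behaviour of transformaNumsString): a reference such as
# "pena de dos anos y seis meses" would end up normalized as "2 anos y 6 meses".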
# ================== #
# === Script 052 === #
# ================== #
# Build a string with the same structure but with the accents kept
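# Descriptive note (added): the loop below walks the accented and accent-stripped strings
# in parallel and drops the extra character from whichever side mismatches, so both strings
# end up character-aligned (same length, same offsets).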
try:
RawReadedFileAcc = str(RawReadedFile.decode("utf8").encode("latin1", 'ignore'))
except:
RawReadedFileAcc = str(RawReadedFile.encode("latin1", "ignore"))
i = 0
while i < len(RawReadedFileAcc):
letterAcc = RawReadedFileAcc[i]
letterra = readedFile[i]
if remove_accents(letterAcc, enc = "latin1").lower() != letterra:
if letterAcc == readedFile[i + 1]:
readedFile = readedFile[:i] + readedFile[(i + 1):]
else:
RawReadedFileAcc = RawReadedFileAcc[:i] + RawReadedFileAcc[(i + 1):]
else:
i += 1
if len(RawReadedFileAcc) != len(readedFile):
raise Exception("No pot ser!!")
# Read the file of normalized date/term positions
ddNorm = deepcopy(listRefs)
# Take the text that appears before and after each date
ddNorm["Antes"] = ""
ddNorm["Despues"] = ""
x = 1000
for irow in range(ddNorm.shape[0]):
ddNorm["Antes"][irow] = RawReadedFileAcc[(ddNorm["PosInicio"][irow] - x):ddNorm["PosInicio"][irow]]
aux_text = RawReadedFileAcc[(ddNorm["PosFin"][irow] - 1):(ddNorm["PosFin"][irow] + x)]
if len(aux_text) > 0:
if aux_text[0] == ddNorm["Referencia"][irow][-1]:
ddNorm["Despues"][irow] = aux_text[1:]
else:
ddNorm["Despues"][irow] = aux_text
else:
ddNorm["Despues"][irow] = ""
# Split the before/after texts into sentences
for irow in range(ddNorm.shape[0]):
ddNorm["Antes"][irow] = ddNorm["Antes"][irow].replace("\n", " ")
ddNorm["Despues"][irow] = ddNorm["Despues"][irow].replace("\n", " ")
auxa = sentences(ddNorm["Antes"][irow])
auxa = auxa.split(" .\n")
ddNorm["Antes"][irow] = auxa[len(auxa) - 1]
auxd = sentences(ddNorm["Despues"][irow])
auxd = auxd.split(" .\n")
ddNorm["Despues"][irow] = auxd[0]
# See how relevant each element is
aux_tt = ddNorm["Referencia_Normalizada"].value_counts()
aux_tt2 = aux_tt/max(aux_tt)
aux_tt = pd.DataFrame(data = dict(Referencia_Normalizada = list(aux_tt.index), Apariciones = list(aux_tt.values), Relevancia = list(aux_tt2.values)))
ddNorm = ddNorm.merge(aux_tt, 'left')
# Normalize the sentences
result = dict()
for val in ddNorm["Referencia_Normalizada"].unique():
if int(aux_tt["Apariciones"][aux_tt["Referencia_Normalizada"] == val]) == 1:
# Here we handle the references that appear only once
sel = ddNorm["Referencia_Normalizada"] == val
aux_data = ddNorm[sel]
aux_data = aux_data.reset_index()
texto = str(aux_data["Antes"][0]) + str(aux_data["Ref_Orig"][0]) + str(aux_data["Despues"][0])
result[val] = dict(descripcion = texto.strip())
result[val]["posiciones"] = dict(inicio = list(aux_data["PosInicio"]), fin = list(aux_data["PosFin"]))
result[val]["referencias"] = list(aux_data["Ref_Orig"])
else:
# Here we analyse the case with more than one sentence: if they are alike we try to build a descriptive sentence,
# otherwise for each item we keep the most similar one (TRY WITH GENSIM!!)
frases = []
aux_data = ddNorm[ddNorm["Referencia_Normalizada"] == val]
aux_data = aux_data.reset_index()
for irow in range(aux_data.shape[0]):
texto = aux_data["Antes"][irow] + aux_data["Ref_Orig"][irow] + aux_data["Despues"][irow]
frases.append(texto)
# Compare the sentences and see which ones resemble each other
auxRes = comparaFrases(frases, fileMod = PATH + "data/modelos_NER/DS_RNN_2014")
gruposUnidos = unirGrupos(list(auxRes["item1"]), list(auxRes["item2"]), list(auxRes["valor_comp"] > limSimFrases))
# Extract the most relevant sentence for each group
frasesGrup = []
for grupo in gruposUnidos:
frases_grupo_act = [""]
for element in grupo:
frases_grupo_act[0] = frases_grupo_act[0] + " " + frases[element]
frases_grupo_act.append(frases[element])
auxRes = comparaFrases(frases_grupo_act, fileMod = PATH + "data/modelos_NER/DS_RNN_2014", totesContraTotes = False)
auxResSel = auxRes[(auxRes["item1"] == 0) | (auxRes["item2"] == 0)]
auxResSel = auxResSel[auxResSel["valor_comp"] == max(auxResSel["valor_comp"])].reset_index()
auxResSel = auxResSel.loc[0, ["item1", "item2"]]
i = int(auxResSel[auxResSel != 0]) - 1
frasesGrup.append(frases[grupo[i]])
if len(frasesGrup) == 1:
r |
else:
for igrupo in range(len(gruposUnidos)):
result[val + "_" + str(igrupo + 1)] = dict(descripcion = frasesGrup[igrupo])
result[val + "_" + str(igrupo + 1)]["posiciones"] = dict(inicio = list(aux_data.loc[(gruposUnidos[igrupo])]["PosInicio"]), fin = list(aux_data.loc[(gruposUnidos[igrupo])]["PosFin"]))
result[val + "_" + str(igrupo + 1)]["referencias"] = list(aux_data.loc[(gruposUnidos[igrupo])]["Ref_Orig"])
for ielement in range(len(gruposUnidos[igrupo])):
ddNorm["Referencia_Normalizada"][aux_data["index"][gruposUnidos[igrupo][ielement]]] = val + "_" + str(igrupo + 1)
# Rewrite the positions table with the normalized references
for ikey in result.keys():
result[ikey]["descripcion"] = result[ikey]["descripcion"].decode("latin1")
for i in range(len(result[ikey]["referencias"])):
result[ikey]["referencias"][i] = result[ikey]["referencias"][i].decode("latin1")
result[ikey]["posiciones"]["inicio"][i] = str(result[ikey]["posiciones"]["inicio"][i])
result[ikey]["posiciones"]["fin"][i] = str(result[ikey]["posiciones"]["fin"][i])
# ============================= #
# === Write results === #
# ============================= #
# Save outputs and push them to ES
if guardaResultados:
ddNorm.to_csv(path_or_buf = OUTPUTDIR + filename.lower().replace(".pdf", "") + "_info_pos.csv", sep = "|")
fileOut = OUTPUTDIR + filename.lower().replace(".pdf", "") + "_entities.json"
with open(fileOut, 'w') as fp:
json.dump(result, fp)
if muestraHTML:
dataJSON = deepcopy(result)
name_file = filename.lower().replace(".pdf", "")
ddNorm['order'] = [sum(ddNorm['Referencia_Normalizada'] == ddNorm['Referencia_Normalizada'][i]) for i in xrange(len(ddNorm))]
ddNorm = ddNorm.sort("order", ascending = False)
regUnics = pd.unique(ddNorm["Referencia_Normalizada"])
ddNorm["id_reg"] = ""
compta = 0
for registre in regUnics:
sel = ddNorm["Referencia_Normalizada"] == registre
ddNorm["id_reg"][sel] = compta
compta += 1
# Define the highlight marks
ddNorm['mark'] = 'mark09'
for i in range(1, 9):
sel = ddNorm["id_reg"] == (i - 1)
ddNorm["mark"][sel] = 'mark0' + str(i)
# Write the description HTML files and define the hrefs
ddNorm['href'] = ''
for ikey in dataJSON.keys():
hrefFile = "href " + name_file + " " + ikey.replace('"', '').replace("'", "") + ".html"
textohref = "Fecha: " + str(ikey) + "<br>Contexto: " + dataJSON[ikey]["descripcion"].encode("latin1") + "<br>Apariciones: " + str(len(dataJSON[ikey]["referencias"]))
filehtml = open(OUTPUTDIR + hrefFile, mode = "w+")
filehtml.write(textohref)
filehtml.close()
# sel = [x in dataJSON[ikey]["referencias"] for x in ddNorm["Ref_Orig"]]
sel = ddNorm['Referencia_Normalizada'] == ikey
ddNorm['href'][sel] = hrefFile
# Write the HTML
textohtml = "<head> <link rel = 'stylesheet' type = 'text/css' href = 'styles_css.css'> </head>"
i_suma = 0
for i in range(ddNorm.shape[0]):
before = "<a href = '" + ddNorm["href"][i] + "'>" + "<" + ddNorm["mark"][i] + ">"
after = "</" + ddNorm["mark"][i] + "></a>"
readedFile = readedFile[:(int(ddNorm["PosInicio"][i]) + i_suma)] + before + readedFile[(int(ddNorm["PosInicio"][i]) + i_suma):(int(ddNorm["PosFin"][i]) + i_suma)] + after + readedFile[(int(ddNorm["PosFin"][i]) + i_suma):]
i_suma += len(before) + len(after)
textohtml += "<p>" + readedFile + "</p>"
filehtml = OUTPUTDIR + name_file + "_muestra_plazos.html"
filehtml = open(filehtml, mode = "w+")
filehtml.write(textohtml)
filehtml.close()
if guardaES:
# Get the character positions of each page
perPageDoc = read_file_pagebypage(filename, criterion = criterion, con = Elasticsearch([ipread]), indice = indiceread)
count_len = 0
listpos = []
for item in perPageDoc:
listpos.append((count_len, count_len + len(item) - 1))
count_len += len(item)
rawDoc = reduce_concat(perPageDoc)
# Upload to ES
# Look for the text inside rawDoc
ddNorm["posRawDocIni"] = -1
ddNorm["posRawDocFin"] = -1
for i in range(ddNorm.shape[0]):
texto = ddNorm["Referencia"][i]
text2search = rawDoc[ddNorm["PosInicio"][i]:]
text2search = remove_accents(text2search)
text2search = text2search.lower()
if texto in text2search:
sumposini = text2search.index(texto)
else:
caracteres_elimina = [":", ".", "(", ")", ",", ";"]
caracteres_espacio = ["\n", "/", "-"]
for cel in caracteres_elimina:
text2search = text2search.replace(cel, "")
for ces in caracteres_espacio:
text2search = text2search.replace(ces, " ")
text2search = quitaDoblesEspacios(text2search)
sumposini = text2search.index(texto)
ddNorm["posRawDocIni"][i] = ddNorm["PosInicio"][i] + sumposini
ddNorm["posRawDocFin"][i] = ddNorm["posRawDocIni"][i] + len(texto)
# See which page and position each entity occupies
ddNorm["pagina"] = '-1'
for i in range(ddNorm.shape[0]):
posBusca = ddNorm.loc[[i], ["posRawDocIni", "posRawDocFin"]]
posBusca = posBusca.values.tolist()[0]
encontrada = False
ipag = 0
while not encontrada and ipag < len(listpos):
ipag += 1
if listpos[ipag - 1][0] <= posBusca[0] and listpos[ipag - 1][1] >= posBusca[1]:
ddNorm["pagina"][i] = str(ipag)
encontrada = True
elif len(listpos) > ipag:
if listpos[ipag - 1][1] >= posBusca[0] and listpos[ipag][0] <= posBusca[1]:
ddNorm["pagina"][i] = str(ipag - 1) + "," + str(ipag)
encontrada = True
ddNorm[ddNorm["pagina"] == -1]["pagina"] = len(listpos)
ddNorm = posicionporpagina(ddNorm, listpos)
# Upload the dictionaries to ES
for i in range(ddNorm.shape[0]):
# Build the dictionary
dictloadES = {
"Doc_plazo_" + filename.lower().replace(".pdf", "") + "_" + str(i): dict(
tipo = "plazo",
documento = filename,
pagina = ddNorm["pagina"][i],
pos_inicial = ddNorm["posRawDocIni"][i],
pos_final = ddNorm["posRawDocFin"][i],
texto = ddNorm["Referencia"][i],
texto_norm = ddNorm["Referencia_Normalizada"][i],
contexto = result[ddNorm["Referencia_Normalizada"][i]]["descripcion"]
)
}
load2Elastic(dictloadES, INDEX_NAME = indicewrite, TYPE_NAME = "doc", newesconn = Elasticsearch([ipwrite]))
print(filename + " procesado!!")
| esult[val] = dict(descripcion = frasesGrup[0])
result[val]["posiciones"] = dict(inicio = list(aux_data["PosInicio"]), fin = list(aux_data["PosFin"]))
result[val]["referencias"] = list(aux_data["Ref_Orig"])
| conditional_block |
059_Implementacion_plazos.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# execfile(PATH + "syntax/03_Deteccion_info/059_Implementacion_plazos.py")
if __name__ == "__main__":
guardaResultados = True
muestraHTML = True
guardaES = True
es_hdfs = False
import sys
import platform
if platform.system() == "Windows":
PATH = "D:/Judicial/"
sys.path.append('C:/Python27/Lib/site-packages')
eslocal = True
try:
pais
except NameError:
pais = "esp"
else:
PATH = "/home/dcortiada/"
eslocal = False
if len(sys.argv) < 7:
raise Exception("Tiene que haber como minimo 6 argumentos!!\nEjemplo llamada:\n python 019_Implementacion_leyes.py ip_donde_leer(172.22.248.206:9229) ip_donde_escribir(172.22.248.206:9229) indice_donde_leer indice_donde_escribir criterio(name) fichero_a_procesar1 fichero_a_procesar2")
else:
ipread = sys.argv[1]
ipwrite = sys.argv[2]
indiceread = sys.argv[3]
indicewrite = sys.argv[4]
criterion = sys.argv[5]
files2Eval = sys.argv[6].split("/")
# pais = sys.argv[7]
# idioma = sys.argv[8]
# es_hdfs = sys.argv[9]
pais = "esp"
idioma = "es"
es_hdfs = False
print "Los archivos a evaluar son los siguientes:\n '" + reduce(lambda x, y: x + "', '" + y, files2Eval) + "'."
if es_hdfs == "True":
es_hdfs = True
else:
es_hdfs = False
if es_hdfs:
PATH = "hdfs:///user/ijdocs/"
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext, HiveContext
# Create the Spark configuration
conf = (SparkConf()
.setAppName("ejemplo")
.set("spark.executor.memory", "1g")
.set("spark.yarn.appMasterEnv.PYSPARK_PYTHON", "/usr/bin/python")
.set("spark.yarn.appMasterEnv.PYSPARK_DRIVER_PYTHON", "/usr/bin/python"))
# Create the SparkContext with the configuration above
sc = SparkContext(conf=conf)
# SQL connectors to work with in our script
sqlContext = SQLContext(sc)
hiveContext = HiveContext(sc)
# Load libraries and functions
from os import listdir
import numpy as np
from os.path import exists
import pandas as pd
from math import factorial
import gensim
import json
from copy import deepcopy
if not es_hdfs:
execfile(PATH + "syntax/scripts/NER/carga_todas_funciones.py")
else:
filename = PATH + "syntax/scripts/NER/funciones_comparacion_texto.py"
sc.addPyFile(filename)
from funciones_comparacion_texto import *
filename = PATH + "syntax/scripts/NER/funciones_elastic.py"
sc.addPyFile(filename)
from funciones_elastic import *
filename = PATH + "syntax/scripts/NER/funciones_transforma_refs.py"
sc.addPyFile(filename)
from funciones_transforma_refs import *
filename = PATH + "syntax/scripts/NER/funciones_busqueda.py"
sc.addPyFile(filename)
from funciones_busqueda import *
filename = PATH + "syntax/scripts/NER/funciones_preprocesado.py"
sc.addPyFile(filename)
from funciones_preprocesado import *
filename = PATH + "syntax/scripts/NER/funciones_tratamiento_texto.py"
sc.addPyFile(filename)
from funciones_tratamiento_texto import *
filename = PATH + "syntax/scripts/NER/funciones_lectura_datos.py"
sc.addPyFile(filename)
from funciones_lectura_datos import *
# Define parameters
OUTPUTDIR = PATH + "output/resultados_plazos/"
if eslocal:
INPUTREAD = PATH + "input/documentos_expedientes/"
files2Eval = listdir(INPUTREAD)
else:
try:
files2Eval
except NameError:
files2Eval = list_docs(criterion = criterion, con = Elasticsearch([ipread]), indice = indiceread)
# Define the regular expressions for the quantities
DictPaisSearchMaxEnrere = dict(
esp = [
dict(contiene = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana'],
# inicios = ['[0-9]+', "\\buno", "\\bdos", "\\btres", "\\bcuatro", "\\bcinco", "\\bseis", "\\bsiete", "\\bocho", "\\bnueve", "\\bdiez", "\\bonce", "\\bdoce", "\\btrece", "\\bcatorce", "\\bquince", "\\bdieciseis", "\\bdiecisiete", "\\bdieciocho", "\\bdiecinueve", "\\bveinte", "\\bventi", "\\bveinti", "\\btreinta", "\\btrenta", "\\bcuarenta", "\\bcincuenta", "\\bsesenta", "\\bochenta", "\\bnoventa", "\\bcien", "\\bdoscientos", "\\btrescientos", "\\bcuatrocientos", "\\bquinientos", "\\bseiscientos", "\\bsetecientos", "\\bochocientos"],
inicios = ['pena', "plazo", "condena", "multa", "beneficio de la comunidad", "beneficio a la comunidad", "sancion"],
finales = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana']
)
],
bra = [
dict(contiene = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana'],
# inicios = ['[0-9]+', "\\buno", "\\bdos", "\\btres", "\\bcuatro", "\\bcinco", "\\bseis", "\\bsiete", "\\bocho", "\\bnueve", "\\bdiez", "\\bonce", "\\bdoce", "\\btrece", "\\bcatorce", "\\bquince", "\\bdieciseis", "\\bdiecisiete", "\\bdieciocho", "\\bdiecinueve", "\\bveinte", "\\bventi", "\\bveinti", "\\btreinta", "\\btrenta", "\\bcuarenta", "\\bcincuenta", "\\bsesenta", "\\bochenta", "\\bnoventa", "\\bcien", "\\bdoscientos", "\\btrescientos", "\\bcuatrocientos", "\\bquinientos", "\\bseiscientos", "\\bsetecientos", "\\bochocientos"],
inicios = ['pena', "prazo", "conviccao", "fino", "sancao", "sancoes"],
finales = ['\\bmes\\b', '\\bmeses\\b', '\\bdia\\b', '\\bdias\\b', '\\bano\\b', '\\banos\\b', '\\bsemana']
)
]
)
SearchMaxEnrere = DictPaisSearchMaxEnrere[pais]
limSimFrases = 0.15 # Limit a partir del qual diem que 2 frases s'assemblen
guardaES = guardaES and not eslocal # Solo guardamos a elastic si no se ejecuta desde local
for filename in files2Eval:
print("Procesando " + filename + "...")
if eslocal:
RawReadedFile = pdf2txt(INPUTREAD + filename)
else:
RawReadedFile = import_doc(filename, criterion = criterion, con = Elasticsearch([ipread]), indice = indiceread)
readedFile = remove_accents(RawReadedFile)
readedFile = readedFile.lower()
# ================== #
# === Script 050 === #
# ================== #
| # Buscamos posiciones teniendo en cuenta lo maximo para atras
index_points_max_enrere = []
for dictstr2search in SearchMaxEnrere:
idxAct = []
for str2search in dictstr2search['contiene']:
idxAct.extend(buscaPosicionRegexTexto(str2search, readedFile))
index_points_max_enrere.append(idxAct)
plazos = []
for i in range(len(SearchMaxEnrere)):
dictAct = SearchMaxEnrere[i]
for item in index_points_max_enrere[i]:
# Prepare the texts for starts and ends
aux_texto_ini = readedFile[:item[1]]
aux_texto_fin = readedFile[item[0]:]
# Search for starts and ends
listInicios = []
listFinal = []
for i_inicio in dictAct['inicios']:
listInicios.extend(buscaPosicionRegexTexto(i_inicio, aux_texto_ini))
for i_final in dictAct['finales']:
listFinal.extend(buscaPosicionRegexTexto(i_final, aux_texto_fin))
if len(listInicios) == 0 or len(listFinal) == 0:
continue
listInicios = np.array(listInicios)
listFinal = np.array(listFinal)
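# Keep only starts within 50 characters before the match and ends within 50 characters
# after it; positions in listFinal are relative to aux_texto_fin, which starts at the match.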
sel = abs(listInicios[:, 0] - len(aux_texto_ini)) < 50
if any(sel):
selFin = listFinal[:, 1] < 50
if any(selFin):
plazos.append((min(listInicios[sel, 0]), item[0] + max(listFinal[selFin, 1])))
else:
plazos.append((min(listInicios[sel, 0]), item[0] + min(listFinal[:, 1])))
# Keep only unique records
plazos = list(set(plazos))
if len(plazos) == 0:
print "No hemos encontrado plazos en este documento"
continue
# Merge the spans so that texts are not nested inside each other
plazos = pd.DataFrame(plazos)
plazos = plazos.sort(0)
plazos = plazos.values.tolist()
jresta = 0
for i in range(1, len(plazos)):
if plazos[i - jresta][0] < plazos[i - 1 - jresta][1]:
plazos[i - jresta] = [plazos[i - 1 - jresta][0], plazos[i - jresta][1]]
plazos.pop(i - 1 - jresta)
jresta += 1
# ================== #
# === Script 051 === #
# ================== #
# === #
# Change with respect to script 051!!
listRefs = deepcopy(plazos)
listRefs = pd.DataFrame(listRefs)
listRefs.columns = ["PosInicio", "PosFin"]
listRefs["Referencia"] = ""
for i in range(listRefs.shape[0]):
listRefs["Referencia"][i] = readedFile[listRefs["PosInicio"][i]:listRefs["PosFin"][i]]
# === #
listRefs["Ref_Orig"] = listRefs["Referencia"]
# Apply modifications to the references
caracteres_elimina = [":", ".", "(", ")", ",", ";"]
caracteres_espacio = ["\n", "/", "-"]
for ce in caracteres_elimina:
listRefs["Referencia"] = listRefs["Referencia"].str.replace(ce, "")
for ce in caracteres_espacio:
listRefs["Referencia"] = listRefs["Referencia"].str.replace(ce, " ")
for i in range(listRefs.shape[0]):
listRefs["Referencia"][i] = quitaDoblesEspacios(listRefs["Referencia"][i])
listRefs["Referencia"] = listRefs["Referencia"].str.strip()
# Normalize the fields
listRefs["Referencia_Normalizada"] = ''
for i in range(listRefs.shape[0]):
normaliza = listRefs["Referencia"][i]
normaliza = normaliza.replace(" un ", " uno ")
aux_str = normaliza.split(" de ")
aux_norm = aux_str[len(aux_str) - 1]
aux_norm2 = aux_norm.split(" y ")
resNums = []
for aux_norm in aux_norm2:
aux_str = aux_norm.split(" ")
unidades = aux_str[len(aux_str) - 1]
number = reduce_concat(aux_str[:-1], sep = " ")
try:
number = transformaNumsString(number)
except:
pass
resNums.append(number + " " + unidades)
listRefs["Referencia_Normalizada"][i] = reduce_concat(resNums, sep = " y ")
# ================== #
# === Script 052 === #
# ================== #
# Build a string with the same structure but keeping the accents
try:
RawReadedFileAcc = str(RawReadedFile.decode("utf8").encode("latin1", 'ignore'))
except:
RawReadedFileAcc = str(RawReadedFile.encode("latin1", "ignore"))
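# Walk both strings in parallel, dropping characters on either side so that the accented
# text and the accent-stripped lowercase text stay aligned position by position.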
i = 0
while i < len(RawReadedFileAcc):
letterAcc = RawReadedFileAcc[i]
letterra = readedFile[i]
if remove_accents(letterAcc, enc = "latin1").lower() != letterra:
if letterAcc == readedFile[i + 1]:
readedFile = readedFile[:i] + readedFile[(i + 1):]
else:
RawReadedFileAcc = RawReadedFileAcc[:i] + RawReadedFileAcc[(i + 1):]
else:
i += 1
if len(RawReadedFileAcc) != len(readedFile):
raise Exception("No pot ser!!")
# Read the normalized date positions
ddNorm = deepcopy(listRefs)
# Take the texts before and after each date
ddNorm["Antes"] = ""
ddNorm["Despues"] = ""
x = 1000
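# x is the size (in characters) of the context window taken before and after each reference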
for irow in range(ddNorm.shape[0]):
ddNorm["Antes"][irow] = RawReadedFileAcc[(ddNorm["PosInicio"][irow] - x):ddNorm["PosInicio"][irow]]
aux_text = RawReadedFileAcc[(ddNorm["PosFin"][irow] - 1):(ddNorm["PosFin"][irow] + x)]
if len(aux_text) > 0:
if aux_text[0] == ddNorm["Referencia"][irow][-1]:
ddNorm["Despues"][irow] = aux_text[1:]
else:
ddNorm["Despues"][irow] = aux_text
else:
ddNorm["Despues"][irow] = ""
# Split the before/after texts into sentences
for irow in range(ddNorm.shape[0]):
ddNorm["Antes"][irow] = ddNorm["Antes"][irow].replace("\n", " ")
ddNorm["Despues"][irow] = ddNorm["Despues"][irow].replace("\n", " ")
auxa = sentences(ddNorm["Antes"][irow])
auxa = auxa.split(" .\n")
ddNorm["Antes"][irow] = auxa[len(auxa) - 1]
auxd = sentences(ddNorm["Despues"][irow])
auxd = auxd.split(" .\n")
ddNorm["Despues"][irow] = auxd[0]
# Compute how relevant each element is
aux_tt = ddNorm["Referencia_Normalizada"].value_counts()
aux_tt2 = aux_tt/max(aux_tt)
aux_tt = pd.DataFrame(data = dict(Referencia_Normalizada = list(aux_tt.index), Apariciones = list(aux_tt.values), Relevancia = list(aux_tt2.values)))
ddNorm = ddNorm.merge(aux_tt, 'left')
# Normalize the sentences
result = dict()
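# result maps each normalized reference to a dict with its 'descripcion' (context sentence),
# 'posiciones' (start/end position lists) and 'referencias' (original reference strings)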
for val in ddNorm["Referencia_Normalizada"].unique():
if int(aux_tt["Apariciones"][aux_tt["Referencia_Normalizada"] == val]) == 1:
# Here we keep the sentences that appear exactly once
sel = ddNorm["Referencia_Normalizada"] == val
aux_data = ddNorm[sel]
aux_data = aux_data.reset_index()
texto = str(aux_data["Antes"][0]) + str(aux_data["Ref_Orig"][0]) + str(aux_data["Despues"][0])
result[val] = dict(descripcion = texto.strip())
result[val]["posiciones"] = dict(inicio = list(aux_data["PosInicio"]), fin = list(aux_data["PosFin"]))
result[val]["referencias"] = list(aux_data["Ref_Orig"])
else:
# Here we analyze more than one sentence: if they are alike we try to build one descriptive sentence, otherwise for each item
# we keep the most similar one (TRY WITH GENSIM!!)
frases = []
aux_data = ddNorm[ddNorm["Referencia_Normalizada"] == val]
aux_data = aux_data.reset_index()
for irow in range(aux_data.shape[0]):
texto = aux_data["Antes"][irow] + aux_data["Ref_Orig"][irow] + aux_data["Despues"][irow]
frases.append(texto)
# Compare the sentences and see which ones resemble which
auxRes = comparaFrases(frases, fileMod = PATH + "data/modelos_NER/DS_RNN_2014")
gruposUnidos = unirGrupos(list(auxRes["item1"]), list(auxRes["item2"]), list(auxRes["valor_comp"] > limSimFrases))
# Extract the most relevant sentences for each group
frasesGrup = []
for grupo in gruposUnidos:
frases_grupo_act = [""]
for element in grupo:
frases_grupo_act[0] = frases_grupo_act[0] + " " + frases[element]
frases_grupo_act.append(frases[element])
auxRes = comparaFrases(frases_grupo_act, fileMod = PATH + "data/modelos_NER/DS_RNN_2014", totesContraTotes = False)
auxResSel = auxRes[(auxRes["item1"] == 0) | (auxRes["item2"] == 0)]
auxResSel = auxResSel[auxResSel["valor_comp"] == max(auxResSel["valor_comp"])].reset_index()
auxResSel = auxResSel.loc[0, ["item1", "item2"]]
i = int(auxResSel[auxResSel != 0]) - 1
frasesGrup.append(frases[grupo[i]])
if len(frasesGrup) == 1:
result[val] = dict(descripcion = frasesGrup[0])
result[val]["posiciones"] = dict(inicio = list(aux_data["PosInicio"]), fin = list(aux_data["PosFin"]))
result[val]["referencias"] = list(aux_data["Ref_Orig"])
else:
for igrupo in range(len(gruposUnidos)):
result[val + "_" + str(igrupo + 1)] = dict(descripcion = frasesGrup[igrupo])
result[val + "_" + str(igrupo + 1)]["posiciones"] = dict(inicio = list(aux_data.loc[(gruposUnidos[igrupo])]["PosInicio"]), fin = list(aux_data.loc[(gruposUnidos[igrupo])]["PosFin"]))
result[val + "_" + str(igrupo + 1)]["referencias"] = list(aux_data.loc[(gruposUnidos[igrupo])]["Ref_Orig"])
for ielement in range(len(gruposUnidos[igrupo])):
ddNorm["Referencia_Normalizada"][aux_data["index"][gruposUnidos[igrupo][ielement]]] = val + "_" + str(igrupo + 1)
# Rewrite the positions file with the normalized references
for ikey in result.keys():
result[ikey]["descripcion"] = result[ikey]["descripcion"].decode("latin1")
for i in range(len(result[ikey]["referencias"])):
result[ikey]["referencias"][i] = result[ikey]["referencias"][i].decode("latin1")
result[ikey]["posiciones"]["inicio"][i] = str(result[ikey]["posiciones"]["inicio"][i])
result[ikey]["posiciones"]["fin"][i] = str(result[ikey]["posiciones"]["fin"][i])
# ============================= #
# === Write results === #
# ============================= #
# Save the outputs and push them to ES
if guardaResultados:
ddNorm.to_csv(path_or_buf = OUTPUTDIR + filename.lower().replace(".pdf", "") + "_info_pos.csv", sep = "|")
fileOut = OUTPUTDIR + filename.lower().replace(".pdf", "") + "_entities.json"
with open(fileOut, 'w') as fp:
json.dump(result, fp)
if muestraHTML:
dataJSON = deepcopy(result)
name_file = filename.lower().replace(".pdf", "")
ddNorm['order'] = [sum(ddNorm['Referencia_Normalizada'] == ddNorm['Referencia_Normalizada'][i]) for i in xrange(len(ddNorm))]
ddNorm = ddNorm.sort("order", ascending = False)
regUnics = pd.unique(ddNorm["Referencia_Normalizada"])
ddNorm["id_reg"] = ""
compta = 0
for registre in regUnics:
sel = ddNorm["Referencia_Normalizada"] == registre
ddNorm["id_reg"][sel] = compta
compta += 1
# Define the highlight marks
ddNorm['mark'] = 'mark09'
for i in range(1, 9):
sel = ddNorm["id_reg"] == (i - 1)
ddNorm["mark"][sel] = 'mark0' + str(i)
# Write the HTML files with the descriptions and define the href
ddNorm['href'] = ''
for ikey in dataJSON.keys():
hrefFile = "href " + name_file + " " + ikey.replace('"', '').replace("'", "") + ".html"
textohref = "Fecha: " + str(ikey) + "<br>Contexto: " + dataJSON[ikey]["descripcion"].encode("latin1") + "<br>Apariciones: " + str(len(dataJSON[ikey]["referencias"]))
filehtml = open(OUTPUTDIR + hrefFile, mode = "w+")
filehtml.write(textohref)
filehtml.close()
# sel = [x in dataJSON[ikey]["referencias"] for x in ddNorm["Ref_Orig"]]
sel = ddNorm['Referencia_Normalizada'] == ikey
ddNorm['href'][sel] = hrefFile
# Write the HTML
textohtml = "<head> <link rel = 'stylesheet' type = 'text/css' href = 'styles_css.css'> </head>"
i_suma = 0
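# i_suma accumulates the length of the tags inserted so far, so the original character
# offsets stay valid while we splice the marks into readedFile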
for i in range(ddNorm.shape[0]):
before = "<a href = '" + ddNorm["href"][i] + "'>" + "<" + ddNorm["mark"][i] + ">"
after = "</" + ddNorm["mark"][i] + "></a>"
readedFile = readedFile[:(int(ddNorm["PosInicio"][i]) + i_suma)] + before + readedFile[(int(ddNorm["PosInicio"][i]) + i_suma):(int(ddNorm["PosFin"][i]) + i_suma)] + after + readedFile[(int(ddNorm["PosFin"][i]) + i_suma):]
i_suma += len(before) + len(after)
textohtml += "<p>" + readedFile + "</p>"
filehtml = OUTPUTDIR + name_file + "_muestra_plazos.html"
filehtml = open(filehtml, mode = "w+")
filehtml.write(textohtml)
filehtml.close()
if guardaES:
# Get the positions of each page
perPageDoc = read_file_pagebypage(filename, criterion = criterion, con = Elasticsearch([ipread]), indice = indiceread)
count_len = 0
listpos = []
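# listpos holds the (start, end) character offsets of each page within the concatenated document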
for item in perPageDoc:
listpos.append((count_len, count_len + len(item) - 1))
count_len += len(item)
rawDoc = reduce_concat(perPageDoc)
# Upload to ES
# Find the text in the rawDoc
ddNorm["posRawDocIni"] = -1
ddNorm["posRawDocFin"] = -1
for i in range(ddNorm.shape[0]):
texto = ddNorm["Referencia"][i]
text2search = rawDoc[ddNorm["PosInicio"][i]:]
text2search = remove_accents(text2search)
text2search = text2search.lower()
if texto in text2search:
sumposini = text2search.index(texto)
else:
caracteres_elimina = [":", ".", "(", ")", ",", ";"]
caracteres_espacio = ["\n", "/", "-"]
for cel in caracteres_elimina:
text2search = text2search.replace(cel, "")
for ces in caracteres_espacio:
text2search = text2search.replace(ces, " ")
text2search = quitaDoblesEspacios(text2search)
sumposini = text2search.index(texto)
ddNorm["posRawDocIni"][i] = ddNorm["PosInicio"][i] + sumposini
ddNorm["posRawDocFin"][i] = ddNorm["posRawDocIni"][i] + len(texto)
# Determine which page and position each entity occupies
ddNorm["pagina"] = '-1'
for i in range(ddNorm.shape[0]):
posBusca = ddNorm.loc[[i], ["posRawDocIni", "posRawDocFin"]]
posBusca = posBusca.values.tolist()[0]
encontrada = False
ipag = 0
while not encontrada and ipag < len(listpos):
ipag += 1
if listpos[ipag - 1][0] <= posBusca[0] and listpos[ipag - 1][1] >= posBusca[1]:
ddNorm["pagina"][i] = str(ipag)
encontrada = True
elif len(listpos) > ipag:
if listpos[ipag - 1][1] >= posBusca[0] and listpos[ipag][0] <= posBusca[1]:
ddNorm["pagina"][i] = str(ipag - 1) + "," + str(ipag)
encontrada = True
ddNorm.loc[ddNorm["pagina"] == '-1', "pagina"] = str(len(listpos))
ddNorm = posicionporpagina(ddNorm, listpos)
# Upload the dictionaries to ES
for i in range(ddNorm.shape[0]):
# Build the dictionary
dictloadES = {
"Doc_plazo_" + filename.lower().replace(".pdf", "") + "_" + str(i): dict(
tipo = "plazo",
documento = filename,
pagina = ddNorm["pagina"][i],
pos_inicial = ddNorm["posRawDocIni"][i],
pos_final = ddNorm["posRawDocFin"][i],
texto = ddNorm["Referencia"][i],
texto_norm = ddNorm["Referencia_Normalizada"][i],
contexto = result[ddNorm["Referencia_Normalizada"][i]]["descripcion"]
)
}
load2Elastic(dictloadES, INDEX_NAME = indicewrite, TYPE_NAME = "doc", newesconn = Elasticsearch([ipwrite]))
print(filename + " procesado!!") | random_line_split |
|
ConcurrentAnimations.py | #!/usr/bin/env python
"""
Use version of DriverSlave that has pixmap and pixheights
"""
import threading
# import base classes and driver
from bibliopixel import LEDStrip, LEDMatrix
# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder
from bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder
from bibliopixel.drivers.slave_driver import DriverSlave
# import colors
import bibliopixel.colors
from bibliopixel.animation import BaseStripAnim
from logging import DEBUG, INFO, WARNING, CRITICAL, ERROR
from bibliopixel import log
log.setLogLevel(WARNING)
import re
import time
from operator import or_, ior, ixor
import matplotlib.pyplot as plt
import BiblioPixelAnimations.matrix.bloom as BA
class MasterAnimation(BaseStripAnim):
"""
Takes copies of fake leds and combines them, using heights and mixing, to fill and
update a led
NEED now ledcopies is list of the leds associated with each animation
NEED also mapping of the leds into master led (i.e. path list)
NEED also height of each animations and merging method if same height
"""
def __init__(self, led, animcopies, start=0, end=-1):
super(MasterAnimation, self).__init__(led, start, end)
if not isinstance(animcopies, list):
animcopies = [animcopies]
self._animcopies = animcopies
self._ledcopies = [a._led for a, f in animcopies]
self._idlelist = []
self.timedata = [[] for _ in range(len(self._ledcopies))] # [[]] * 5 would NOT define 5 different lists!
self._led.pixheights = [0] * self._led.numLEDs
# def preRun(self, amt=1):
# self._led.all_off()
# for w, f in self._animcopies:
# w.run(fps=f, max_steps=runtime * f, threaded = True)
def preStep(self, amt=1):
#print 'prestep {}'.format(self._step)
# only step the master thread when something from ledcopies
# has been done i.e. its event _wait must be false (I THINK)
# TODO is this good code???? or is there a better way to block
self._idlelist = [True] # to ensure we go through the while loop at least once
while all(self._idlelist):
self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]
if self._stopEvent.isSet():
self.animComplete = True
print 'breaking out'
break
#
def postStep(self, amt=1):
# clear the ones found in preStep
activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
[self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]
def step(self, amt=1):
"""
combines the buffers from the slave led's
which then gets sent to led via update
"""
# For checking if all the animations have their frames looked at
#activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
#print "Worm {} at {:5g}".format(activewormind, 1000*(time.time() - starttime))
# save times activated for each worm
[self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]
#self._led.buffer = [0] * 480
self._led.pixheights = [-100] * self._led.numLEDs
#print type(self._led.buffer)
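# Merge rule: where heights are equal the color channels are XOR-ed together; a copy with
# a greater height overwrites the pixel and raises its recorded height.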
for ledcopy in self._ledcopies:
# self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)
# use pixheights but assume all buffers same size
# print ledcopy.driver[0].pixheights
for pix in range(self._led.numLEDs):
#for ledcopy in self._ledcopies:
if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]
elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]
self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix]
self._step += 1
def | (self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):
#def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):
# self.fps = fps
# self.untilComplete = untilComplete
super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)
class Worm(BaseStripAnim):
"""
colors: a list of the worm segment colors (starting with the head)
path: a list of the LED indices over which the worm will travel
cyclelen: controls speed; the worm moves only when the LED upload
cycle count == 0 mod cyclelen
height (of worm segments): same length as colors; higher-value
worm segments go on top of lower-value worms
"""
def __init__(self, led, colors, path, cyclelen, direction=1,
height=None, start=0, end=-1):
super(Worm, self).__init__(led, start, end)
if height is None:
height = [0]*len(colors)
elif type(height) == int:
height = [height]*len(colors)
self._colors = colors
self._colors.append((0, 0, 0)) # add blank seqment to end worm
self._path = path
self._cyclelen = cyclelen
self._height = height
self._height.append(-1) # add lowest value for height
self._activecount = 0
self._direction = direction
self._headposition = -self._direction
#print self._colors
#print self._height
def step(self, amt=1):
if self._activecount == 0:
self._headposition += amt*self._direction
self._headposition %= len(self._path)
# Put worm into strip and blank end
segpos = self._headposition
for x in range(len(self._colors)):
if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:
#if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:
self._led.set(self._path[segpos], self._colors[x])
self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]
segpos -= self._direction
segpos %= len(self._path)
self._activecount += amt
self._activecount %= self._cyclelen
self._step += amt
def pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):
"""
A path around a rectangle from strip wound helically
10 turns high by 16 round.
rounds * turns must be number of pixels on strip
nleft and nright is from 0 to rounds-1,
nbot and ntop from 0 to turns-1
"""
def ind(x, y):
return x + y * rounds
assert 0 <= nleft <= nright -1 <= rounds and 0 <= nbot <= ntop -1 <= turns
nled = rounds*turns
sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)
tp = range(ind(nleft, ntop), ind(nright, ntop), 1)
sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)
bt = range(ind(nright, nbot), ind(nleft, nbot), -1)
path = sleft+tp+sright+bt
if len(path) == 0:
path = [ind(nleft, nbot)]
path = map(lambda x: (shift+x) % nled, path)
log.logger.info("pathgen({}, {}, {}, {}, {}) is {}".format(nleft, nright, nbot, ntop, shift, path))
return path
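# Example: pathgen(5, 10, 0, 9) traces the rectangle between rounds 5-10 and turns 0-9
# of the default 16x10 helix (this is the path used for the blue worm below).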
if True: #__name__ == '__main__':
drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)
# using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024
#drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)
#ledmaster = LEDStrip(drivermaster, threadedUpdate=True)
ledmaster = LEDStrip(drivermaster)
lnin = [255, 222, 200, 150, 125]
bluedimming = [(0, 0, i) for i in lnin]
bluedimming = [(0, 0, 0) for i in lnin]
reddimming = [(i, 0, 0) for i in lnin]
greendimming = [(0, i, 0) for i in lnin]
cyandimming = [(0, i, i) for i in lnin]
whitedimming = [(i, i, i) for i in lnin]
# Worm arguments
wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)
wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)
wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)
wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)
wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)
# List of pair (animation arguments, fps)
wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]
#wormdatalist = [(wormwhite, 8)]
#wormdatalist = []
# dummy strips must each have their own slavedriver as thread is attached
# to the driver
ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]
# Make the Worm animations a list of pairs (animation, fps)
wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]
ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)
bloom = BA.Bloom(ledslaveb)
wormlist.append((bloom, 10))
#masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])
masteranimation = MasterAnimation(ledmaster, wormlist)
starttime = time.time()
runtime = 1
# Master steps when it gets a go ahead signal from one of the
# concurrent animations
masteranimation.run(fps=None) # if give fps for master will skip faster frames
# Run all the slave animations and master threaded
# The slave animations update their buffers at the correct
# time and rather than update, just signal the master they
# are ready to be combined and sent to the actual leds
for w, f in wormlist:
w.run(fps=f, max_steps=runtime * f, threaded = True)
#print threading.enumerate()
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
# idle and threaded animations will run jointly
while not all([w.stopped() for w, f in wormlist]):
pass
# stop the master
masteranimation.stopThread(True) # need True
print "Master Animation Step Count {}".format(masteranimation._step)
ledmaster.waitForUpdate()
ledmaster.stopUpdateThreads()
[w._led.stopUpdateThreads() for w, f in wormlist]
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
plt.clf()
col = 'brgcwk'
[plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i%6]+'o') for i in range(len(wormlist))]
ax = plt.axis()
delx = .01 * (ax[1] - ax[0])
plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1])
| run | identifier_name |
ConcurrentAnimations.py | #!/usr/bin/env python
"""
Use version of DriverSlave that has pixmap and pixheights
"""
import threading
# import base classes and driver
from bibliopixel import LEDStrip, LEDMatrix
# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder
from bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder
from bibliopixel.drivers.slave_driver import DriverSlave
# import colors
import bibliopixel.colors
from bibliopixel.animation import BaseStripAnim
from logging import DEBUG, INFO, WARNING, CRITICAL, ERROR
from bibliopixel import log
log.setLogLevel(WARNING)
import re
import time
from operator import or_, ior, ixor
import matplotlib.pyplot as plt
import BiblioPixelAnimations.matrix.bloom as BA
class MasterAnimation(BaseStripAnim):
"""
Takes copies of fake leds and combines them, using heights and mixing, to fill and
update a led
NEED now ledcopies is list of the leds associated with each animation
NEED also mapping of the leds into master led (i.e. path list)
NEED also height of each animations and merging method if same height | self._animcopies = animcopies
self._ledcopies = [a._led for a, f in animcopies]
self._idlelist = []
self.timedata = [[] for _ in range(len(self._ledcopies))] # [[]] * 5 would NOT define 5 different lists!
self._led.pixheights = [0] * self._led.numLEDs
# def preRun(self, amt=1):
# self._led.all_off()
# for w, f in self._animcopies:
# w.run(fps=f, max_steps=runtime * f, threaded = True)
def preStep(self, amt=1):
#print 'prestep {}'.format(self._step)
# only step the master thread when something from ledcopies
# has been done i.e. its event _wait must be false (I THINK)
# TODO is this good code???? or is there a better way to block
self._idlelist = [True] # to ensure we go through the while loop at least once
while all(self._idlelist):
self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]
if self._stopEvent.isSet():
self.animComplete = True
print 'breaking out'
break
#
def postStep(self, amt=1):
# clear the ones found in preStep
activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
[self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]
def step(self, amt=1):
"""
combines the buffers from the slave led's
which then gets sent to led via update
"""
# For checking if all the animations have their frames looked at
#activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
#print "Worm {} at {:5g}".format(activewormind, 1000*(time.time() - starttime))
# save times activated for each worm
[self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]
#self._led.buffer = [0] * 480
self._led.pixheights = [-100] * self._led.numLEDs
#print type(self._led.buffer)
for ledcopy in self._ledcopies:
# self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)
# use pixheights but assume all buffers same size
# print ledcopy.driver[0].pixheights
for pix in range(self._led.numLEDs):
#for ledcopy in self._ledcopies:
if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]
elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]
self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix]
self._step += 1
def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):
#def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):
# self.fps = fps
# self.untilComplete = untilComplete
super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)
class Worm(BaseStripAnim):
"""
colors: a list of the worm segment colors (starting with the head)
path: a list of the LED indices over which the worm will travel
cyclelen: controls speed; the worm moves only when the LED upload
cycle count == 0 mod cyclelen
height (of worm segments): same length as colors; higher-value
worm segments go on top of lower-value worms
"""
def __init__(self, led, colors, path, cyclelen, direction=1,
height=None, start=0, end=-1):
super(Worm, self).__init__(led, start, end)
if height is None:
height = [0]*len(colors)
elif type(height) == int:
height = [height]*len(colors)
self._colors = colors
self._colors.append((0, 0, 0)) # add blank seqment to end worm
self._path = path
self._cyclelen = cyclelen
self._height = height
self._height.append(-1) # add lowest value for height
self._activecount = 0
self._direction = direction
self._headposition = -self._direction
#print self._colors
#print self._height
def step(self, amt=1):
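# The head advances only when the activity counter wraps to zero, i.e. once every cyclelen steps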
if self._activecount == 0:
self._headposition += amt*self._direction
self._headposition %= len(self._path)
# Put worm into strip and blank end
segpos = self._headposition
for x in range(len(self._colors)):
if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:
#if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:
self._led.set(self._path[segpos], self._colors[x])
self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]
segpos -= self._direction
segpos %= len(self._path)
self._activecount += amt
self._activecount %= self._cyclelen
self._step += amt
def pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):
"""
A path around a rectangle from strip wound helically
10 turns high by 16 round.
rounds * turns must be number of pixels on strip
nleft and nright is from 0 to rounds-1,
nbot and ntop from 0 to turns-1
"""
def ind(x, y):
return x + y * rounds
assert 0 <= nleft <= nright -1 <= rounds and 0 <= nbot <= ntop -1 <= turns
nled = rounds*turns
sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)
tp = range(ind(nleft, ntop), ind(nright, ntop), 1)
sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)
bt = range(ind(nright, nbot), ind(nleft, nbot), -1)
path = sleft+tp+sright+bt
if len(path) == 0:
path = [ind(nleft, nbot)]
path = map(lambda x: (shift+x) % nled, path)
log.logger.info("pathgen({}, {}, {}, {}, {}) is {}".format(nleft, nright, nbot, ntop, shift, path))
return path
if True: #__name__ == '__main__':
drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)
# using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024
#drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)
#ledmaster = LEDStrip(drivermaster, threadedUpdate=True)
ledmaster = LEDStrip(drivermaster)
lnin = [255, 222, 200, 150, 125]
bluedimming = [(0, 0, i) for i in lnin]
bluedimming = [(0, 0, 0) for i in lnin]
reddimming = [(i, 0, 0) for i in lnin]
greendimming = [(0, i, 0) for i in lnin]
cyandimming = [(0, i, i) for i in lnin]
whitedimming = [(i, i, i) for i in lnin]
# Worm arguments
wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)
wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)
wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)
wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)
wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)
# List of pair (animation arguments, fps)
wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]
#wormdatalist = [(wormwhite, 8)]
#wormdatalist = []
# dummy strips must each have their own slavedriver as thread is attached
# to the driver
ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]
# Make the Worm animations a list of pairs (animation, fps)
wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]
ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)
bloom = BA.Bloom(ledslaveb)
wormlist.append((bloom, 10))
#masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])
masteranimation = MasterAnimation(ledmaster, wormlist)
starttime = time.time()
runtime = 1
# Master steps when it gets a go ahead signal from one of the
# concurrent animations
masteranimation.run(fps=None) # if give fps for master will skip faster frames
# Run all the slave animations and master threaded
# The slave animations update their buffers at the correct
# time and rather than update, just signal the master they
# are ready to be combined and sent to the actual leds
for w, f in wormlist:
w.run(fps=f, max_steps=runtime * f, threaded = True)
#print threading.enumerate()
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
# idle and threaded animations will run jointly
while not all([w.stopped() for w, f in wormlist]):
pass
# stop the master
masteranimation.stopThread(True) # need True
print "Master Animation Step Count {}".format(masteranimation._step)
ledmaster.waitForUpdate()
ledmaster.stopUpdateThreads()
[w._led.stopUpdateThreads() for w, f in wormlist]
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
plt.clf()
col = 'brgcwk'
[plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i%6]+'o') for i in range(len(wormlist))]
ax = plt.axis()
delx = .01 * (ax[1] - ax[0])
plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1]) | """
def __init__(self, led, animcopies, start=0, end=-1):
super(MasterAnimation, self).__init__(led, start, end)
if not isinstance(animcopies, list):
animcopies = [animcopies] | random_line_split |
ConcurrentAnimations.py | #!/usr/bin/env python
"""
Use version of DriverSlave that has pixmap and pixheights
"""
import threading
# import base classes and driver
from bibliopixel import LEDStrip, LEDMatrix
# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder
from bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder
from bibliopixel.drivers.slave_driver import DriverSlave
# import colors
import bibliopixel.colors
from bibliopixel.animation import BaseStripAnim
from logging import DEBUG, INFO, WARNING, CRITICAL, ERROR
from bibliopixel import log
log.setLogLevel(WARNING)
import re
import time
from operator import or_, ior, ixor
import matplotlib.pyplot as plt
import BiblioPixelAnimations.matrix.bloom as BA
class MasterAnimation(BaseStripAnim):
"""
Takes copies of fake leds and combines them, using heights and mixing, to fill and
update a led
NEED now ledcopies is list of the leds associated with each animation
NEED also mapping of the leds into master led (i.e. path list)
NEED also height of each animations and merging method if same height
"""
def __init__(self, led, animcopies, start=0, end=-1):
super(MasterAnimation, self).__init__(led, start, end)
if not isinstance(animcopies, list):
animcopies = [animcopies]
self._animcopies = animcopies
self._ledcopies = [a._led for a, f in animcopies]
self._idlelist = []
self.timedata = [[] for _ in range(len(self._ledcopies))] # [[]] * 5 would NOT define 5 different lists!
self._led.pixheights = [0] * self._led.numLEDs
# def preRun(self, amt=1):
# self._led.all_off()
# for w, f in self._animcopies:
# w.run(fps=f, max_steps=runtime * f, threaded = True)
def preStep(self, amt=1):
#print 'prestep {}'.format(self._step)
# only step the master thread when something from ledcopies
# has been done i.e. its event _wait must be false (I THINK)
# TODO is this good code???? or is there a better way to block
self._idlelist = [True] # to ensure we go through the while loop at least once
while all(self._idlelist):
self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]
if self._stopEvent.isSet():
|
#
def postStep(self, amt=1):
# clear the ones found in preStep
activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
[self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]
def step(self, amt=1):
"""
combines the buffers from the slave led's
which then gets sent to led via update
"""
# For checking if all the animations have their framse looked at
#activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
#print "Worm {} at {:5g}".format(activewormind, 1000*(time.time() - starttime))
# save times activated for each worm
[self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]
#self._led.buffer = [0] * 480
self._led.pixheights = [-100] * self._led.numLEDs
#print type(self._led.buffer)
for ledcopy in self._ledcopies:
# self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)
# use pixheights but assume all buffers same size
# print ledcopy.driver[0].pixheights
for pix in range(self._led.numLEDs):
#for ledcopy in self._ledcopies:
if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]
elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]
self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix]
self._step += 1
def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):
#def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):
# self.fps = fps
# self.untilComplete = untilComplete
super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)
class Worm(BaseStripAnim):
"""
colors: a list of the worm segment colors (starting with the head)
path: a list of the LED indices over which the worm will travel
cyclelen: controls speed; the worm moves only when the LED upload
cycle count == 0 mod cyclelen
height (of worm segments): same length as colors; higher-value
worm segments go on top of lower-value worms
"""
def __init__(self, led, colors, path, cyclelen, direction=1,
height=None, start=0, end=-1):
super(Worm, self).__init__(led, start, end)
if height is None:
height = [0]*len(colors)
elif type(height) == int:
height = [height]*len(colors)
self._colors = colors
self._colors.append((0, 0, 0)) # add blank seqment to end worm
self._path = path
self._cyclelen = cyclelen
self._height = height
self._height.append(-1) # add lowest value for height
self._activecount = 0
self._direction = direction
self._headposition = -self._direction
#print self._colors
#print self._height
def step(self, amt=1):
if self._activecount == 0:
self._headposition += amt*self._direction
self._headposition %= len(self._path)
# Put worm into strip and blank end
segpos = self._headposition
for x in range(len(self._colors)):
if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:
#if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:
self._led.set(self._path[segpos], self._colors[x])
self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]
segpos -= self._direction
segpos %= len(self._path)
self._activecount += amt
self._activecount %= self._cyclelen
self._step += amt
def pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):
"""
A path around a rectangle from strip wound helically
10 turns high by 16 round.
rounds * turns must be number of pixels on strip
nleft and nright is from 0 to rounds-1,
nbot and ntop from 0 to turns-1
"""
def ind(x, y):
return x + y * rounds
assert 0 <= nleft <= nright -1 <= rounds and 0 <= nbot <= ntop -1 <= turns
nled = rounds*turns
sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)
tp = range(ind(nleft, ntop), ind(nright, ntop), 1)
sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)
bt = range(ind(nright, nbot), ind(nleft, nbot), -1)
path = sleft+tp+sright+bt
if len(path) == 0:
path = [ind(nleft, nbot)]
path = map(lambda x: (shift+x) % nled, path)
log.logger.info("pathgen({}, {}, {}, {}, {}) is {}".format(nleft, nright, nbot, ntop, shift, path))
return path
if True: #__name__ == '__main__':
drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)
# using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024
#drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)
#ledmaster = LEDStrip(drivermaster, threadedUpdate=True)
ledmaster = LEDStrip(drivermaster)
lnin = [255, 222, 200, 150, 125]
bluedimming = [(0, 0, i) for i in lnin]
bluedimming = [(0, 0, 0) for i in lnin]
reddimming = [(i, 0, 0) for i in lnin]
greendimming = [(0, i, 0) for i in lnin]
cyandimming = [(0, i, i) for i in lnin]
whitedimming = [(i, i, i) for i in lnin]
# Worm arguments
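# Each tuple is (colors, path, cyclelen, direction, height), matching Worm.__init__ above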
wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)
wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)
wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)
wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)
wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)
# List of pair (animation arguments, fps)
wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]
#wormdatalist = [(wormwhite, 8)]
#wormdatalist = []
# dummy strips must each have their own slavedriver as thread is attached
# to the driver
ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]
# Make the Worm animations a list of pairs (animation, fps)
wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]
ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)
bloom = BA.Bloom(ledslaveb)
wormlist.append((bloom, 10))
#masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])
masteranimation = MasterAnimation(ledmaster, wormlist)
starttime = time.time()
runtime = 1
# Master steps when it gets a go ahead signal from one of the
# concurrent animations
masteranimation.run(fps=None) # if give fps for master will skip faster frames
# Run all the slave animations and master threaded
# The slave animations update their buffers at the correct
# time and rather than update, just signal the master they
# are ready to be combined and sent to the actual leds
for w, f in wormlist:
w.run(fps=f, max_steps=runtime * f, threaded = True)
#print threading.enumerate()
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
# idle and threaded animations will run jointly
while not all([w.stopped() for w, f in wormlist]):
pass
# stop the master
masteranimation.stopThread(True) # need True
print "Master Animation Step Count {}".format(masteranimation._step)
ledmaster.waitForUpdate()
ledmaster.stopUpdateThreads()
[w._led.stopUpdateThreads() for w, f in wormlist]
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
plt.clf()
col = 'brgcwk'
[plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i%6]+'o') for i in range(len(wormlist))]
ax = plt.axis()
delx = .01 * (ax[1] - ax[0])
plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1])
| self.animComplete = True
print 'breaking out'
break | conditional_block |
ConcurrentAnimations.py | #!/usr/bin/env python
"""
Use version of DriverSlave that has pixmap and pixheights
"""
import threading
# import base classes and driver
from bibliopixel import LEDStrip, LEDMatrix
# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder
from bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder
from bibliopixel.drivers.slave_driver import DriverSlave
# import colors
import bibliopixel.colors
from bibliopixel.animation import BaseStripAnim
from logging import DEBUG, INFO, WARNING, CRITICAL, ERROR
from bibliopixel import log
log.setLogLevel(WARNING)
import re
import time
from operator import or_, ior, ixor
import matplotlib.pyplot as plt
import BiblioPixelAnimations.matrix.bloom as BA
class MasterAnimation(BaseStripAnim):
"""
Takes copies of fake leds and combines them, using heights and mixing, to fill and
update a led
NEED now ledcopies is list of the leds associated with each animation
NEED also mapping of the leds into master led (i.e. path list)
NEED also height of each animations and merging method if same height
"""
def __init__(self, led, animcopies, start=0, end=-1):
super(MasterAnimation, self).__init__(led, start, end)
if not isinstance(animcopies, list):
animcopies = [animcopies]
self._animcopies = animcopies
self._ledcopies = [a._led for a, f in animcopies]
self._idlelist = []
self.timedata = [[] for _ in range(len(self._ledcopies))] # [[]] * 5 would NOT define 5 different lists!
self._led.pixheights = [0] * self._led.numLEDs
# def preRun(self, amt=1):
# self._led.all_off()
# for w, f in self._animcopies:
# w.run(fps=f, max_steps=runtime * f, threaded = True)
def preStep(self, amt=1):
#print 'prestep {}'.format(self._step)
# only step the master thread when something from ledcopies
# has been done i.e. its event _wait must be false (I THINK)
# TODO is this good code???? or is there a better way to block
self._idlelist = [True] # to ensure we go through the while loop at least once
while all(self._idlelist):
self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]
if self._stopEvent.isSet():
self.animComplete = True
print 'breaking out'
break
#
def postStep(self, amt=1):
# clear the ones found in preStep
activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
[self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]
def step(self, amt=1):
"""
combines the buffers from the slave led's
which then gets sent to led via update
"""
# For checking if all the animations have their frames looked at
#activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
#print "Worm {} at {:5g}".format(activewormind, 1000*(time.time() - starttime))
# save times activated for each worm
[self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]
#self._led.buffer = [0] * 480
self._led.pixheights = [-100] * self._led.numLEDs
#print type(self._led.buffer)
for ledcopy in self._ledcopies:
# self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)
# use pixheights but assume all buffers same size
# print ledcopy.driver[0].pixheights
for pix in range(self._led.numLEDs):
#for ledcopy in self._ledcopies:
if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]
elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]
self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix]
self._step += 1
def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):
#def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):
# self.fps = fps
# self.untilComplete = untilComplete
super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)
class Worm(BaseStripAnim):
"""
colors: a list of the worm segment colors (starting with the head)
path: a list of the LED indices over which the worm will travel
cyclelen: controls speed; the worm moves only when the LED upload
cycle count == 0 mod cyclelen
height (of worm segments): same length as colors; higher-value
worm segments go on top of lower-value worms
"""
def __init__(self, led, colors, path, cyclelen, direction=1,
height=None, start=0, end=-1):
super(Worm, self).__init__(led, start, end)
if height is None:
height = [0]*len(colors)
elif type(height) == int:
height = [height]*len(colors)
self._colors = colors
self._colors.append((0, 0, 0)) # add blank seqment to end worm
self._path = path
self._cyclelen = cyclelen
self._height = height
self._height.append(-1) # add lowest value for height
self._activecount = 0
self._direction = direction
self._headposition = -self._direction
#print self._colors
#print self._height
def step(self, amt=1):
|
def pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):
"""
A path around a rectangle from strip wound helically
10 turns high by 16 round.
rounds * turns must be number of pixels on strip
nleft and nright is from 0 to rounds-1,
nbot and ntop from 0 to turns-1
"""
def ind(x, y):
return x + y * rounds
assert 0 <= nleft <= nright -1 <= rounds and 0 <= nbot <= ntop -1 <= turns
nled = rounds*turns
sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)
tp = range(ind(nleft, ntop), ind(nright, ntop), 1)
sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)
bt = range(ind(nright, nbot), ind(nleft, nbot), -1)
path = sleft+tp+sright+bt
if len(path) == 0:
path = [ind(nleft, nbot)]
path = map(lambda x: (shift+x) % nled, path)
log.logger.info("pathgen({}, {}, {}, {}, {}) is {}".format(nleft, nright, nbot, ntop, shift, path))
return path
if True: #__name__ == '__main__':
drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)
# using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024
#drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)
#ledmaster = LEDStrip(drivermaster, threadedUpdate=True)
ledmaster = LEDStrip(drivermaster)
lnin = [255, 222, 200, 150, 125]
bluedimming = [(0, 0, i) for i in lnin]
bluedimming = [(0, 0, 0) for i in lnin]
reddimming = [(i, 0, 0) for i in lnin]
greendimming = [(0, i, 0) for i in lnin]
cyandimming = [(0, i, i) for i in lnin]
whitedimming = [(i, i, i) for i in lnin]
# Worm arguments
wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)
wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)
wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)
wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)
wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)
# List of pair (animation arguments, fps)
wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]
#wormdatalist = [(wormwhite, 8)]
#wormdatalist = []
# dummy strips must each have their own slavedriver as thread is attached
# to the driver
ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]
# Make the Worm animations a list of pairs (animation, fps)
wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]
ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)
bloom = BA.Bloom(ledslaveb)
wormlist.append((bloom, 10))
#masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])
masteranimation = MasterAnimation(ledmaster, wormlist)
starttime = time.time()
runtime = 1
# Master steps when it gets a go ahead signal from one of the
# concurrent animations
masteranimation.run(fps=None) # if give fps for master will skip faster frames
# Run all the slave animations and master threaded
# The slave animations update their buffers at the correct
# time and rather than update, just signal the master they
# are ready to be combined and sent to the actual leds
for w, f in wormlist:
w.run(fps=f, max_steps=runtime * f, threaded = True)
#print threading.enumerate()
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
# idle and threaded animations will run jointly
while not all([w.stopped() for w, f in wormlist]):
pass
# stop the master
masteranimation.stopThread(True) # need True
print "Master Animation Step Count {}".format(masteranimation._step)
ledmaster.waitForUpdate()
ledmaster.stopUpdateThreads()
[w._led.stopUpdateThreads() for w, f in wormlist]
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
plt.clf()
col = 'brgcwk'
[plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i%6]+'o') for i in range(len(wormlist))]
ax = plt.axis()
delx = .01 * (ax[1] - ax[0])
plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1])
| if self._activecount == 0:
self._headposition += amt*self._direction
self._headposition %= len(self._path)
# Put worm into strip and blank end
segpos = self._headposition
for x in range(len(self._colors)):
if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:
#if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:
self._led.set(self._path[segpos], self._colors[x])
self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]
segpos -= self._direction
segpos %= len(self._path)
self._activecount += amt
self._activecount %= self._cyclelen
self._step += amt | identifier_body |
resource_ldap_object_attributes.go | package provider
import (
"fmt"
"strings"
"github.com/go-ldap/ldap/v3"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/trevex/terraform-provider-ldap/util"
)
func resourceLDAPObjectAttributes() *schema.Resource {
return &schema.Resource{
Create: resourceLDAPObjectAttributesCreate,
Read: resourceLDAPObjectAttributesRead,
Update: resourceLDAPObjectAttributesUpdate,
Delete: resourceLDAPObjectAttributesDelete,
Description: "The `ldap_object_attributes`-resource owns only specific attributes of an object. In case of multi-valued attributes the resource only owns the values defined by the resource and all pre-existing ones or ones added by other means are left in-tact.",
Schema: map[string]*schema.Schema{
"dn": {
Type: schema.TypeString,
Description: "The Distinguished Name (DN) of the object, as the concatenation of its RDN (unique among siblings) and its parent's DN. The referenced object should exist to be able to add attributes.",
Required: true,
ForceNew: true,
},
"attributes": {
Type: schema.TypeSet,
Description: "The map of attributes to add to the referenced object; each attribute can be multi-valued.",
Set: attributeHash,
MinItems: 0,
Elem: &schema.Schema{
Type: schema.TypeMap,
Description: "The list of values for a given attribute.",
MinItems: 1,
MaxItems: 1,
Elem: &schema.Schema{
Type: schema.TypeString,
Description: "The individual value for the given attribute.",
},
},
Optional: true,
},
},
}
}
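// A minimal usage sketch (illustrative only; the DN and attribute values are hypothetical).
// Multi-valued attributes are expressed as repeated single-entry maps:
//
//   resource "ldap_object_attributes" "mail_aliases" {
//     dn = "cn=example,dc=example,dc=com"
//     attributes = [
//       { mail = "user@example.com" },
//       { mail = "user@example.org" },
//     ]
//   }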
func resourceLDAPObjectAttributesCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::create - adding attributes to object %q", dn)
request := ldap.NewModifyRequest(dn, []ldap.Control{})
// if there is a non-empty list of attributes, loop through it and
// create a new map collecting attribute names and their value(s); we need to
// do this because we could not model the attributes as a map[string][]string
// due to an apparent limitation in HCL; we have a []map[string]string, so
// we loop through the list and accumulate values when they share the same
// key, then we use these as attributes in the LDAP client.
if v, ok := d.GetOk("attributes"); ok {
attributes := v.(*schema.Set).List()
if len(attributes) > 0 {
debugLog("ldap_object_attributes::create - object %q updated with %d additional attributes", dn, len(attributes))
m := make(map[string][]string)
for _, attribute := range attributes {
debugLog("ldap_object_attributes::create - %q has attribute of type %T", dn, attribute)
// each map should only have one entry (see resource declaration)
for name, value := range attribute.(map[string]interface{}) {
debugLog("ldap_object_attributes::create - %q has attribute[%v] => %v (%T)", dn, name, value, value)
v := toAttributeValue(name, value.(string))
m[name] = append(m[name], v)
}
}
// now loop through the map and add attributes with their value(s)
for name, values := range m {
request.Add(name, values)
}
}
}
err := client.Modify(request)
if err != nil {
return err
}
debugLog("ldap_object_attributes::create - object %q updated with additional attributes", dn)
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::read - looking for object %q", dn)
// when searching by DN, you don't need to specify the base DN, a search
// filter and a "subtree" scope: just put the DN (i.e. the primary key) as the
// base DN with a "base object" scope, and the returned object will be the
// entry, if it exists
request := ldap.NewSearchRequest(
dn,
ldap.ScopeBaseObject,
ldap.NeverDerefAliases,
0,
0,
false,
"(objectclass=*)",
[]string{"*"},
nil,
)
sr, err := client.Search(request)
if err != nil {
if err, ok := err.(*ldap.Error); ok {
if err.ResultCode == 32 { // no such object
warnLog("ldap_object_attributes::read - object not found, removing %q from state because it no longer exists in LDAP", dn)
d.SetId("")
}
}
debugLog("ldap_object_attributes::read - lookup for %q returned an error %v", dn, err)
return err
}
debugLog("ldap_object_attributes::read - query for %q returned %v", dn, sr)
// Let's transform the attributes from LDAP into a set that we can intersect
// with our resources sets.
ldapSet := &schema.Set{
F: attributeHash,
}
for _, attribute := range sr.Entries[0].Attributes {
debugLog("ldap_object_attributes::read - adding attribute %q to %q (%d values)", attribute.Name, dn, len(attribute.Values))
if len(attribute.Values) == 1 {
// we don't treat the RDN as an ordinary attribute
a := fmt.Sprintf("%s=%s", attribute.Name, attribute.Values[0])
if strings.HasPrefix(dn, a) {
debugLog("ldap_object_attributes::read - skipping RDN %q of %q", a, dn)
continue
}
}
// now add each value as an individual entry into the object, because
// we do not handle name => []values, and we have a set of maps each
// holding a single entry name => value; multiple maps may share the
// same key.
for _, value := range attribute.Values {
debugLog("ldap_object_attributes::read - for %q from ldap, setting %q => %q", dn, attribute.Name, value)
ldapSet.Add(map[string]interface{}{
attribute.Name: value,
})
}
}
debugLog("ldap_object_attributes::read - attributes from ldap of %q => %v", dn, ldapSet.List())
// We are interested in both the attributes before and after changes, so
// depending on what is available, let's compute the union
var (
oldSet *schema.Set
newSet *schema.Set
unionSet *schema.Set
)
if d.HasChange("attributes") {
prev, next := d.GetChange("attributes")
oldSet = prev.(*schema.Set)
newSet = next.(*schema.Set)
} else {
newSet = d.Get("attributes").(*schema.Set)
}
debugLog("ldap_object_attributes::read - newSet of %q => %v", dn, newSet.List())
if oldSet != nil {
debugLog("ldap_object_attributes::read - oldSet of %q => %v", dn, oldSet.List())
unionSet = oldSet.Union(newSet)
} else {
unionSet = newSet
}
debugLog("ldap_object_attributes::read - union of %q => %v", dn, unionSet.List())
// Now that we have the union of the relevant terraform state and LDAP, let's
// get the intersection and set it.
set := unionSet.Intersection(ldapSet)
debugLog("ldap_object_attributes::read - intersection with ldap of %q => %v", dn, set.List())
// If the set is empty the attributes do not exist, yet.
if set.Len() == 0 {
d.SetId("")
return nil
}
// The set contains values, let's set them and indicate that the object
// exists by setting the id as well.
if err := d.Set("attributes", set); err != nil {
warnLog("ldap_object_attributes::read - error setting attributes for %q : %v", dn, err)
return err
}
d.SetId(dn)
return nil
}
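// Worked example of the set algebra in the read function above (values are
// hypothetical): say the old state held {mail=a@x, mail=b@x}, the new
// configuration asks for {mail=a@x, mail=c@x}, and LDAP currently returns
// {mail=a@x, mail=b@x, cn=alice}. Then
//
//	union(old, new)              = {mail=a@x, mail=b@x, mail=c@x}
//	intersection(union, ldapSet) = {mail=a@x, mail=b@x}
//
// so only values this resource has ever claimed, and which still exist in
// LDAP, end up in state; attributes owned by other tools (cn=alice here) are
// never imported and therefore never diffed.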
func resourceLDAPObjectAttributesUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::update - performing update on %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
if d.HasChange("attributes") {
o, n := d.GetChange("attributes")
debugLog("ldap_object_attributes::update - \n%s", printAttributes("old attributes map", o))
debugLog("ldap_object_attributes::update - \n%s", printAttributes("new attributes map", n))
err := computeAndAddAttributeDeltas(modify, o.(*schema.Set), n.(*schema.Set))
if err != nil {
return err
}
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil |
} else {
warnLog("ldap_object_attributes::update - didn't actually make changes to %q because there were no changes requested", dn)
}
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::delete - removing attributes from %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
err := computeAndAddAttributeDeltas(modify, d.Get("attributes").(*schema.Set), &schema.Set{
F: attributeHash,
})
if err != nil {
return err
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::delete - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::delete - didn't actually make changes to %q because there were no changes requested", dn)
}
debugLog("ldap_object::delete - %q removed", dn)
return nil
}
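// Sketch of the deltas computed by computeAndAddAttributeDeltas below, with
// made-up values: for the old set {mail=a@x, mail=b@x, sn=Doe} and the new set
// {mail=a@x, sn=Smith}, old-new is {mail=b@x, sn=Doe} and new-old is
// {sn=Smith}, so the ModifyRequest carries
//
//	modify.Delete("mail", []string{"b@x"})
//	modify.Delete("sn", []string{"Doe"})
//	modify.Add("sn", []string{"Smith"})
//
// one Delete per attribute name that lost values and one Add per name that
// gained values; the delete path above passes an empty new set, which turns
// every currently-owned value into a Delete.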
func computeAndAddAttributeDeltas(modify *ldap.ModifyRequest, os, ns *schema.Set) error {
ra := os.Difference(ns) // removed attributes
rk := util.NewSet() // names of removed attributes
for _, v := range ra.List() {
for k := range v.(map[string]interface{}) {
rk.Add(k)
}
}
aa := ns.Difference(os) // added attributes
ak := util.NewSet() // names of added attributes
for _, v := range aa.List() {
for k := range v.(map[string]interface{}) {
ak.Add(k)
}
}
// loop over removed attributes' names
for _, k := range rk.List() {
values := []string{}
for _, m := range ra.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Delete(k, values)
debugLog("ldap_object_attributes::deltas - removing attribute %q with values %v", k, values)
}
for _, k := range ak.List() {
values := []string{}
for _, m := range aa.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Add(k, values)
debugLog("ldap_object_attributes::deltas - adding new attribute %q with values %v", k, values)
}
return nil
}
| {
errorLog("ldap_object_attributes::update - error modifying LDAP object %q with values %v", d.Id(), err)
return err
} | conditional_block |
resource_ldap_object_attributes.go | package provider
import (
"fmt"
"strings"
"github.com/go-ldap/ldap/v3"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/trevex/terraform-provider-ldap/util"
)
func resourceLDAPObjectAttributes() *schema.Resource {
return &schema.Resource{
Create: resourceLDAPObjectAttributesCreate,
Read: resourceLDAPObjectAttributesRead,
Update: resourceLDAPObjectAttributesUpdate,
Delete: resourceLDAPObjectAttributesDelete,
Description: "The `ldap_object_attributes`-resource owns only specific attributes of an object. In case of multi-valued attributes the resource only owns the values defined by the resource and all pre-existing ones or ones added by other means are left in-tact.",
Schema: map[string]*schema.Schema{
"dn": {
Type: schema.TypeString,
Description: "The Distinguished Name (DN) of the object, as the concatenation of its RDN (unique among siblings) and its parent's DN. The referenced object should exist to be able to add attributes.",
Required: true,
ForceNew: true,
},
"attributes": {
Type: schema.TypeSet,
Description: "The map of attributes to add to the referenced object; each attribute can be multi-valued.",
Set: attributeHash,
MinItems: 0,
Elem: &schema.Schema{
Type: schema.TypeMap,
Description: "The list of values for a given attribute.",
MinItems: 1,
MaxItems: 1,
Elem: &schema.Schema{
Type: schema.TypeString,
Description: "The individual value for the given attribute.",
},
},
Optional: true,
},
},
}
}
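// Hypothetical HCL usage of the schema above (DN and values are invented, and
// the exact block syntax may vary with the Terraform version in use); each
// element of `attributes` is a single-entry map, which is how a multi-valued
// attribute is expressed:
//
//	resource "ldap_object_attributes" "alice_mail" {
//	  dn = "cn=alice,ou=people,dc=example,dc=com"
//	  attributes = [
//	    { mail = "alice@example.com" },
//	    { mail = "alice@example.org" },
//	  ]
//	}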
func resourceLDAPObjectAttributesCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::create - adding attributes to object %q", dn)
request := ldap.NewModifyRequest(dn, []ldap.Control{})
// if there is a non-empty list of attributes, loop through it and
// create a new map collecting attribute names and its value(s); we need to
// do this because we could not model the attributes as a map[string][]string
// due to an apparent limitation in HCL; we have a []map[string]string, so
// we loop through the list and accumulate values when they share the same
// key, then we use these as attributes in the LDAP client.
if v, ok := d.GetOk("attributes"); ok {
attributes := v.(*schema.Set).List()
if len(attributes) > 0 {
debugLog("ldap_object_attributes::create - object %q updated with %d additional attributes", dn, len(attributes))
m := make(map[string][]string)
for _, attribute := range attributes {
debugLog("ldap_object_attributes::create - %q has attribute of type %T", dn, attribute)
// each map should only have one entry (see resource declaration)
for name, value := range attribute.(map[string]interface{}) {
debugLog("ldap_object_attributes::create - %q has attribute[%v] => %v (%T)", dn, name, value, value)
v := toAttributeValue(name, value.(string))
m[name] = append(m[name], v)
}
}
// now loop through the map and add attributes with their value(s)
for name, values := range m {
request.Add(name, values)
}
}
}
err := client.Modify(request)
if err != nil {
return err
}
debugLog("ldap_object_attributes::create - object %q updated with additional attributes", dn)
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::read - looking for object %q", dn)
// when searching by DN, you don't need to specify the base DN, a search
// filter, and a "subtree" scope: just put the DN (i.e. the primary key) as the
// base DN with a "base object" scope, and the returned object will be the
// entry, if it exists
request := ldap.NewSearchRequest(
dn,
ldap.ScopeBaseObject,
ldap.NeverDerefAliases,
0,
0,
false,
"(objectclass=*)",
[]string{"*"},
nil,
)
sr, err := client.Search(request)
if err != nil {
if err, ok := err.(*ldap.Error); ok {
if err.ResultCode == 32 { // no such object
warnLog("ldap_object_attributes::read - object not found, removing %q from state because it no longer exists in LDAP", dn)
d.SetId("")
}
}
debugLog("ldap_object_attributes::read - lookup for %q returned an error %v", dn, err)
return err
}
debugLog("ldap_object_attributes::read - query for %q returned %v", dn, sr)
// Let's transform the attributes from LDAP into a set that we can intersect
// with our resource's sets.
ldapSet := &schema.Set{
F: attributeHash,
}
for _, attribute := range sr.Entries[0].Attributes {
debugLog("ldap_object_attributes::read - adding attribute %q to %q (%d values)", attribute.Name, dn, len(attribute.Values))
if len(attribute.Values) == 1 {
// we don't treat the RDN as an ordinary attribute
a := fmt.Sprintf("%s=%s", attribute.Name, attribute.Values[0])
if strings.HasPrefix(dn, a) {
debugLog("ldap_object_attributes::read - skipping RDN %q of %q", a, dn)
continue
}
}
// now add each value as an individual entry into the object, because
// we do not handle name => []values, and we have a set of maps each
// holding a single entry name => value; multiple maps may share the
// same key.
for _, value := range attribute.Values {
debugLog("ldap_object_attributes::read - for %q from ldap, setting %q => %q", dn, attribute.Name, value)
ldapSet.Add(map[string]interface{}{
attribute.Name: value,
})
}
}
debugLog("ldap_object_attributes::read - attributes from ldap of %q => %v", dn, ldapSet.List())
// We are interested in both the attributes before and after changes, so
// depending on what is available, let's compute the union
var (
oldSet *schema.Set
newSet *schema.Set
unionSet *schema.Set
)
if d.HasChange("attributes") {
prev, next := d.GetChange("attributes")
oldSet = prev.(*schema.Set)
newSet = next.(*schema.Set)
} else {
newSet = d.Get("attributes").(*schema.Set)
}
debugLog("ldap_object_attributes::read - newSet of %q => %v", dn, newSet.List())
if oldSet != nil {
debugLog("ldap_object_attributes::read - oldSet of %q => %v", dn, oldSet.List())
unionSet = oldSet.Union(newSet)
} else {
unionSet = newSet
}
debugLog("ldap_object_attributes::read - union of %q => %v", dn, unionSet.List())
// Now that we have the union of the relevant terraform state and LDAP, let's
// get the intersection and set it. | debugLog("ldap_object_attributes::read - intersection with ldap of %q => %v", dn, set.List())
// If the set is empty the attributes do not exist, yet.
if set.Len() == 0 {
d.SetId("")
return nil
}
// The set contains values, let's set them and indicate that the object
// exists by setting the id as well.
if err := d.Set("attributes", set); err != nil {
warnLog("ldap_object_attributes::read - error setting attributes for %q : %v", dn, err)
return err
}
d.SetId(dn)
return nil
}
func resourceLDAPObjectAttributesUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::update - performing update on %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
if d.HasChange("attributes") {
o, n := d.GetChange("attributes")
debugLog("ldap_object_attributes::update - \n%s", printAttributes("old attributes map", o))
debugLog("ldap_object_attributes::update - \n%s", printAttributes("new attributes map", n))
err := computeAndAddAttributeDeltas(modify, o.(*schema.Set), n.(*schema.Set))
if err != nil {
return err
}
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::update - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::update - didn't actually make changes to %q because there were no changes requested", dn)
}
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::delete - removing attributes from %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
err := computeAndAddAttributeDeltas(modify, d.Get("attributes").(*schema.Set), &schema.Set{
F: attributeHash,
})
if err != nil {
return err
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::delete - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::delete - didn't actually make changes to %q because there were no changes requested", dn)
}
debugLog("ldap_object::delete - %q removed", dn)
return nil
}
func computeAndAddAttributeDeltas(modify *ldap.ModifyRequest, os, ns *schema.Set) error {
ra := os.Difference(ns) // removed attributes
rk := util.NewSet() // names of removed attributes
for _, v := range ra.List() {
for k := range v.(map[string]interface{}) {
rk.Add(k)
}
}
aa := ns.Difference(os) // added attributes
ak := util.NewSet() // names of added attributes
for _, v := range aa.List() {
for k := range v.(map[string]interface{}) {
ak.Add(k)
}
}
// loop over removed attributes' names
for _, k := range rk.List() {
values := []string{}
for _, m := range ra.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Delete(k, values)
debugLog("ldap_object_attributes::deltas - removing attribute %q with values %v", k, values)
}
for _, k := range ak.List() {
values := []string{}
for _, m := range aa.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Add(k, values)
debugLog("ldap_object_attributes::deltas - adding new attribute %q with values %v", k, values)
}
return nil
} | set := unionSet.Intersection(ldapSet) | random_line_split |
resource_ldap_object_attributes.go | package provider
import (
"fmt"
"strings"
"github.com/go-ldap/ldap/v3"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/trevex/terraform-provider-ldap/util"
)
func resourceLDAPObjectAttributes() *schema.Resource |
func resourceLDAPObjectAttributesCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::create - adding attributes to object %q", dn)
request := ldap.NewModifyRequest(dn, []ldap.Control{})
// if there is a non-empty list of attributes, loop through it and
// create a new map collecting attribute names and its value(s); we need to
// do this because we could not model the attributes as a map[string][]string
// due to an apparent limitation in HCL; we have a []map[string]string, so
// we loop through the list and accumulate values when they share the same
// key, then we use these as attributes in the LDAP client.
if v, ok := d.GetOk("attributes"); ok {
attributes := v.(*schema.Set).List()
if len(attributes) > 0 {
debugLog("ldap_object_attributes::create - object %q updated with %d additional attributes", dn, len(attributes))
m := make(map[string][]string)
for _, attribute := range attributes {
debugLog("ldap_object_attributes::create - %q has attribute of type %T", dn, attribute)
// each map should only have one entry (see resource declaration)
for name, value := range attribute.(map[string]interface{}) {
debugLog("ldap_object_attributes::create - %q has attribute[%v] => %v (%T)", dn, name, value, value)
v := toAttributeValue(name, value.(string))
m[name] = append(m[name], v)
}
}
// now loop through the map and add attributes with their value(s)
for name, values := range m {
request.Add(name, values)
}
}
}
err := client.Modify(request)
if err != nil {
return err
}
debugLog("ldap_object_attributes::create - object %q updated with additional attributes", dn)
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::read - looking for object %q", dn)
// when searching by DN, you don't need to specify the base DN, a search
// filter, and a "subtree" scope: just put the DN (i.e. the primary key) as the
// base DN with a "base object" scope, and the returned object will be the
// entry, if it exists
request := ldap.NewSearchRequest(
dn,
ldap.ScopeBaseObject,
ldap.NeverDerefAliases,
0,
0,
false,
"(objectclass=*)",
[]string{"*"},
nil,
)
sr, err := client.Search(request)
if err != nil {
if err, ok := err.(*ldap.Error); ok {
if err.ResultCode == 32 { // no such object
warnLog("ldap_object_attributes::read - object not found, removing %q from state because it no longer exists in LDAP", dn)
d.SetId("")
}
}
debugLog("ldap_object_attributes::read - lookup for %q returned an error %v", dn, err)
return err
}
debugLog("ldap_object_attributes::read - query for %q returned %v", dn, sr)
// Let's transform the attributes from LDAP into a set that we can intersect
// with our resource's sets.
ldapSet := &schema.Set{
F: attributeHash,
}
for _, attribute := range sr.Entries[0].Attributes {
debugLog("ldap_object_attributes::read - adding attribute %q to %q (%d values)", attribute.Name, dn, len(attribute.Values))
if len(attribute.Values) == 1 {
// we don't treat the RDN as an ordinary attribute
a := fmt.Sprintf("%s=%s", attribute.Name, attribute.Values[0])
if strings.HasPrefix(dn, a) {
debugLog("ldap_object_attributes::read - skipping RDN %q of %q", a, dn)
continue
}
}
// now add each value as an individual entry into the object, because
// we do not handle name => []values, and we have a set of maps each
// holding a single entry name => value; multiple maps may share the
// same key.
for _, value := range attribute.Values {
debugLog("ldap_object_attributes::read - for %q from ldap, setting %q => %q", dn, attribute.Name, value)
ldapSet.Add(map[string]interface{}{
attribute.Name: value,
})
}
}
debugLog("ldap_object_attributes::read - attributes from ldap of %q => %v", dn, ldapSet.List())
// We are interested in both the attributes before and after changes, so
// depending on what is available, let's compute the union
var (
oldSet *schema.Set
newSet *schema.Set
unionSet *schema.Set
)
if d.HasChange("attributes") {
prev, next := d.GetChange("attributes")
oldSet = prev.(*schema.Set)
newSet = next.(*schema.Set)
} else {
newSet = d.Get("attributes").(*schema.Set)
}
debugLog("ldap_object_attributes::read - newSet of %q => %v", dn, newSet.List())
if oldSet != nil {
debugLog("ldap_object_attributes::read - oldSet of %q => %v", dn, oldSet.List())
unionSet = oldSet.Union(newSet)
} else {
unionSet = newSet
}
debugLog("ldap_object_attributes::read - union of %q => %v", dn, unionSet.List())
// Now that we have the union of the relevant terraform state and LDAP, let's
// get the intersection and set it.
set := unionSet.Intersection(ldapSet)
debugLog("ldap_object_attributes::read - intersection with ldap of %q => %v", dn, set.List())
// If the set is empty the attributes do not exist, yet.
if set.Len() == 0 {
d.SetId("")
return nil
}
// The set contains values, let's set them and indicate that the object
// exists by setting the id as well.
if err := d.Set("attributes", set); err != nil {
warnLog("ldap_object_attributes::read - error setting attributes for %q : %v", dn, err)
return err
}
d.SetId(dn)
return nil
}
func resourceLDAPObjectAttributesUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::update - performing update on %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
if d.HasChange("attributes") {
o, n := d.GetChange("attributes")
debugLog("ldap_object_attributes::update - \n%s", printAttributes("old attributes map", o))
debugLog("ldap_object_attributes::update - \n%s", printAttributes("new attributes map", n))
err := computeAndAddAttributeDeltas(modify, o.(*schema.Set), n.(*schema.Set))
if err != nil {
return err
}
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::update - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::update - didn't actually make changes to %q because there were no changes requested", dn)
}
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::delete - removing attributes from %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
err := computeAndAddAttributeDeltas(modify, d.Get("attributes").(*schema.Set), &schema.Set{
F: attributeHash,
})
if err != nil {
return err
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::delete - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::delete - didn't actually make changes to %q because there were no changes requested", dn)
}
debugLog("ldap_object::delete - %q removed", dn)
return nil
}
func computeAndAddAttributeDeltas(modify *ldap.ModifyRequest, os, ns *schema.Set) error {
ra := os.Difference(ns) // removed attributes
rk := util.NewSet() // names of removed attributes
for _, v := range ra.List() {
for k := range v.(map[string]interface{}) {
rk.Add(k)
}
}
aa := ns.Difference(os) // added attributes
ak := util.NewSet() // names of added attributes
for _, v := range aa.List() {
for k := range v.(map[string]interface{}) {
ak.Add(k)
}
}
// loop over removed attributes' names
for _, k := range rk.List() {
values := []string{}
for _, m := range ra.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Delete(k, values)
debugLog("ldap_object_attributes::deltas - removing attribute %q with values %v", k, values)
}
for _, k := range ak.List() {
values := []string{}
for _, m := range aa.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Add(k, values)
debugLog("ldap_object_attributes::deltas - adding new attribute %q with values %v", k, values)
}
return nil
}
| {
return &schema.Resource{
Create: resourceLDAPObjectAttributesCreate,
Read: resourceLDAPObjectAttributesRead,
Update: resourceLDAPObjectAttributesUpdate,
Delete: resourceLDAPObjectAttributesDelete,
Description: "The `ldap_object_attributes`-resource owns only specific attributes of an object. In case of multi-valued attributes the resource only owns the values defined by the resource and all pre-existing ones or ones added by other means are left in-tact.",
Schema: map[string]*schema.Schema{
"dn": {
Type: schema.TypeString,
Description: "The Distinguished Name (DN) of the object, as the concatenation of its RDN (unique among siblings) and its parent's DN. The referenced object should exist to be able to add attributes.",
Required: true,
ForceNew: true,
},
"attributes": {
Type: schema.TypeSet,
Description: "The map of attributes to add to the referenced object; each attribute can be multi-valued.",
Set: attributeHash,
MinItems: 0,
Elem: &schema.Schema{
Type: schema.TypeMap,
Description: "The list of values for a given attribute.",
MinItems: 1,
MaxItems: 1,
Elem: &schema.Schema{
Type: schema.TypeString,
Description: "The individual value for the given attribute.",
},
},
Optional: true,
},
},
}
} | identifier_body |
resource_ldap_object_attributes.go | package provider
import (
"fmt"
"strings"
"github.com/go-ldap/ldap/v3"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/trevex/terraform-provider-ldap/util"
)
func resourceLDAPObjectAttributes() *schema.Resource {
return &schema.Resource{
Create: resourceLDAPObjectAttributesCreate,
Read: resourceLDAPObjectAttributesRead,
Update: resourceLDAPObjectAttributesUpdate,
Delete: resourceLDAPObjectAttributesDelete,
Description: "The `ldap_object_attributes`-resource owns only specific attributes of an object. In case of multi-valued attributes the resource only owns the values defined by the resource and all pre-existing ones or ones added by other means are left in-tact.",
Schema: map[string]*schema.Schema{
"dn": {
Type: schema.TypeString,
Description: "The Distinguished Name (DN) of the object, as the concatenation of its RDN (unique among siblings) and its parent's DN. The referenced object should exist to be able to add attributes.",
Required: true,
ForceNew: true,
},
"attributes": {
Type: schema.TypeSet,
Description: "The map of attributes to add to the referenced object; each attribute can be multi-valued.",
Set: attributeHash,
MinItems: 0,
Elem: &schema.Schema{
Type: schema.TypeMap,
Description: "The list of values for a given attribute.",
MinItems: 1,
MaxItems: 1,
Elem: &schema.Schema{
Type: schema.TypeString,
Description: "The individual value for the given attribute.",
},
},
Optional: true,
},
},
}
}
func | (d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::create - adding attributes to object %q", dn)
request := ldap.NewModifyRequest(dn, []ldap.Control{})
// if there is a non-empty list of attributes, loop through it and
// create a new map collecting attribute names and its value(s); we need to
// do this because we could not model the attributes as a map[string][]string
// due to an apparent limitation in HCL; we have a []map[string]string, so
// we loop through the list and accumulate values when they share the same
// key, then we use these as attributes in the LDAP client.
if v, ok := d.GetOk("attributes"); ok {
attributes := v.(*schema.Set).List()
if len(attributes) > 0 {
debugLog("ldap_object_attributes::create - object %q updated with %d additional attributes", dn, len(attributes))
m := make(map[string][]string)
for _, attribute := range attributes {
debugLog("ldap_object_attributes::create - %q has attribute of type %T", dn, attribute)
// each map should only have one entry (see resource declaration)
for name, value := range attribute.(map[string]interface{}) {
debugLog("ldap_object_attributes::create - %q has attribute[%v] => %v (%T)", dn, name, value, value)
v := toAttributeValue(name, value.(string))
m[name] = append(m[name], v)
}
}
// now loop through the map and add attributes with their value(s)
for name, values := range m {
request.Add(name, values)
}
}
}
err := client.Modify(request)
if err != nil {
return err
}
debugLog("ldap_object_attributes::create - object %q updated with additional attributes", dn)
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::read - looking for object %q", dn)
// when searching by DN, you don't need to specify the base DN, a search
// filter, and a "subtree" scope: just put the DN (i.e. the primary key) as the
// base DN with a "base object" scope, and the returned object will be the
// entry, if it exists
request := ldap.NewSearchRequest(
dn,
ldap.ScopeBaseObject,
ldap.NeverDerefAliases,
0,
0,
false,
"(objectclass=*)",
[]string{"*"},
nil,
)
sr, err := client.Search(request)
if err != nil {
if err, ok := err.(*ldap.Error); ok {
if err.ResultCode == 32 { // no such object
warnLog("ldap_object_attributes::read - object not found, removing %q from state because it no longer exists in LDAP", dn)
d.SetId("")
}
}
debugLog("ldap_object_attributes::read - lookup for %q returned an error %v", dn, err)
return err
}
debugLog("ldap_object_attributes::read - query for %q returned %v", dn, sr)
// Let's transform the attributes from LDAP into a set that we can intersect
// with our resource's sets.
ldapSet := &schema.Set{
F: attributeHash,
}
for _, attribute := range sr.Entries[0].Attributes {
debugLog("ldap_object_attributes::read - adding attribute %q to %q (%d values)", attribute.Name, dn, len(attribute.Values))
if len(attribute.Values) == 1 {
// we don't treat the RDN as an ordinary attribute
a := fmt.Sprintf("%s=%s", attribute.Name, attribute.Values[0])
if strings.HasPrefix(dn, a) {
debugLog("ldap_object_attributes::read - skipping RDN %q of %q", a, dn)
continue
}
}
// now add each value as an individual entry into the object, because
// we do not handle name => []values, and we have a set of maps each
// holding a single entry name => value; multiple maps may share the
// same key.
for _, value := range attribute.Values {
debugLog("ldap_object_attributes::read - for %q from ldap, setting %q => %q", dn, attribute.Name, value)
ldapSet.Add(map[string]interface{}{
attribute.Name: value,
})
}
}
debugLog("ldap_object_attributes::read - attributes from ldap of %q => %v", dn, ldapSet.List())
// We are interested in both the attributes before and after changes, so
// depending on what is available, let's compute the union
var (
oldSet *schema.Set
newSet *schema.Set
unionSet *schema.Set
)
if d.HasChange("attributes") {
prev, next := d.GetChange("attributes")
oldSet = prev.(*schema.Set)
newSet = next.(*schema.Set)
} else {
newSet = d.Get("attributes").(*schema.Set)
}
debugLog("ldap_object_attributes::read - newSet of %q => %v", dn, newSet.List())
if oldSet != nil {
debugLog("ldap_object_attributes::read - oldSet of %q => %v", dn, oldSet.List())
unionSet = oldSet.Union(newSet)
} else {
unionSet = newSet
}
debugLog("ldap_object_attributes::read - union of %q => %v", dn, unionSet.List())
// Now that we have the union of the relevant terraform state and LDAP, let's
// get the intersection and set it.
set := unionSet.Intersection(ldapSet)
debugLog("ldap_object_attributes::read - intersection with ldap of %q => %v", dn, set.List())
// If the set is empty the attributes do not exist, yet.
if set.Len() == 0 {
d.SetId("")
return nil
}
// The set contains values, let's set them and indicate that the object
// exists by setting the id as well.
if err := d.Set("attributes", set); err != nil {
warnLog("ldap_object_attributes::read - error setting attributes for %q : %v", dn, err)
return err
}
d.SetId(dn)
return nil
}
func resourceLDAPObjectAttributesUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::update - performing update on %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
if d.HasChange("attributes") {
o, n := d.GetChange("attributes")
debugLog("ldap_object_attributes::update - \n%s", printAttributes("old attributes map", o))
debugLog("ldap_object_attributes::update - \n%s", printAttributes("new attributes map", n))
err := computeAndAddAttributeDeltas(modify, o.(*schema.Set), n.(*schema.Set))
if err != nil {
return err
}
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::update - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::update - didn't actually make changes to %q because there were no changes requested", dn)
}
return resourceLDAPObjectAttributesRead(d, meta)
}
func resourceLDAPObjectAttributesDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ldap.Conn)
dn := d.Get("dn").(string)
debugLog("ldap_object_attributes::delete - removing attributes from %q", dn)
modify := ldap.NewModifyRequest(dn, []ldap.Control{})
err := computeAndAddAttributeDeltas(modify, d.Get("attributes").(*schema.Set), &schema.Set{
F: attributeHash,
})
if err != nil {
return err
}
if len(modify.Changes) > 0 {
err := client.Modify(modify)
if err != nil {
errorLog("ldap_object_attributes::delete - error modifying LDAP object %q with values %v", d.Id(), err)
return err
}
} else {
warnLog("ldap_object_attributes::delete - didn't actually make changes to %q because there were no changes requested", dn)
}
debugLog("ldap_object::delete - %q removed", dn)
return nil
}
func computeAndAddAttributeDeltas(modify *ldap.ModifyRequest, os, ns *schema.Set) error {
ra := os.Difference(ns) // removed attributes
rk := util.NewSet() // names of removed attributes
for _, v := range ra.List() {
for k := range v.(map[string]interface{}) {
rk.Add(k)
}
}
aa := ns.Difference(os) // added attributes
ak := util.NewSet() // names of added attributes
for _, v := range aa.List() {
for k := range v.(map[string]interface{}) {
ak.Add(k)
}
}
// loop over removed attributes' names
for _, k := range rk.List() {
values := []string{}
for _, m := range ra.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Delete(k, values)
debugLog("ldap_object_attributes::deltas - removing attribute %q with values %v", k, values)
}
for _, k := range ak.List() {
values := []string{}
for _, m := range aa.List() {
for mk, mv := range m.(map[string]interface{}) {
if k == mk {
v := toAttributeValue(k, mv.(string))
values = append(values, v)
}
}
}
modify.Add(k, values)
debugLog("ldap_object_attributes::deltas - adding new attribute %q with values %v", k, values)
}
return nil
}
| resourceLDAPObjectAttributesCreate | identifier_name |
mod.rs | use std::cell::RefCell;
use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::fmt;
use std::rc::Rc;
use semver;
use core::{PackageId, Registry, SourceId, Summary, Dependency};
use core::PackageIdSpec;
use util::{CargoResult, Graph, human, ChainError, CargoError};
use util::profile;
use util::graph::{Nodes, Edges};
pub use self::encode::{EncodableResolve, EncodableDependency, EncodablePackageId};
pub use self::encode::Metadata;
mod encode;
/// Represents a fully resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package as well as what the root package is.
#[derive(PartialEq, Eq, Clone)]
pub struct Resolve {
graph: Graph<PackageId>,
features: HashMap<PackageId, HashSet<String>>,
root: PackageId,
metadata: Option<Metadata>,
}
#[derive(Clone, Copy)]
pub enum Method<'a> {
Everything,
Required{ dev_deps: bool,
features: &'a [String],
uses_default_features: bool,
target_platform: Option<&'a str>},
}
impl Resolve {
fn new(root: PackageId) -> Resolve {
let mut g = Graph::new();
g.add(root.clone(), &[]);
Resolve { graph: g, root: root, features: HashMap::new(), metadata: None }
}
pub fn copy_metadata(&mut self, other: &Resolve) {
self.metadata = other.metadata.clone();
}
pub fn iter(&self) -> Nodes<PackageId> {
self.graph.iter()
}
pub fn root(&self) -> &PackageId { &self.root }
pub fn deps(&self, pkg: &PackageId) -> Option<Edges<PackageId>> {
self.graph.edges(pkg)
}
pub fn query(&self, spec: &str) -> CargoResult<&PackageId> {
let spec = try!(PackageIdSpec::parse(spec).chain_error(|| {
human(format!("invalid package id specification: `{}`", spec))
}));
let mut ids = self.iter().filter(|p| spec.matches(*p));
let ret = match ids.next() {
Some(id) => id,
None => return Err(human(format!("package id specification `{}` \
matched no packages", spec))),
};
return match ids.next() {
Some(other) => {
let mut msg = format!("There are multiple `{}` packages in \
your project, and the specification \
`{}` is ambiguous.\n\
Please re-run this command \
with `-p <spec>` where `<spec>` is one \
of the following:",
spec.name(), spec);
let mut vec = vec![ret, other];
vec.extend(ids);
minimize(&mut msg, vec, &spec);
Err(human(msg))
}
None => Ok(ret)
};
fn minimize(msg: &mut String,
ids: Vec<&PackageId>,
spec: &PackageIdSpec) {
let mut version_cnt = HashMap::new();
for id in ids.iter() {
*version_cnt.entry(id.version()).or_insert(0) += 1;
}
for id in ids.iter() {
if version_cnt[id.version()] == 1 {
msg.push_str(&format!("\n {}:{}", spec.name(),
id.version()));
} else {
msg.push_str(&format!("\n {}",
PackageIdSpec::from_package_id(*id)));
}
}
}
}
pub fn features(&self, pkg: &PackageId) -> Option<&HashSet<String>> |
}
impl fmt::Debug for Resolve {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "graph: {:?}\n", self.graph));
try!(write!(fmt, "\nfeatures: {{\n"));
for (pkg, features) in &self.features {
try!(write!(fmt, " {}: {:?}\n", pkg, features));
}
write!(fmt, "}}")
}
}
#[derive(Clone)]
struct Context {
activations: HashMap<(String, SourceId), Vec<Rc<Summary>>>,
resolve: Resolve,
visited: Rc<RefCell<HashSet<PackageId>>>,
}
/// Builds the list of all packages required to build the first argument.
pub fn resolve(summary: &Summary, method: Method,
registry: &mut Registry) -> CargoResult<Resolve> {
trace!("resolve; summary={}", summary.package_id());
let summary = Rc::new(summary.clone());
let cx = Box::new(Context {
resolve: Resolve::new(summary.package_id().clone()),
activations: HashMap::new(),
visited: Rc::new(RefCell::new(HashSet::new())),
});
let _p = profile::start(format!("resolving: {}", summary.package_id()));
match try!(activate(cx, registry, &summary, method)) {
Ok(cx) => {
debug!("resolved: {:?}", cx.resolve);
Ok(cx.resolve)
}
Err(e) => Err(e),
}
}
fn activate(mut cx: Box<Context>,
registry: &mut Registry,
parent: &Rc<Summary>,
method: Method)
-> CargoResult<CargoResult<Box<Context>>> {
// Dependency graphs are required to be a DAG, so we keep a set of
// packages we're visiting and bail if we hit a dupe.
let id = parent.package_id();
if !cx.visited.borrow_mut().insert(id.clone()) {
return Err(human(format!("cyclic package dependency: package `{}` \
depends on itself", id)))
}
// If we're already activated, then that was easy!
if flag_activated(&mut *cx, parent, &method) {
cx.visited.borrow_mut().remove(id);
return Ok(Ok(cx))
}
debug!("activating {}", parent.package_id());
// Extracting the platform request.
let platform = match method {
Method::Required{target_platform: platform, ..} => platform,
Method::Everything => None,
};
// First, figure out our set of dependencies based on the requested set of
// features. This also calculates what features we're going to enable for
// our own dependencies.
let deps = try!(resolve_features(&mut cx, parent, method));
// Next, transform all dependencies into a list of possible candidates which
// can satisfy that dependency.
let mut deps = try!(deps.into_iter().map(|(_dep_name, (dep, features))| {
let mut candidates = try!(registry.query(dep));
// When we attempt versions for a package, we'll want to start at the
// maximum version and work our way down.
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
let candidates = candidates.into_iter().map(Rc::new).collect::<Vec<_>>();
Ok((dep, candidates, features))
}).collect::<CargoResult<Vec<_>>>());
// When we recurse, attempt to resolve dependencies with fewer candidates
// before recursing on dependencies with more candidates. This way if the
// dependency with only one candidate can't be resolved we don't have to do
// a bunch of work before we figure that out.
deps.sort_by(|&(_, ref a, _), &(_, ref b, _)| {
a.len().cmp(&b.len())
});
// Workaround compilation error: `deps` does not live long enough
let platform = platform.map(|s| &*s);
Ok(match try!(activate_deps(cx, registry, parent, platform, &deps, 0)) {
Ok(cx) => {
cx.visited.borrow_mut().remove(parent.package_id());
Ok(cx)
}
Err(e) => Err(e),
})
}
// Activate this summary by inserting it into our list of known activations.
//
// Returns if this summary with the given method is already activated.
fn flag_activated(cx: &mut Context,
summary: &Rc<Summary>,
method: &Method) -> bool {
let id = summary.package_id();
let key = (id.name().to_string(), id.source_id().clone());
let prev = cx.activations.entry(key).or_insert(Vec::new());
if !prev.iter().any(|c| c == summary) {
cx.resolve.graph.add(id.clone(), &[]);
prev.push(summary.clone());
return false
}
debug!("checking if {} is already activated", summary.package_id());
let (features, use_default) = match *method {
Method::Required { features, uses_default_features, .. } => {
(features, uses_default_features)
}
Method::Everything => return false,
};
let has_default_feature = summary.features().contains_key("default");
match cx.resolve.features(id) {
Some(prev) => {
features.iter().all(|f| prev.contains(f)) &&
(!use_default || prev.contains("default") || !has_default_feature)
}
None => features.len() == 0 && (!use_default || !has_default_feature)
}
}
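// Worked example of the check above (package and feature names are invented):
// if `foo` was previously activated with features {"default", "json"}, a later
// request for `foo` with `features = ["json"]` and default features enabled is
// already satisfied, so this returns true and resolution does not descend into
// `foo` again; a request for a feature outside that set, e.g. ["tls"], returns
// false, so resolution continues into `foo` and the extra feature is enabled.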
fn activate_deps<'a>(cx: Box<Context>,
registry: &mut Registry,
parent: &Summary,
platform: Option<&'a str>,
deps: &'a [(&Dependency, Vec<Rc<Summary>>, Vec<String>)],
cur: usize) -> CargoResult<CargoResult<Box<Context>>> {
if cur == deps.len() { return Ok(Ok(cx)) }
let (dep, ref candidates, ref features) = deps[cur];
let method = Method::Required{
dev_deps: false,
features: &features,
uses_default_features: dep.uses_default_features(),
target_platform: platform};
let key = (dep.name().to_string(), dep.source_id().clone());
let prev_active = cx.activations.get(&key)
.map(|v| &v[..]).unwrap_or(&[]);
trace!("{}[{}]>{} {} candidates", parent.name(), cur, dep.name(),
candidates.len());
trace!("{}[{}]>{} {} prev activations", parent.name(), cur,
dep.name(), prev_active.len());
// Filter the set of candidates based on the previously activated
// versions for this dependency. We can actually use a version if it
// precisely matches an activated version or if it is otherwise
// incompatible with all other activated versions. Note that we define
// "compatible" here in terms of the semver sense where if the left-most
// nonzero digit is the same they're considered compatible.
let my_candidates = candidates.iter().filter(|&b| {
prev_active.iter().any(|a| a == b) ||
prev_active.iter().all(|a| {
!compatible(a.version(), b.version())
})
});
// Alright, for each candidate that's gotten this far, it meets the
// following requirements:
//
// 1. The version matches the dependency requirement listed for this
// package
// 2. There are no activated versions for this package which are
// semver-compatible, or there's an activated version which is
// precisely equal to `candidate`.
//
// This means that we're going to attempt to activate each candidate in
// turn. We could possibly fail to activate each candidate, so we try
// each one in turn.
let mut last_err = None;
for candidate in my_candidates {
trace!("{}[{}]>{} trying {}", parent.name(), cur, dep.name(),
candidate.version());
let mut my_cx = cx.clone();
my_cx.resolve.graph.link(parent.package_id().clone(),
candidate.package_id().clone());
// If we hit an intransitive dependency then clear out the visitation
// list as we can't induce a cycle through transitive dependencies.
if !dep.is_transitive() {
my_cx.visited.borrow_mut().clear();
}
let my_cx = match try!(activate(my_cx, registry, candidate, method)) {
Ok(cx) => cx,
Err(e) => { last_err = Some(e); continue }
};
match try!(activate_deps(my_cx, registry, parent, platform, deps,
cur + 1)) {
Ok(cx) => return Ok(Ok(cx)),
Err(e) => { last_err = Some(e); }
}
}
trace!("{}[{}]>{} -- {:?}", parent.name(), cur, dep.name(),
last_err);
// Oh well, we couldn't activate any of the candidates, so we just can't
// activate this dependency at all
Ok(activation_error(&cx, registry, last_err, parent, dep, prev_active,
&candidates))
}
fn activation_error(cx: &Context,
registry: &mut Registry,
err: Option<Box<CargoError>>,
parent: &Summary,
dep: &Dependency,
prev_active: &[Rc<Summary>],
candidates: &[Rc<Summary>]) -> CargoResult<Box<Context>> {
match err {
Some(e) => return Err(e),
None => {}
}
if candidates.len() > 0 {
let mut msg = format!("failed to select a version for `{}` \
(required by `{}`):\n\
all possible versions conflict with \
previously selected versions of `{}`",
dep.name(), parent.name(),
dep.name());
'outer: for v in prev_active.iter() {
for node in cx.resolve.graph.iter() {
let edges = match cx.resolve.graph.edges(node) {
Some(edges) => edges,
None => continue,
};
for edge in edges {
if edge != v.package_id() { continue }
msg.push_str(&format!("\n version {} in use by {}",
v.version(), edge));
continue 'outer;
}
}
msg.push_str(&format!("\n version {} in use by ??",
v.version()));
}
msg.push_str(&format!("\n possible versions to select: {}",
candidates.iter()
.map(|v| v.version())
.map(|v| v.to_string())
.collect::<Vec<_>>()
.connect(", ")));
return Err(human(msg))
}
// Once we're all the way down here, we're definitely lost in the
// weeds! We didn't actually use any candidates above, so we need to
// give an error message that nothing was found.
//
// Note that we re-query the registry with a new dependency that
// allows any version so we can give some nicer error reporting
// which indicates a few versions that were actually found.
let msg = format!("no matching package named `{}` found \
(required by `{}`)\n\
location searched: {}\n\
version required: {}",
dep.name(), parent.name(),
dep.source_id(),
dep.version_req());
let mut msg = msg;
let all_req = semver::VersionReq::parse("*").unwrap();
let new_dep = dep.clone().set_version_req(all_req);
let mut candidates = try!(registry.query(&new_dep));
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
if candidates.len() > 0 {
msg.push_str("\nversions found: ");
for (i, c) in candidates.iter().take(3).enumerate() {
if i != 0 { msg.push_str(", "); }
msg.push_str(&c.version().to_string());
}
if candidates.len() > 3 {
msg.push_str(", ...");
}
}
// If we have a path dependency with a locked version, then this may
// indicate that we updated a sub-package and forgot to run `cargo
// update`. In this case try to print a helpful error!
if dep.source_id().is_path() &&
dep.version_req().to_string().starts_with("=") &&
candidates.len() > 0 {
msg.push_str("\nconsider running `cargo update` to update \
a path dependency's locked version");
}
Err(human(msg))
}
// Returns if `a` and `b` are compatible in the semver sense. This is a
// commutative operation.
//
// Versions `a` and `b` are compatible if their left-most nonzero digit is the
// same.
fn compatible(a: &semver::Version, b: &semver::Version) -> bool {
if a.major != b.major { return false }
if a.major != 0 { return true }
if a.minor != b.minor { return false }
if a.minor != 0 { return true }
a.patch == b.patch
}
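// A few concrete cases for `compatible` (the semver "left-most nonzero digit"
// rule): 1.2.0 and 1.9.3 are compatible (same nonzero major); 0.3.1 and 0.3.9
// are compatible (major is 0, same nonzero minor); 0.3.1 and 0.4.0 are not
// (major is 0 and the minors differ); 0.0.1 and 0.0.2 are not (only the patch
// is left to compare and it differs).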
fn resolve_features<'a>(cx: &mut Context, parent: &'a Summary,
method: Method)
-> CargoResult<HashMap<&'a str,
(&'a Dependency, Vec<String>)>> {
let dev_deps = match method {
Method::Everything => true,
Method::Required { dev_deps, .. } => dev_deps,
};
// First, filter by dev-dependencies
let deps = parent.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps);
// Second, ignoring dependencies that should not be compiled for this platform
let deps = deps.filter(|d| {
match method {
Method::Required{target_platform: Some(ref platform), ..} => {
d.is_active_for_platform(platform)
},
_ => true
}
});
let (mut feature_deps, used_features) = try!(build_features(parent, method));
let mut ret = HashMap::new();
// Next, sanitize all requested features by whitelisting all the requested
// features that correspond to optional dependencies
for dep in deps {
// weed out optional dependencies, but not those required
if dep.is_optional() && !feature_deps.contains_key(dep.name()) {
continue
}
let mut base = feature_deps.remove(dep.name()).unwrap_or(vec![]);
for feature in dep.features().iter() {
base.push(feature.clone());
if feature.contains("/") {
return Err(human(format!("features in dependencies \
cannot enable features in \
other dependencies: `{}`",
feature)));
}
}
ret.insert(dep.name(), (dep, base));
}
// All features can only point to optional dependencies, in which case they
// should have all been weeded out by the above iteration. Any remaining
// features are bugs in that the package does not actually have those
// features.
if feature_deps.len() > 0 {
let unknown = feature_deps.keys().map(|s| &s[..])
.collect::<Vec<&str>>();
if unknown.len() > 0 {
let features = unknown.connect(", ");
return Err(human(format!("Package `{}` does not have these features: \
`{}`", parent.package_id(), features)))
}
}
// Record what list of features is active for this package.
if used_features.len() > 0 {
let pkgid = parent.package_id();
cx.resolve.features.entry(pkgid.clone())
.or_insert(HashSet::new())
.extend(used_features);
}
Ok(ret)
}
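// Example of the feature plumbing above and in `build_features` below (crate
// and feature names are invented): with a manifest containing
//
//     [features]
//     default = ["json"]
//     json = ["serde/derive"]
//
// requesting "json" marks the feature as used, and the "serde/derive" entry is
// not recursed into locally; it is recorded as "the optional dependency
// `serde` must be activated with its own `derive` feature enabled".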
// Returns a pair of (feature dependencies, all used features)
//
// The feature dependencies map is a mapping of package name to list of features
// enabled. Each package should be enabled, and each package should have the
// specified set of features enabled.
//
// The all used features set is the set of features which this local package had
// enabled, which is later used when compiling to instruct the code what
// features were enabled.
fn build_features(s: &Summary, method: Method)
-> CargoResult<(HashMap<String, Vec<String>>, HashSet<String>)> {
let mut deps = HashMap::new();
let mut used = HashSet::new();
let mut visited = HashSet::new();
match method {
Method::Everything => {
for key in s.features().keys() {
try!(add_feature(s, key, &mut deps, &mut used, &mut visited));
}
for dep in s.dependencies().iter().filter(|d| d.is_optional()) {
try!(add_feature(s, dep.name(), &mut deps, &mut used,
&mut visited));
}
}
Method::Required{features: requested_features, ..} => {
for feat in requested_features.iter() {
try!(add_feature(s, feat, &mut deps, &mut used, &mut visited));
}
}
}
match method {
Method::Everything |
Method::Required { uses_default_features: true, .. } => {
if s.features().get("default").is_some() {
try!(add_feature(s, "default", &mut deps, &mut used,
&mut visited));
}
}
Method::Required { uses_default_features: false, .. } => {}
}
return Ok((deps, used));
fn add_feature(s: &Summary, feat: &str,
deps: &mut HashMap<String, Vec<String>>,
used: &mut HashSet<String>,
visited: &mut HashSet<String>) -> CargoResult<()> {
if feat.is_empty() { return Ok(()) }
// If this feature is of the form `foo/bar`, then we just lookup package
// `foo` and enable its feature `bar`. Otherwise this feature is of the
// form `foo` and we need to recurse to enable the feature `foo` for our
// own package, which may end up enabling more features or just enabling
// a dependency.
let mut parts = feat.splitn(2, '/');
let feat_or_package = parts.next().unwrap();
match parts.next() {
Some(feat) => {
let package = feat_or_package;
deps.entry(package.to_string())
.or_insert(Vec::new())
.push(feat.to_string());
}
None => {
let feat = feat_or_package;
if !visited.insert(feat.to_string()) {
return Err(human(format!("Cyclic feature dependency: \
feature `{}` depends on itself",
feat)))
}
used.insert(feat.to_string());
match s.features().get(feat) {
Some(recursive) => {
for f in recursive {
try!(add_feature(s, f, deps, used, visited));
}
}
None => {
deps.entry(feat.to_string()).or_insert(Vec::new());
}
}
visited.remove(&feat.to_string());
}
}
Ok(())
}
}
| {
self.features.get(pkg)
} | identifier_body |
mod.rs | use std::cell::RefCell;
use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::fmt;
use std::rc::Rc;
use semver;
use core::{PackageId, Registry, SourceId, Summary, Dependency};
use core::PackageIdSpec;
use util::{CargoResult, Graph, human, ChainError, CargoError};
use util::profile;
use util::graph::{Nodes, Edges};
pub use self::encode::{EncodableResolve, EncodableDependency, EncodablePackageId};
pub use self::encode::Metadata;
mod encode;
/// Represents a fully resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package as well as what the root package is.
#[derive(PartialEq, Eq, Clone)]
pub struct Resolve {
graph: Graph<PackageId>,
features: HashMap<PackageId, HashSet<String>>,
root: PackageId,
metadata: Option<Metadata>,
}
#[derive(Clone, Copy)]
pub enum Method<'a> {
Everything,
Required{ dev_deps: bool,
features: &'a [String],
uses_default_features: bool,
target_platform: Option<&'a str>},
}
impl Resolve {
fn new(root: PackageId) -> Resolve {
let mut g = Graph::new();
g.add(root.clone(), &[]);
Resolve { graph: g, root: root, features: HashMap::new(), metadata: None }
}
pub fn copy_metadata(&mut self, other: &Resolve) {
self.metadata = other.metadata.clone();
}
pub fn iter(&self) -> Nodes<PackageId> {
self.graph.iter()
}
pub fn root(&self) -> &PackageId { &self.root }
pub fn deps(&self, pkg: &PackageId) -> Option<Edges<PackageId>> {
self.graph.edges(pkg)
}
pub fn query(&self, spec: &str) -> CargoResult<&PackageId> {
let spec = try!(PackageIdSpec::parse(spec).chain_error(|| {
human(format!("invalid package id specification: `{}`", spec))
}));
let mut ids = self.iter().filter(|p| spec.matches(*p));
let ret = match ids.next() {
Some(id) => id,
None => return Err(human(format!("package id specification `{}` \
matched no packages", spec))),
};
return match ids.next() {
Some(other) => {
let mut msg = format!("There are multiple `{}` packages in \
your project, and the specification \
`{}` is ambiguous.\n\
Please re-run this command \
with `-p <spec>` where `<spec>` is one \
of the following:",
spec.name(), spec);
let mut vec = vec![ret, other];
vec.extend(ids);
minimize(&mut msg, vec, &spec);
Err(human(msg))
}
None => Ok(ret)
};
fn minimize(msg: &mut String,
ids: Vec<&PackageId>,
spec: &PackageIdSpec) {
let mut version_cnt = HashMap::new();
for id in ids.iter() {
*version_cnt.entry(id.version()).or_insert(0) += 1;
}
for id in ids.iter() {
if version_cnt[id.version()] == 1 {
msg.push_str(&format!("\n {}:{}", spec.name(),
id.version()));
} else {
msg.push_str(&format!("\n {}",
PackageIdSpec::from_package_id(*id)));
}
}
}
}
pub fn features(&self, pkg: &PackageId) -> Option<&HashSet<String>> {
self.features.get(pkg)
}
}
impl fmt::Debug for Resolve {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "graph: {:?}\n", self.graph));
try!(write!(fmt, "\nfeatures: {{\n"));
for (pkg, features) in &self.features {
try!(write!(fmt, " {}: {:?}\n", pkg, features));
}
write!(fmt, "}}")
}
}
#[derive(Clone)]
struct Context {
activations: HashMap<(String, SourceId), Vec<Rc<Summary>>>,
resolve: Resolve,
visited: Rc<RefCell<HashSet<PackageId>>>,
}
/// Builds the list of all packages required to build the first argument.
pub fn resolve(summary: &Summary, method: Method,
registry: &mut Registry) -> CargoResult<Resolve> {
trace!("resolve; summary={}", summary.package_id());
let summary = Rc::new(summary.clone());
let cx = Box::new(Context {
resolve: Resolve::new(summary.package_id().clone()),
activations: HashMap::new(),
visited: Rc::new(RefCell::new(HashSet::new())),
});
let _p = profile::start(format!("resolving: {}", summary.package_id()));
match try!(activate(cx, registry, &summary, method)) {
Ok(cx) => {
debug!("resolved: {:?}", cx.resolve);
Ok(cx.resolve)
}
Err(e) => Err(e),
}
}
fn activate(mut cx: Box<Context>,
registry: &mut Registry,
parent: &Rc<Summary>,
method: Method)
-> CargoResult<CargoResult<Box<Context>>> {
// Dependency graphs are required to be a DAG, so we keep a set of
// packages we're visiting and bail if we hit a dupe.
let id = parent.package_id();
if !cx.visited.borrow_mut().insert(id.clone()) {
return Err(human(format!("cyclic package dependency: package `{}` \
depends on itself", id)))
}
// If we're already activated, then that was easy!
if flag_activated(&mut *cx, parent, &method) {
cx.visited.borrow_mut().remove(id);
return Ok(Ok(cx))
}
debug!("activating {}", parent.package_id());
// Extracting the platform request.
let platform = match method {
Method::Required{target_platform: platform, ..} => platform,
Method::Everything => None,
};
// First, figure out our set of dependencies based on the requested set of
// features. This also calculates what features we're going to enable for
// our own dependencies.
let deps = try!(resolve_features(&mut cx, parent, method));
// Next, transform all dependencies into a list of possible candidates which
// can satisfy that dependency.
let mut deps = try!(deps.into_iter().map(|(_dep_name, (dep, features))| {
let mut candidates = try!(registry.query(dep));
// When we attempt versions for a package, we'll want to start at the
// maximum version and work our way down.
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
let candidates = candidates.into_iter().map(Rc::new).collect::<Vec<_>>();
Ok((dep, candidates, features))
}).collect::<CargoResult<Vec<_>>>());
// When we recurse, attempt to resolve dependencies with fewer candidates
// before recursing on dependencies with more candidates. This way if the
// dependency with only one candidate can't be resolved we don't have to do
// a bunch of work before we figure that out.
deps.sort_by(|&(_, ref a, _), &(_, ref b, _)| {
a.len().cmp(&b.len())
});
// Workaround compilation error: `deps` does not live long enough
let platform = platform.map(|s| &*s);
Ok(match try!(activate_deps(cx, registry, parent, platform, &deps, 0)) {
Ok(cx) => {
cx.visited.borrow_mut().remove(parent.package_id());
Ok(cx)
}
Err(e) => Err(e),
})
}
// Activate this summary by inserting it into our list of known activations.
//
// Returns if this summary with the given method is already activated.
fn flag_activated(cx: &mut Context,
summary: &Rc<Summary>,
method: &Method) -> bool {
let id = summary.package_id();
let key = (id.name().to_string(), id.source_id().clone());
let prev = cx.activations.entry(key).or_insert(Vec::new());
if !prev.iter().any(|c| c == summary) {
cx.resolve.graph.add(id.clone(), &[]);
prev.push(summary.clone());
return false
}
debug!("checking if {} is already activated", summary.package_id());
let (features, use_default) = match *method {
Method::Required { features, uses_default_features, .. } => {
(features, uses_default_features)
}
Method::Everything => return false,
};
let has_default_feature = summary.features().contains_key("default");
match cx.resolve.features(id) {
Some(prev) => {
features.iter().all(|f| prev.contains(f)) &&
(!use_default || prev.contains("default") || !has_default_feature)
}
None => features.len() == 0 && (!use_default || !has_default_feature)
}
}
fn activate_deps<'a>(cx: Box<Context>,
registry: &mut Registry,
parent: &Summary,
platform: Option<&'a str>,
deps: &'a [(&Dependency, Vec<Rc<Summary>>, Vec<String>)],
cur: usize) -> CargoResult<CargoResult<Box<Context>>> {
if cur == deps.len() { return Ok(Ok(cx)) }
let (dep, ref candidates, ref features) = deps[cur];
let method = Method::Required{
dev_deps: false,
features: &features,
uses_default_features: dep.uses_default_features(),
target_platform: platform};
let key = (dep.name().to_string(), dep.source_id().clone());
let prev_active = cx.activations.get(&key)
.map(|v| &v[..]).unwrap_or(&[]);
trace!("{}[{}]>{} {} candidates", parent.name(), cur, dep.name(),
candidates.len());
trace!("{}[{}]>{} {} prev activations", parent.name(), cur,
dep.name(), prev_active.len());
// Filter the set of candidates based on the previously activated
// versions for this dependency. We can actually use a version if it
// precisely matches an activated version or if it is otherwise
// incompatible with all other activated versions. Note that we define
// "compatible" here in terms of the semver sense where if the left-most
// nonzero digit is the same they're considered compatible.
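    // For example, if version 1.1.0 is already activated, a 1.2.0 candidate is
    // filtered out (compatible but not identical), while 2.0.0 or the already
    // activated 1.1.0 itself passes through.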
let my_candidates = candidates.iter().filter(|&b| {
prev_active.iter().any(|a| a == b) ||
prev_active.iter().all(|a| {
!compatible(a.version(), b.version())
})
});
// Alright, for each candidate that's gotten this far, it meets the
// following requirements:
//
// 1. The version matches the dependency requirement listed for this
// package
// 2. There are no activated versions for this package which are
// semver-compatible, or there's an activated version which is
// precisely equal to `candidate`.
//
// This means that we're going to attempt to activate each candidate in
// turn. We could possibly fail to activate each candidate, so we try
// each one in turn.
let mut last_err = None;
for candidate in my_candidates {
trace!("{}[{}]>{} trying {}", parent.name(), cur, dep.name(),
candidate.version());
let mut my_cx = cx.clone();
my_cx.resolve.graph.link(parent.package_id().clone(),
candidate.package_id().clone());
// If we hit an intransitive dependency then clear out the visitation
// list as we can't induce a cycle through transitive dependencies.
if !dep.is_transitive() {
my_cx.visited.borrow_mut().clear();
}
let my_cx = match try!(activate(my_cx, registry, candidate, method)) {
Ok(cx) => cx,
Err(e) => { last_err = Some(e); continue }
};
match try!(activate_deps(my_cx, registry, parent, platform, deps,
cur + 1)) {
Ok(cx) => return Ok(Ok(cx)),
Err(e) => { last_err = Some(e); }
}
}
trace!("{}[{}]>{} -- {:?}", parent.name(), cur, dep.name(),
last_err);
// Oh well, we couldn't activate any of the candidates, so we just can't
// activate this dependency at all
Ok(activation_error(&cx, registry, last_err, parent, dep, prev_active,
&candidates))
}
fn activation_error(cx: &Context,
registry: &mut Registry,
err: Option<Box<CargoError>>,
parent: &Summary,
dep: &Dependency,
prev_active: &[Rc<Summary>],
candidates: &[Rc<Summary>]) -> CargoResult<Box<Context>> {
match err {
Some(e) => return Err(e),
None => {}
}
if candidates.len() > 0 {
let mut msg = format!("failed to select a version for `{}` \
(required by `{}`):\n\
all possible versions conflict with \
previously selected versions of `{}`",
dep.name(), parent.name(),
dep.name());
'outer: for v in prev_active.iter() {
for node in cx.resolve.graph.iter() {
let edges = match cx.resolve.graph.edges(node) {
Some(edges) => edges,
None => continue,
};
for edge in edges {
if edge != v.package_id() { continue }
msg.push_str(&format!("\n version {} in use by {}",
v.version(), edge));
continue 'outer;
}
}
msg.push_str(&format!("\n version {} in use by ??",
v.version()));
}
msg.push_str(&format!("\n possible versions to select: {}",
candidates.iter()
.map(|v| v.version())
.map(|v| v.to_string())
.collect::<Vec<_>>()
.connect(", ")));
return Err(human(msg))
}
// Once we're all the way down here, we're definitely lost in the
// weeds! We didn't actually use any candidates above, so we need to
// give an error message that nothing was found.
//
// Note that we re-query the registry with a new dependency that
// allows any version so we can give some nicer error reporting
// which indicates a few versions that were actually found.
let msg = format!("no matching package named `{}` found \
(required by `{}`)\n\
location searched: {}\n\
version required: {}",
dep.name(), parent.name(),
dep.source_id(),
dep.version_req());
let mut msg = msg;
let all_req = semver::VersionReq::parse("*").unwrap();
let new_dep = dep.clone().set_version_req(all_req);
let mut candidates = try!(registry.query(&new_dep));
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
if candidates.len() > 0 {
msg.push_str("\nversions found: ");
for (i, c) in candidates.iter().take(3).enumerate() {
if i != 0 { msg.push_str(", "); }
msg.push_str(&c.version().to_string());
}
if candidates.len() > 3 {
msg.push_str(", ...");
}
}
// If we have a path dependency with a locked version, then this may
// indicate that we updated a sub-package and forgot to run `cargo
// update`. In this case try to print a helpful error!
if dep.source_id().is_path() &&
dep.version_req().to_string().starts_with("=") &&
candidates.len() > 0 {
msg.push_str("\nconsider running `cargo update` to update \
a path dependency's locked version");
}
Err(human(msg))
}
// Returns if `a` and `b` are compatible in the semver sense. This is a
// commutative operation.
//
// Versions `a` and `b` are compatible if their left-most nonzero digit is the
// same.
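// For example, 1.2.3 and 1.4.0 are compatible, 0.2.1 and 0.3.4 are not, and a
// 0.0.x version is only compatible with exactly the same 0.0.x patch release.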
fn compatible(a: &semver::Version, b: &semver::Version) -> bool {
if a.major != b.major { return false }
if a.major != 0 { return true }
if a.minor != b.minor { return false }
if a.minor != 0 { return true }
a.patch == b.patch
}
fn resolve_features<'a>(cx: &mut Context, parent: &'a Summary,
method: Method)
-> CargoResult<HashMap<&'a str,
(&'a Dependency, Vec<String>)>> {
let dev_deps = match method {
Method::Everything => true,
Method::Required { dev_deps, .. } => dev_deps,
};
// First, filter by dev-dependencies
let deps = parent.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps);
// Second, ignoring dependencies that should not be compiled for this platform
let deps = deps.filter(|d| {
match method {
Method::Required{target_platform: Some(ref platform), ..} => {
d.is_active_for_platform(platform)
},
_ => true
}
});
let (mut feature_deps, used_features) = try!(build_features(parent, method));
let mut ret = HashMap::new();
// Next, sanitize all requested features by whitelisting all the requested
// features that correspond to optional dependencies
for dep in deps {
// weed out optional dependencies, but not those required
if dep.is_optional() && !feature_deps.contains_key(dep.name()) {
continue
}
let mut base = feature_deps.remove(dep.name()).unwrap_or(vec![]);
for feature in dep.features().iter() {
base.push(feature.clone());
if feature.contains("/") {
return Err(human(format!("features in dependencies \
cannot enable features in \
other dependencies: `{}`",
feature)));
}
}
ret.insert(dep.name(), (dep, base));
}
// All features can only point to optional dependencies, in which case they
// should have all been weeded out by the above iteration. Any remaining
// features are bugs in that the package does not actually have those
// features.
if feature_deps.len() > 0 {
let unknown = feature_deps.keys().map(|s| &s[..])
.collect::<Vec<&str>>();
if unknown.len() > 0 {
let features = unknown.connect(", ");
return Err(human(format!("Package `{}` does not have these features: \
`{}`", parent.package_id(), features)))
}
}
// Record what list of features is active for this package.
if used_features.len() > 0 {
let pkgid = parent.package_id();
cx.resolve.features.entry(pkgid.clone())
.or_insert(HashSet::new())
.extend(used_features);
}
Ok(ret)
}
// Returns a pair of (feature dependencies, all used features)
//
// The feature dependencies map is a mapping of package name to list of features
// enabled. Each package should be enabled, and each package should have the
// specified set of features enabled.
//
// The all used features set is the set of features which this local package had
// enabled, which is later used when compiling to instruct the code what
// features were enabled.
fn build_features(s: &Summary, method: Method)
-> CargoResult<(HashMap<String, Vec<String>>, HashSet<String>)> {
let mut deps = HashMap::new();
let mut used = HashSet::new();
let mut visited = HashSet::new();
match method {
Method::Everything => |
Method::Required{features: requested_features, ..} => {
for feat in requested_features.iter() {
try!(add_feature(s, feat, &mut deps, &mut used, &mut visited));
}
}
}
match method {
Method::Everything |
Method::Required { uses_default_features: true, .. } => {
if s.features().get("default").is_some() {
try!(add_feature(s, "default", &mut deps, &mut used,
&mut visited));
}
}
Method::Required { uses_default_features: false, .. } => {}
}
return Ok((deps, used));
fn add_feature(s: &Summary, feat: &str,
deps: &mut HashMap<String, Vec<String>>,
used: &mut HashSet<String>,
visited: &mut HashSet<String>) -> CargoResult<()> {
if feat.is_empty() { return Ok(()) }
// If this feature is of the form `foo/bar`, then we just lookup package
// `foo` and enable its feature `bar`. Otherwise this feature is of the
// form `foo` and we need to recurse to enable the feature `foo` for our
// own package, which may end up enabling more features or just enabling
// a dependency.
let mut parts = feat.splitn(2, '/');
let feat_or_package = parts.next().unwrap();
match parts.next() {
Some(feat) => {
let package = feat_or_package;
deps.entry(package.to_string())
.or_insert(Vec::new())
.push(feat.to_string());
}
None => {
let feat = feat_or_package;
if !visited.insert(feat.to_string()) {
return Err(human(format!("Cyclic feature dependency: \
feature `{}` depends on itself",
feat)))
}
used.insert(feat.to_string());
match s.features().get(feat) {
Some(recursive) => {
for f in recursive {
try!(add_feature(s, f, deps, used, visited));
}
}
None => {
deps.entry(feat.to_string()).or_insert(Vec::new());
}
}
visited.remove(&feat.to_string());
}
}
Ok(())
}
}
| {
for key in s.features().keys() {
try!(add_feature(s, key, &mut deps, &mut used, &mut visited));
}
for dep in s.dependencies().iter().filter(|d| d.is_optional()) {
try!(add_feature(s, dep.name(), &mut deps, &mut used,
&mut visited));
}
} | conditional_block |
mod.rs | use std::cell::RefCell;
use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::fmt;
use std::rc::Rc;
use semver;
use core::{PackageId, Registry, SourceId, Summary, Dependency};
use core::PackageIdSpec;
use util::{CargoResult, Graph, human, ChainError, CargoError};
use util::profile;
use util::graph::{Nodes, Edges};
pub use self::encode::{EncodableResolve, EncodableDependency, EncodablePackageId};
pub use self::encode::Metadata;
mod encode;
/// Represents a fully resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package as well as what the root package is.
#[derive(PartialEq, Eq, Clone)]
pub struct Resolve {
graph: Graph<PackageId>,
features: HashMap<PackageId, HashSet<String>>,
root: PackageId,
metadata: Option<Metadata>,
}
#[derive(Clone, Copy)]
pub enum Method<'a> {
Everything,
Required{ dev_deps: bool,
features: &'a [String],
uses_default_features: bool,
target_platform: Option<&'a str>},
}
impl Resolve {
fn new(root: PackageId) -> Resolve {
let mut g = Graph::new();
g.add(root.clone(), &[]);
Resolve { graph: g, root: root, features: HashMap::new(), metadata: None }
}
pub fn copy_metadata(&mut self, other: &Resolve) {
self.metadata = other.metadata.clone();
}
pub fn | (&self) -> Nodes<PackageId> {
self.graph.iter()
}
pub fn root(&self) -> &PackageId { &self.root }
pub fn deps(&self, pkg: &PackageId) -> Option<Edges<PackageId>> {
self.graph.edges(pkg)
}
pub fn query(&self, spec: &str) -> CargoResult<&PackageId> {
let spec = try!(PackageIdSpec::parse(spec).chain_error(|| {
human(format!("invalid package id specification: `{}`", spec))
}));
let mut ids = self.iter().filter(|p| spec.matches(*p));
let ret = match ids.next() {
Some(id) => id,
None => return Err(human(format!("package id specification `{}` \
matched no packages", spec))),
};
return match ids.next() {
Some(other) => {
let mut msg = format!("There are multiple `{}` packages in \
your project, and the specification \
`{}` is ambiguous.\n\
Please re-run this command \
with `-p <spec>` where `<spec>` is one \
of the following:",
spec.name(), spec);
let mut vec = vec![ret, other];
vec.extend(ids);
minimize(&mut msg, vec, &spec);
Err(human(msg))
}
None => Ok(ret)
};
fn minimize(msg: &mut String,
ids: Vec<&PackageId>,
spec: &PackageIdSpec) {
let mut version_cnt = HashMap::new();
for id in ids.iter() {
*version_cnt.entry(id.version()).or_insert(0) += 1;
}
for id in ids.iter() {
if version_cnt[id.version()] == 1 {
msg.push_str(&format!("\n {}:{}", spec.name(),
id.version()));
} else {
msg.push_str(&format!("\n {}",
PackageIdSpec::from_package_id(*id)));
}
}
}
}
pub fn features(&self, pkg: &PackageId) -> Option<&HashSet<String>> {
self.features.get(pkg)
}
}
impl fmt::Debug for Resolve {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "graph: {:?}\n", self.graph));
try!(write!(fmt, "\nfeatures: {{\n"));
for (pkg, features) in &self.features {
try!(write!(fmt, " {}: {:?}\n", pkg, features));
}
write!(fmt, "}}")
}
}
#[derive(Clone)]
struct Context {
activations: HashMap<(String, SourceId), Vec<Rc<Summary>>>,
resolve: Resolve,
visited: Rc<RefCell<HashSet<PackageId>>>,
}
/// Builds the list of all packages required to build the first argument.
pub fn resolve(summary: &Summary, method: Method,
registry: &mut Registry) -> CargoResult<Resolve> {
trace!("resolve; summary={}", summary.package_id());
let summary = Rc::new(summary.clone());
let cx = Box::new(Context {
resolve: Resolve::new(summary.package_id().clone()),
activations: HashMap::new(),
visited: Rc::new(RefCell::new(HashSet::new())),
});
let _p = profile::start(format!("resolving: {}", summary.package_id()));
match try!(activate(cx, registry, &summary, method)) {
Ok(cx) => {
debug!("resolved: {:?}", cx.resolve);
Ok(cx.resolve)
}
Err(e) => Err(e),
}
}
fn activate(mut cx: Box<Context>,
registry: &mut Registry,
parent: &Rc<Summary>,
method: Method)
-> CargoResult<CargoResult<Box<Context>>> {
// Dependency graphs are required to be a DAG, so we keep a set of
// packages we're visiting and bail if we hit a dupe.
let id = parent.package_id();
if !cx.visited.borrow_mut().insert(id.clone()) {
return Err(human(format!("cyclic package dependency: package `{}` \
depends on itself", id)))
}
// If we're already activated, then that was easy!
if flag_activated(&mut *cx, parent, &method) {
cx.visited.borrow_mut().remove(id);
return Ok(Ok(cx))
}
debug!("activating {}", parent.package_id());
// Extracting the platform request.
let platform = match method {
Method::Required{target_platform: platform, ..} => platform,
Method::Everything => None,
};
    // First, figure out our set of dependencies based on the requested set of
// features. This also calculates what features we're going to enable for
// our own dependencies.
let deps = try!(resolve_features(&mut cx, parent, method));
// Next, transform all dependencies into a list of possible candidates which
// can satisfy that dependency.
let mut deps = try!(deps.into_iter().map(|(_dep_name, (dep, features))| {
let mut candidates = try!(registry.query(dep));
// When we attempt versions for a package, we'll want to start at the
// maximum version and work our way down.
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
let candidates = candidates.into_iter().map(Rc::new).collect::<Vec<_>>();
Ok((dep, candidates, features))
}).collect::<CargoResult<Vec<_>>>());
// When we recurse, attempt to resolve dependencies with fewer candidates
// before recursing on dependencies with more candidates. This way if the
// dependency with only one candidate can't be resolved we don't have to do
// a bunch of work before we figure that out.
deps.sort_by(|&(_, ref a, _), &(_, ref b, _)| {
a.len().cmp(&b.len())
});
// Workaround compilation error: `deps` does not live long enough
let platform = platform.map(|s| &*s);
Ok(match try!(activate_deps(cx, registry, parent, platform, &deps, 0)) {
Ok(cx) => {
cx.visited.borrow_mut().remove(parent.package_id());
Ok(cx)
}
Err(e) => Err(e),
})
}
// Activate this summary by inserting it into our list of known activations.
//
// Returns if this summary with the given method is already activated.
fn flag_activated(cx: &mut Context,
summary: &Rc<Summary>,
method: &Method) -> bool {
let id = summary.package_id();
let key = (id.name().to_string(), id.source_id().clone());
let prev = cx.activations.entry(key).or_insert(Vec::new());
if !prev.iter().any(|c| c == summary) {
cx.resolve.graph.add(id.clone(), &[]);
prev.push(summary.clone());
return false
}
debug!("checking if {} is already activated", summary.package_id());
let (features, use_default) = match *method {
Method::Required { features, uses_default_features, .. } => {
(features, uses_default_features)
}
Method::Everything => return false,
};
let has_default_feature = summary.features().contains_key("default");
match cx.resolve.features(id) {
Some(prev) => {
features.iter().all(|f| prev.contains(f)) &&
(!use_default || prev.contains("default") || !has_default_feature)
}
None => features.len() == 0 && (!use_default || !has_default_feature)
}
}
fn activate_deps<'a>(cx: Box<Context>,
registry: &mut Registry,
parent: &Summary,
platform: Option<&'a str>,
deps: &'a [(&Dependency, Vec<Rc<Summary>>, Vec<String>)],
cur: usize) -> CargoResult<CargoResult<Box<Context>>> {
if cur == deps.len() { return Ok(Ok(cx)) }
let (dep, ref candidates, ref features) = deps[cur];
let method = Method::Required{
dev_deps: false,
features: &features,
uses_default_features: dep.uses_default_features(),
target_platform: platform};
let key = (dep.name().to_string(), dep.source_id().clone());
let prev_active = cx.activations.get(&key)
.map(|v| &v[..]).unwrap_or(&[]);
trace!("{}[{}]>{} {} candidates", parent.name(), cur, dep.name(),
candidates.len());
trace!("{}[{}]>{} {} prev activations", parent.name(), cur,
dep.name(), prev_active.len());
// Filter the set of candidates based on the previously activated
// versions for this dependency. We can actually use a version if it
// precisely matches an activated version or if it is otherwise
// incompatible with all other activated versions. Note that we define
// "compatible" here in terms of the semver sense where if the left-most
// nonzero digit is the same they're considered compatible.
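    // For example, if version 1.1.0 is already activated, a 1.2.0 candidate is
    // filtered out (compatible but not identical), while 2.0.0 or the already
    // activated 1.1.0 itself passes through.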
let my_candidates = candidates.iter().filter(|&b| {
prev_active.iter().any(|a| a == b) ||
prev_active.iter().all(|a| {
!compatible(a.version(), b.version())
})
});
// Alright, for each candidate that's gotten this far, it meets the
// following requirements:
//
// 1. The version matches the dependency requirement listed for this
// package
// 2. There are no activated versions for this package which are
// semver-compatible, or there's an activated version which is
// precisely equal to `candidate`.
//
// This means that we're going to attempt to activate each candidate in
// turn. We could possibly fail to activate each candidate, so we try
// each one in turn.
let mut last_err = None;
for candidate in my_candidates {
trace!("{}[{}]>{} trying {}", parent.name(), cur, dep.name(),
candidate.version());
let mut my_cx = cx.clone();
my_cx.resolve.graph.link(parent.package_id().clone(),
candidate.package_id().clone());
// If we hit an intransitive dependency then clear out the visitation
// list as we can't induce a cycle through transitive dependencies.
if !dep.is_transitive() {
my_cx.visited.borrow_mut().clear();
}
let my_cx = match try!(activate(my_cx, registry, candidate, method)) {
Ok(cx) => cx,
Err(e) => { last_err = Some(e); continue }
};
match try!(activate_deps(my_cx, registry, parent, platform, deps,
cur + 1)) {
Ok(cx) => return Ok(Ok(cx)),
Err(e) => { last_err = Some(e); }
}
}
trace!("{}[{}]>{} -- {:?}", parent.name(), cur, dep.name(),
last_err);
// Oh well, we couldn't activate any of the candidates, so we just can't
// activate this dependency at all
Ok(activation_error(&cx, registry, last_err, parent, dep, prev_active,
&candidates))
}
fn activation_error(cx: &Context,
registry: &mut Registry,
err: Option<Box<CargoError>>,
parent: &Summary,
dep: &Dependency,
prev_active: &[Rc<Summary>],
candidates: &[Rc<Summary>]) -> CargoResult<Box<Context>> {
match err {
Some(e) => return Err(e),
None => {}
}
if candidates.len() > 0 {
let mut msg = format!("failed to select a version for `{}` \
(required by `{}`):\n\
all possible versions conflict with \
previously selected versions of `{}`",
dep.name(), parent.name(),
dep.name());
'outer: for v in prev_active.iter() {
for node in cx.resolve.graph.iter() {
let edges = match cx.resolve.graph.edges(node) {
Some(edges) => edges,
None => continue,
};
for edge in edges {
if edge != v.package_id() { continue }
msg.push_str(&format!("\n version {} in use by {}",
v.version(), edge));
continue 'outer;
}
}
msg.push_str(&format!("\n version {} in use by ??",
v.version()));
}
msg.push_str(&format!("\n possible versions to select: {}",
candidates.iter()
.map(|v| v.version())
.map(|v| v.to_string())
.collect::<Vec<_>>()
.connect(", ")));
return Err(human(msg))
}
// Once we're all the way down here, we're definitely lost in the
// weeds! We didn't actually use any candidates above, so we need to
// give an error message that nothing was found.
//
// Note that we re-query the registry with a new dependency that
// allows any version so we can give some nicer error reporting
// which indicates a few versions that were actually found.
let msg = format!("no matching package named `{}` found \
(required by `{}`)\n\
location searched: {}\n\
version required: {}",
dep.name(), parent.name(),
dep.source_id(),
dep.version_req());
let mut msg = msg;
let all_req = semver::VersionReq::parse("*").unwrap();
let new_dep = dep.clone().set_version_req(all_req);
let mut candidates = try!(registry.query(&new_dep));
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
if candidates.len() > 0 {
msg.push_str("\nversions found: ");
for (i, c) in candidates.iter().take(3).enumerate() {
if i != 0 { msg.push_str(", "); }
msg.push_str(&c.version().to_string());
}
if candidates.len() > 3 {
msg.push_str(", ...");
}
}
// If we have a path dependency with a locked version, then this may
// indicate that we updated a sub-package and forgot to run `cargo
// update`. In this case try to print a helpful error!
if dep.source_id().is_path() &&
dep.version_req().to_string().starts_with("=") &&
candidates.len() > 0 {
msg.push_str("\nconsider running `cargo update` to update \
a path dependency's locked version");
}
Err(human(msg))
}
// Returns if `a` and `b` are compatible in the semver sense. This is a
// commutative operation.
//
// Versions `a` and `b` are compatible if their left-most nonzero digit is the
// same.
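// For example, 1.2.3 and 1.4.0 are compatible, 0.2.1 and 0.3.4 are not, and a
// 0.0.x version is only compatible with exactly the same 0.0.x patch release.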
fn compatible(a: &semver::Version, b: &semver::Version) -> bool {
if a.major != b.major { return false }
if a.major != 0 { return true }
if a.minor != b.minor { return false }
if a.minor != 0 { return true }
a.patch == b.patch
}
fn resolve_features<'a>(cx: &mut Context, parent: &'a Summary,
method: Method)
-> CargoResult<HashMap<&'a str,
(&'a Dependency, Vec<String>)>> {
let dev_deps = match method {
Method::Everything => true,
Method::Required { dev_deps, .. } => dev_deps,
};
// First, filter by dev-dependencies
let deps = parent.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps);
// Second, ignoring dependencies that should not be compiled for this platform
let deps = deps.filter(|d| {
match method {
Method::Required{target_platform: Some(ref platform), ..} => {
d.is_active_for_platform(platform)
},
_ => true
}
});
let (mut feature_deps, used_features) = try!(build_features(parent, method));
let mut ret = HashMap::new();
// Next, sanitize all requested features by whitelisting all the requested
// features that correspond to optional dependencies
for dep in deps {
// weed out optional dependencies, but not those required
if dep.is_optional() && !feature_deps.contains_key(dep.name()) {
continue
}
let mut base = feature_deps.remove(dep.name()).unwrap_or(vec![]);
for feature in dep.features().iter() {
base.push(feature.clone());
if feature.contains("/") {
return Err(human(format!("features in dependencies \
cannot enable features in \
other dependencies: `{}`",
feature)));
}
}
ret.insert(dep.name(), (dep, base));
}
// All features can only point to optional dependencies, in which case they
// should have all been weeded out by the above iteration. Any remaining
// features are bugs in that the package does not actually have those
// features.
if feature_deps.len() > 0 {
let unknown = feature_deps.keys().map(|s| &s[..])
.collect::<Vec<&str>>();
if unknown.len() > 0 {
let features = unknown.connect(", ");
return Err(human(format!("Package `{}` does not have these features: \
`{}`", parent.package_id(), features)))
}
}
// Record what list of features is active for this package.
if used_features.len() > 0 {
let pkgid = parent.package_id();
cx.resolve.features.entry(pkgid.clone())
.or_insert(HashSet::new())
.extend(used_features);
}
Ok(ret)
}
// Returns a pair of (feature dependencies, all used features)
//
// The feature dependencies map is a mapping of package name to list of features
// enabled. Each package should be enabled, and each package should have the
// specified set of features enabled.
//
// The all used features set is the set of features which this local package had
// enabled, which is later used when compiling to instruct the code what
// features were enabled.
fn build_features(s: &Summary, method: Method)
-> CargoResult<(HashMap<String, Vec<String>>, HashSet<String>)> {
let mut deps = HashMap::new();
let mut used = HashSet::new();
let mut visited = HashSet::new();
match method {
Method::Everything => {
for key in s.features().keys() {
try!(add_feature(s, key, &mut deps, &mut used, &mut visited));
}
for dep in s.dependencies().iter().filter(|d| d.is_optional()) {
try!(add_feature(s, dep.name(), &mut deps, &mut used,
&mut visited));
}
}
Method::Required{features: requested_features, ..} => {
for feat in requested_features.iter() {
try!(add_feature(s, feat, &mut deps, &mut used, &mut visited));
}
}
}
match method {
Method::Everything |
Method::Required { uses_default_features: true, .. } => {
if s.features().get("default").is_some() {
try!(add_feature(s, "default", &mut deps, &mut used,
&mut visited));
}
}
Method::Required { uses_default_features: false, .. } => {}
}
return Ok((deps, used));
fn add_feature(s: &Summary, feat: &str,
deps: &mut HashMap<String, Vec<String>>,
used: &mut HashSet<String>,
visited: &mut HashSet<String>) -> CargoResult<()> {
if feat.is_empty() { return Ok(()) }
// If this feature is of the form `foo/bar`, then we just lookup package
// `foo` and enable its feature `bar`. Otherwise this feature is of the
// form `foo` and we need to recurse to enable the feature `foo` for our
// own package, which may end up enabling more features or just enabling
// a dependency.
let mut parts = feat.splitn(2, '/');
let feat_or_package = parts.next().unwrap();
match parts.next() {
Some(feat) => {
let package = feat_or_package;
deps.entry(package.to_string())
.or_insert(Vec::new())
.push(feat.to_string());
}
None => {
let feat = feat_or_package;
if !visited.insert(feat.to_string()) {
return Err(human(format!("Cyclic feature dependency: \
feature `{}` depends on itself",
feat)))
}
used.insert(feat.to_string());
match s.features().get(feat) {
Some(recursive) => {
for f in recursive {
try!(add_feature(s, f, deps, used, visited));
}
}
None => {
deps.entry(feat.to_string()).or_insert(Vec::new());
}
}
visited.remove(&feat.to_string());
}
}
Ok(())
}
}
| iter | identifier_name |
mod.rs | use std::cell::RefCell;
use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::fmt;
use std::rc::Rc;
use semver;
use core::{PackageId, Registry, SourceId, Summary, Dependency};
use core::PackageIdSpec;
use util::{CargoResult, Graph, human, ChainError, CargoError};
use util::profile;
use util::graph::{Nodes, Edges};
pub use self::encode::{EncodableResolve, EncodableDependency, EncodablePackageId};
pub use self::encode::Metadata;
mod encode;
/// Represents a fully resolved package dependency graph. Each node in the graph
/// is a package and edges represent dependencies between packages.
///
/// Each instance of `Resolve` also understands the full set of features used
/// for each package as well as what the root package is.
#[derive(PartialEq, Eq, Clone)]
pub struct Resolve {
graph: Graph<PackageId>,
features: HashMap<PackageId, HashSet<String>>,
root: PackageId,
metadata: Option<Metadata>,
}
#[derive(Clone, Copy)]
pub enum Method<'a> {
Everything,
Required{ dev_deps: bool,
features: &'a [String],
uses_default_features: bool,
target_platform: Option<&'a str>},
}
impl Resolve {
fn new(root: PackageId) -> Resolve {
let mut g = Graph::new();
g.add(root.clone(), &[]);
Resolve { graph: g, root: root, features: HashMap::new(), metadata: None }
}
pub fn copy_metadata(&mut self, other: &Resolve) {
self.metadata = other.metadata.clone();
}
pub fn iter(&self) -> Nodes<PackageId> {
self.graph.iter()
}
pub fn root(&self) -> &PackageId { &self.root }
pub fn deps(&self, pkg: &PackageId) -> Option<Edges<PackageId>> {
self.graph.edges(pkg)
}
pub fn query(&self, spec: &str) -> CargoResult<&PackageId> {
let spec = try!(PackageIdSpec::parse(spec).chain_error(|| {
human(format!("invalid package id specification: `{}`", spec))
}));
let mut ids = self.iter().filter(|p| spec.matches(*p));
let ret = match ids.next() {
Some(id) => id,
None => return Err(human(format!("package id specification `{}` \
matched no packages", spec))),
};
return match ids.next() {
Some(other) => {
let mut msg = format!("There are multiple `{}` packages in \
your project, and the specification \
`{}` is ambiguous.\n\
Please re-run this command \
with `-p <spec>` where `<spec>` is one \
of the following:",
spec.name(), spec);
let mut vec = vec![ret, other];
vec.extend(ids);
minimize(&mut msg, vec, &spec);
Err(human(msg))
}
None => Ok(ret)
};
fn minimize(msg: &mut String,
ids: Vec<&PackageId>,
spec: &PackageIdSpec) {
let mut version_cnt = HashMap::new();
for id in ids.iter() {
*version_cnt.entry(id.version()).or_insert(0) += 1;
}
for id in ids.iter() {
if version_cnt[id.version()] == 1 {
msg.push_str(&format!("\n {}:{}", spec.name(),
id.version()));
} else {
msg.push_str(&format!("\n {}",
PackageIdSpec::from_package_id(*id)));
}
}
}
}
pub fn features(&self, pkg: &PackageId) -> Option<&HashSet<String>> {
self.features.get(pkg)
}
}
impl fmt::Debug for Resolve {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
try!(write!(fmt, "graph: {:?}\n", self.graph));
try!(write!(fmt, "\nfeatures: {{\n"));
for (pkg, features) in &self.features {
try!(write!(fmt, " {}: {:?}\n", pkg, features));
}
write!(fmt, "}}")
}
}
#[derive(Clone)]
struct Context {
activations: HashMap<(String, SourceId), Vec<Rc<Summary>>>,
resolve: Resolve,
visited: Rc<RefCell<HashSet<PackageId>>>,
}
/// Builds the list of all packages required to build the first argument.
pub fn resolve(summary: &Summary, method: Method,
registry: &mut Registry) -> CargoResult<Resolve> {
trace!("resolve; summary={}", summary.package_id());
let summary = Rc::new(summary.clone());
let cx = Box::new(Context {
resolve: Resolve::new(summary.package_id().clone()),
activations: HashMap::new(),
visited: Rc::new(RefCell::new(HashSet::new())),
});
let _p = profile::start(format!("resolving: {}", summary.package_id()));
match try!(activate(cx, registry, &summary, method)) {
Ok(cx) => {
debug!("resolved: {:?}", cx.resolve);
Ok(cx.resolve)
}
Err(e) => Err(e),
}
}
fn activate(mut cx: Box<Context>,
registry: &mut Registry,
parent: &Rc<Summary>,
method: Method)
-> CargoResult<CargoResult<Box<Context>>> {
// Dependency graphs are required to be a DAG, so we keep a set of
// packages we're visiting and bail if we hit a dupe.
let id = parent.package_id();
if !cx.visited.borrow_mut().insert(id.clone()) {
return Err(human(format!("cyclic package dependency: package `{}` \
depends on itself", id)))
}
// If we're already activated, then that was easy!
if flag_activated(&mut *cx, parent, &method) {
cx.visited.borrow_mut().remove(id);
return Ok(Ok(cx))
}
debug!("activating {}", parent.package_id());
// Extracting the platform request.
let platform = match method {
Method::Required{target_platform: platform, ..} => platform,
Method::Everything => None,
};
    // First, figure out our set of dependencies based on the requested set of
// features. This also calculates what features we're going to enable for
// our own dependencies.
let deps = try!(resolve_features(&mut cx, parent, method));
// Next, transform all dependencies into a list of possible candidates which
// can satisfy that dependency.
let mut deps = try!(deps.into_iter().map(|(_dep_name, (dep, features))| {
let mut candidates = try!(registry.query(dep));
// When we attempt versions for a package, we'll want to start at the
// maximum version and work our way down.
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
let candidates = candidates.into_iter().map(Rc::new).collect::<Vec<_>>();
Ok((dep, candidates, features))
}).collect::<CargoResult<Vec<_>>>());
// When we recurse, attempt to resolve dependencies with fewer candidates
// before recursing on dependencies with more candidates. This way if the
// dependency with only one candidate can't be resolved we don't have to do
// a bunch of work before we figure that out.
deps.sort_by(|&(_, ref a, _), &(_, ref b, _)| {
a.len().cmp(&b.len())
});
// Workaround compilation error: `deps` does not live long enough
let platform = platform.map(|s| &*s);
Ok(match try!(activate_deps(cx, registry, parent, platform, &deps, 0)) {
Ok(cx) => {
cx.visited.borrow_mut().remove(parent.package_id());
Ok(cx)
}
Err(e) => Err(e),
})
}
// Activate this summary by inserting it into our list of known activations.
//
// Returns if this summary with the given method is already activated.
fn flag_activated(cx: &mut Context,
summary: &Rc<Summary>,
method: &Method) -> bool {
let id = summary.package_id();
let key = (id.name().to_string(), id.source_id().clone());
let prev = cx.activations.entry(key).or_insert(Vec::new());
if !prev.iter().any(|c| c == summary) {
cx.resolve.graph.add(id.clone(), &[]);
prev.push(summary.clone());
return false
}
debug!("checking if {} is already activated", summary.package_id());
let (features, use_default) = match *method {
Method::Required { features, uses_default_features, .. } => {
(features, uses_default_features)
}
Method::Everything => return false,
};
let has_default_feature = summary.features().contains_key("default");
match cx.resolve.features(id) {
Some(prev) => {
features.iter().all(|f| prev.contains(f)) &&
(!use_default || prev.contains("default") || !has_default_feature)
}
None => features.len() == 0 && (!use_default || !has_default_feature)
}
}
fn activate_deps<'a>(cx: Box<Context>,
registry: &mut Registry,
parent: &Summary,
platform: Option<&'a str>,
deps: &'a [(&Dependency, Vec<Rc<Summary>>, Vec<String>)],
cur: usize) -> CargoResult<CargoResult<Box<Context>>> {
if cur == deps.len() { return Ok(Ok(cx)) }
let (dep, ref candidates, ref features) = deps[cur];
let method = Method::Required{
dev_deps: false,
features: &features,
uses_default_features: dep.uses_default_features(),
target_platform: platform};
let key = (dep.name().to_string(), dep.source_id().clone());
let prev_active = cx.activations.get(&key)
.map(|v| &v[..]).unwrap_or(&[]);
trace!("{}[{}]>{} {} candidates", parent.name(), cur, dep.name(),
candidates.len());
trace!("{}[{}]>{} {} prev activations", parent.name(), cur,
dep.name(), prev_active.len());
// Filter the set of candidates based on the previously activated
// versions for this dependency. We can actually use a version if it
// precisely matches an activated version or if it is otherwise
// incompatible with all other activated versions. Note that we define
// "compatible" here in terms of the semver sense where if the left-most
// nonzero digit is the same they're considered compatible.
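    // For example, if version 1.1.0 is already activated, a 1.2.0 candidate is
    // filtered out (compatible but not identical), while 2.0.0 or the already
    // activated 1.1.0 itself passes through.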
let my_candidates = candidates.iter().filter(|&b| {
prev_active.iter().any(|a| a == b) ||
prev_active.iter().all(|a| {
!compatible(a.version(), b.version())
})
});
// Alright, for each candidate that's gotten this far, it meets the
// following requirements:
//
// 1. The version matches the dependency requirement listed for this
// package
// 2. There are no activated versions for this package which are
// semver-compatible, or there's an activated version which is
// precisely equal to `candidate`.
//
// This means that we're going to attempt to activate each candidate in
// turn. We could possibly fail to activate each candidate, so we try
// each one in turn.
let mut last_err = None;
for candidate in my_candidates {
trace!("{}[{}]>{} trying {}", parent.name(), cur, dep.name(),
candidate.version());
let mut my_cx = cx.clone();
my_cx.resolve.graph.link(parent.package_id().clone(),
candidate.package_id().clone());
// If we hit an intransitive dependency then clear out the visitation
// list as we can't induce a cycle through transitive dependencies.
if !dep.is_transitive() {
my_cx.visited.borrow_mut().clear();
}
let my_cx = match try!(activate(my_cx, registry, candidate, method)) {
Ok(cx) => cx,
Err(e) => { last_err = Some(e); continue }
};
match try!(activate_deps(my_cx, registry, parent, platform, deps,
cur + 1)) {
Ok(cx) => return Ok(Ok(cx)),
Err(e) => { last_err = Some(e); }
}
}
trace!("{}[{}]>{} -- {:?}", parent.name(), cur, dep.name(),
last_err);
// Oh well, we couldn't activate any of the candidates, so we just can't
// activate this dependency at all
Ok(activation_error(&cx, registry, last_err, parent, dep, prev_active,
&candidates))
}
fn activation_error(cx: &Context,
registry: &mut Registry,
err: Option<Box<CargoError>>,
parent: &Summary,
dep: &Dependency,
prev_active: &[Rc<Summary>],
candidates: &[Rc<Summary>]) -> CargoResult<Box<Context>> {
match err {
Some(e) => return Err(e),
None => {}
}
if candidates.len() > 0 {
let mut msg = format!("failed to select a version for `{}` \
(required by `{}`):\n\
all possible versions conflict with \
previously selected versions of `{}`",
dep.name(), parent.name(),
dep.name());
'outer: for v in prev_active.iter() {
for node in cx.resolve.graph.iter() {
let edges = match cx.resolve.graph.edges(node) {
Some(edges) => edges,
None => continue,
};
for edge in edges {
if edge != v.package_id() { continue }
msg.push_str(&format!("\n version {} in use by {}",
v.version(), edge));
continue 'outer;
}
}
msg.push_str(&format!("\n version {} in use by ??",
v.version()));
}
msg.push_str(&format!("\n possible versions to select: {}",
candidates.iter()
.map(|v| v.version())
.map(|v| v.to_string())
.collect::<Vec<_>>()
.connect(", ")));
return Err(human(msg))
}
// Once we're all the way down here, we're definitely lost in the
// weeds! We didn't actually use any candidates above, so we need to
// give an error message that nothing was found.
//
// Note that we re-query the registry with a new dependency that
// allows any version so we can give some nicer error reporting
// which indicates a few versions that were actually found.
let msg = format!("no matching package named `{}` found \
(required by `{}`)\n\
location searched: {}\n\
version required: {}",
dep.name(), parent.name(),
dep.source_id(),
dep.version_req());
let mut msg = msg;
let all_req = semver::VersionReq::parse("*").unwrap();
let new_dep = dep.clone().set_version_req(all_req);
let mut candidates = try!(registry.query(&new_dep));
candidates.sort_by(|a, b| {
b.version().cmp(a.version())
});
if candidates.len() > 0 {
msg.push_str("\nversions found: ");
for (i, c) in candidates.iter().take(3).enumerate() {
if i != 0 { msg.push_str(", "); }
msg.push_str(&c.version().to_string());
}
if candidates.len() > 3 {
msg.push_str(", ...");
}
}
// If we have a path dependency with a locked version, then this may
// indicate that we updated a sub-package and forgot to run `cargo
// update`. In this case try to print a helpful error!
if dep.source_id().is_path() &&
dep.version_req().to_string().starts_with("=") &&
candidates.len() > 0 {
msg.push_str("\nconsider running `cargo update` to update \
a path dependency's locked version");
}
Err(human(msg))
}
// Returns if `a` and `b` are compatible in the semver sense. This is a
// commutative operation.
//
// Versions `a` and `b` are compatible if their left-most nonzero digit is the
// same.
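// For example, 1.2.3 and 1.4.0 are compatible, 0.2.1 and 0.3.4 are not, and a
// 0.0.x version is only compatible with exactly the same 0.0.x patch release.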
fn compatible(a: &semver::Version, b: &semver::Version) -> bool {
if a.major != b.major { return false }
if a.major != 0 { return true }
if a.minor != b.minor { return false }
if a.minor != 0 { return true }
a.patch == b.patch
}
fn resolve_features<'a>(cx: &mut Context, parent: &'a Summary,
method: Method)
-> CargoResult<HashMap<&'a str,
(&'a Dependency, Vec<String>)>> {
let dev_deps = match method {
Method::Everything => true,
Method::Required { dev_deps, .. } => dev_deps,
};
// First, filter by dev-dependencies
let deps = parent.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps);
// Second, ignoring dependencies that should not be compiled for this platform
let deps = deps.filter(|d| {
match method {
Method::Required{target_platform: Some(ref platform), ..} => {
d.is_active_for_platform(platform)
},
_ => true
}
});
let (mut feature_deps, used_features) = try!(build_features(parent, method));
let mut ret = HashMap::new();
| continue
}
let mut base = feature_deps.remove(dep.name()).unwrap_or(vec![]);
for feature in dep.features().iter() {
base.push(feature.clone());
if feature.contains("/") {
return Err(human(format!("features in dependencies \
cannot enable features in \
other dependencies: `{}`",
feature)));
}
}
ret.insert(dep.name(), (dep, base));
}
// All features can only point to optional dependencies, in which case they
// should have all been weeded out by the above iteration. Any remaining
// features are bugs in that the package does not actually have those
// features.
if feature_deps.len() > 0 {
let unknown = feature_deps.keys().map(|s| &s[..])
.collect::<Vec<&str>>();
if unknown.len() > 0 {
let features = unknown.connect(", ");
return Err(human(format!("Package `{}` does not have these features: \
`{}`", parent.package_id(), features)))
}
}
// Record what list of features is active for this package.
if used_features.len() > 0 {
let pkgid = parent.package_id();
cx.resolve.features.entry(pkgid.clone())
.or_insert(HashSet::new())
.extend(used_features);
}
Ok(ret)
}
// Returns a pair of (feature dependencies, all used features)
//
// The feature dependencies map is a mapping of package name to list of features
// enabled. Each package should be enabled, and each package should have the
// specified set of features enabled.
//
// The all used features set is the set of features which this local package had
// enabled, which is later used when compiling to instruct the code what
// features were enabled.
fn build_features(s: &Summary, method: Method)
-> CargoResult<(HashMap<String, Vec<String>>, HashSet<String>)> {
let mut deps = HashMap::new();
let mut used = HashSet::new();
let mut visited = HashSet::new();
match method {
Method::Everything => {
for key in s.features().keys() {
try!(add_feature(s, key, &mut deps, &mut used, &mut visited));
}
for dep in s.dependencies().iter().filter(|d| d.is_optional()) {
try!(add_feature(s, dep.name(), &mut deps, &mut used,
&mut visited));
}
}
Method::Required{features: requested_features, ..} => {
for feat in requested_features.iter() {
try!(add_feature(s, feat, &mut deps, &mut used, &mut visited));
}
}
}
match method {
Method::Everything |
Method::Required { uses_default_features: true, .. } => {
if s.features().get("default").is_some() {
try!(add_feature(s, "default", &mut deps, &mut used,
&mut visited));
}
}
Method::Required { uses_default_features: false, .. } => {}
}
return Ok((deps, used));
fn add_feature(s: &Summary, feat: &str,
deps: &mut HashMap<String, Vec<String>>,
used: &mut HashSet<String>,
visited: &mut HashSet<String>) -> CargoResult<()> {
if feat.is_empty() { return Ok(()) }
// If this feature is of the form `foo/bar`, then we just lookup package
// `foo` and enable its feature `bar`. Otherwise this feature is of the
// form `foo` and we need to recurse to enable the feature `foo` for our
// own package, which may end up enabling more features or just enabling
// a dependency.
let mut parts = feat.splitn(2, '/');
let feat_or_package = parts.next().unwrap();
match parts.next() {
Some(feat) => {
let package = feat_or_package;
deps.entry(package.to_string())
.or_insert(Vec::new())
.push(feat.to_string());
}
None => {
let feat = feat_or_package;
if !visited.insert(feat.to_string()) {
return Err(human(format!("Cyclic feature dependency: \
feature `{}` depends on itself",
feat)))
}
used.insert(feat.to_string());
match s.features().get(feat) {
Some(recursive) => {
for f in recursive {
try!(add_feature(s, f, deps, used, visited));
}
}
None => {
deps.entry(feat.to_string()).or_insert(Vec::new());
}
}
visited.remove(&feat.to_string());
}
}
Ok(())
}
} | // Next, sanitize all requested features by whitelisting all the requested
// features that correspond to optional dependencies
for dep in deps {
// weed out optional dependencies, but not those required
if dep.is_optional() && !feature_deps.contains_key(dep.name()) { | random_line_split |
new_IDRQN_main.py | # Xinwu Qian 2019-02-06
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
# This implements independent q learning approach
use_gpu = 1
import os
import config
from multiprocessing import Pool
import taxi_env as te
import scipy
import taxi_util as tu
import time
from datetime import datetime
import pickle
from collections import deque
import tensorflow as tf
import numpy as np
import network
import DRQN_agent
from system_tracker import system_tracker
import bandit
from tensorflow.python.client import timeline
np.set_printoptions(precision=2)
if use_gpu == 0:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# force on gpu
config1 = tf.ConfigProto()
config1.gpu_options.allow_growth = True
reward_out = open('log/IDRQN_reward_log_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.csv', 'w+')
with open('simulation_input.dat', 'rb') as fp:
simulation_input = pickle.load(fp)
# ------------------Parameter setting-----------------------
N_station = simulation_input['N_station']
OD_mat = simulation_input['OD_mat']
distance = simulation_input['distance']
travel_time = simulation_input['travel_time']
arrival_rate = simulation_input['arrival_rate']
taxi_input = simulation_input['taxi_input']
exp_dist=simulation_input['exp_dist']
# Setting the training parameters
batch_size = config.TRAIN_CONFIG['batch_size']
trace_length = config.TRAIN_CONFIG['trace_length'] # How long each experience trace will be when training
update_freq = config.TRAIN_CONFIG['update_freq'] # How often to perform a training step.
lstm_units=config.TRAIN_CONFIG['lstm_unit']
e_threshold=config.TRAIN_CONFIG['elimination_threshold']
y = config.TRAIN_CONFIG['y'] # Discount factor on the target Q-values
startE = config.TRAIN_CONFIG['startE'] # Starting chance of random action
endE = config.TRAIN_CONFIG['endE'] # Final chance of random action
anneling_steps = config.TRAIN_CONFIG['anneling_steps'] # How many steps of training to reduce startE to endE.
num_episodes = config.TRAIN_CONFIG['num_episodes'] # How many episodes of game environment to train network with.
load_model = config.TRAIN_CONFIG['load_model'] # Whether to load a saved model.
warmup_time = config.TRAIN_CONFIG['warmup_time']
path = "./large_case_model" # The path to save our model to.
h_size = config.TRAIN_CONFIG['h_size']
max_epLength = config.TRAIN_CONFIG['max_epLength']
pre_train_steps = max_epLength * 10 # How many steps of random actions before training begins.
softmax_action = config.TRAIN_CONFIG['softmax_action']
silent = config.TRAIN_CONFIG['silent'] # do not print training time
prioritized = config.TRAIN_CONFIG['prioritized']
rng_seed=config.TRAIN_CONFIG['random_seed']
#set rng seed
np.random.seed(rng_seed)
tau = 0.1
# --------------Simulation initialization
sys_tracker = system_tracker()
sys_tracker.initialize(config, distance, travel_time, arrival_rate, int(taxi_input), N_station, num_episodes, max_epLength)
env = te.taxi_simulator(arrival_rate, OD_mat, distance, travel_time, taxi_input)
env.reset()
print('System Successfully Initialized!')
# ------------------Train the network-----------------------
#--------------Output record-------------------#
outf=open('temp_record.txt','w')
# Set the rate of random action decrease.
e = startE
stepDrop = (startE-endE)/anneling_steps
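# Hedged illustration (not used by the loop below): the exploration rate follows a
# linear annealing schedule, dropping by `stepDrop` per training step from startE to
# endE and staying at endE afterwards. `_epsilon_at` is a hypothetical helper that
# just evaluates that schedule in closed form.
def _epsilon_at(step, start=startE, end=endE, steps=anneling_steps):
    # clip the linear interpolation at the final exploration rate
    return max(end, start - (start - end) * (step / float(steps)))
# e.g. _epsilon_at(0) == startE and _epsilon_at(anneling_steps) == endE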
# create lists to contain total rewards and steps per episode
jList = []
rList = []
total_steps = 0
# network number
nn = 0
# Make a path for our model to be saved in.
if not os.path.exists(path):
os.makedirs(path)
linucb_agent=bandit.linucb_agent(N_station,N_station*4)
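# Hedged sketch of a standard LinUCB arm score, added for illustration only; the real
# bandit.linucb_agent implementation (and its return_upper_bound API) lives in bandit.py
# and may differ. For a context vector x, ridge statistics A (d x d) and b (d,), the
# upper confidence bound is theta^T x + alpha * sqrt(x^T A^{-1} x).
def _linucb_upper_bound(A, b, x, alpha=1.0):
    A_inv = np.linalg.inv(A)            # inverse of the regularized design matrix
    theta = A_inv.dot(b)                # ridge-regression point estimate of the arm weights
    return float(theta.dot(x) + alpha * np.sqrt(x.dot(A_inv).dot(x)))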
exp_replay = network.experience_buffer(15000) # a single buffer holds everything
bandit_buffer = network.bandit_buffer(15000)
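# Hedged sketch of what the trace buffers above are assumed to provide (the actual
# network.experience_buffer / network.bandit_buffer classes are defined in network.py
# and may differ): store per-step records and return a flat
# [batch_size * trace_length, n_fields] array so that np.vstack(batch[:, k]) recovers
# field k for every sampled step.
class _TraceBufferSketch:
    def __init__(self, capacity=15000):
        self.storage = deque(maxlen=capacity)
    def add(self, records):
        self.storage.extend(records)    # records: iterable of per-step rows
    def sample(self, batch_size, trace_length):
        starts = np.random.randint(0, len(self.storage) - trace_length, batch_size)
        traces = [list(self.storage)[s:s + trace_length] for s in starts]
        return np.vstack([np.vstack(t) for t in traces])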
bandit_swap_e=1
linucb_agent_backup=bandit.linucb_agent(N_station, N_station * 4)
# # this step loads the model from the model that has been saved
# if load_model == True:
# print('Loading Model...')
# ckpt = tf.train.get_checkpoint_state(path)
# saver.restore(sess, ckpt.model_checkpoint_path)
# this example sets the target network equal to the primary network after every few episodes
# we may want to modify this
with tf.Session(config=config1) as sess:
    # one DRQN per station is needed; each network requires a different variable scope (name)
stand_agent = []
# targetOps=[]
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
agent=DRQN_agent.drqn_agent_efficient(N_station, h_size, lstm_units,tau, sess, batch_size, trace_length,is_gpu=use_gpu)
agent.drqn_build()
global_init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=10)
# writer = tf.summary.FileWriter('./graphs', sess.graph)
    # writer.close()
sess.run(global_init)
Qp_in=[]
Qp_value_in=[]
Q1_in=[]
Q2_in=[]
Q_train=[]
Q_input_dict = dict()
Q_train_dict = dict()
Qp_input_dict=dict()
for station in range(N_station):
Qp_in.append(agent.mainPredict[station])
Qp_value_in.append(agent.mainQout[station])
Qp_input_dict[agent.trainLength] = 1
Qp_input_dict[agent.batch_size] = 1
Q1_in.append(agent.targetZ[station])
Q2_in.append(agent.targetQout[station])
Q_train.append(agent.updateModel[station])
    total_train_iter=0
for i in range(num_episodes):
global_epi_buffer=[]
global_bandit_buffer=[]
sys_tracker.new_episode()
# Reset environment and get first new observation
env.reset()
# return the current state of the system
sP, tempr, featurep,score,tr2 = env.get_state()
# process the state into a list
# replace the state action with future states
feature=featurep
s = network.processState(sP, N_station)
pres=s
prea=np.zeros((N_station))
within_frame_reward = 0
frame_skipping = 1
prediction_time=0
targetz_time=0
training_time=0
rAll = 0
rAll_unshape=0
j = 0
total_serve = 0
total_leave = 0
        buffer_count=0
# We train one station in one single episode, and hold it unchanged for other stations, and we keep rotating.
tinit=time.time()
a = [st for st in range(N_station)]
        # bandit swapping scheme
        if bandit_swap_e - e > .1:  # swap once e has declined by 0.1 since the last swap
linucb_agent=linucb_agent_backup
linucb_agent_backup=bandit.linucb_agent(N_station, N_station * 4)
bandit_swap_e=e
print('we swap bandit here')
state_predict=deque()
initial_rnn_cstate = np.zeros((1, 1, config.TRAIN_CONFIG['lstm_unit']))
initial_rnn_hstate = np.zeros((1, 1, config.TRAIN_CONFIG['lstm_unit']))
while j < max_epLength:
# agent.update_conf(1,1.5*anneling_steps)
tall=time.time()
j += 1
hour=(j-1)//120
# for all the stations, act greedily
# Choose an action by greedily (with e chance of random action) from the Q-network
a = [-1] * N_station
tempt = time.time()
if config.TRAIN_CONFIG['use_linear'] == 0: # not using action elimination
if np.random.rand(1) < e or total_steps < pre_train_steps:
for station in range(N_station):
if env.taxi_in_q[station]:
a[station] = np.random.randint(0, N_station) # random actions for each station
else:
for station in range(N_station):
if env.taxi_in_q[station]:
a1 = agent.predict_regular(s, station)
a[station] = a1[0] # action performed by DRQN
if a[station] == N_station:
a[station] = station
else: # use e-greedy
# predict_score = sess.run(linear_model.linear_Yh, feed_dict={linear_model.linear_X: [feature]})
predict_score=linucb_agent.return_upper_bound(feature)
predict_score=predict_score*exp_dist[hour]/distance
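                # The LinUCB upper bounds are rescaled into expected demand per unit travel
                # distance for the current hour; arms whose rescaled score falls below
                # e_threshold are treated as eliminated and excluded from the greedy choice.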
invalid=predict_score<e_threshold
valid=predict_score>=e_threshold
rand_num=np.random.rand(1)
state_predict.append(s)
if len(state_predict)>1:
state_predict.popleft()
if rand_num < e:
rnn_value = 0
all_actions = 0
else:
rnn_value,initial_rnn_state = sess.run([agent.main_rnn_value,agent.rnn_out_state],feed_dict={agent.scalarInput: np.vstack(state_predict), agent.rnn_cstate_holder:initial_rnn_cstate,agent.rnn_hstate_holder:initial_rnn_hstate,agent.iter_holder:[np.array([e])], agent.eps_holder:[np.array([total_train_iter])], agent.trainLength: len(state_predict), agent.batch_size: 1})
initial_rnn_cstate=initial_rnn_state[0]
initial_rnn_hstate=initial_rnn_state[1]
for station in range(N_station):
if env.taxi_in_q[station]:
a1 = agent.predict(rnn_value, predict_score[station, :], e, station, e_threshold, rand_num,
valid[station, :], invalid[station, :])
a[station] = a1 # action performed by DRQN
if a[station] == N_station:
a[station] = station
prediction_time += time.time() - tempt
if config.TRAIN_CONFIG['use_tracker']:
sys_tracker.record(s, a)
# move to the next step based on action selected
ssp, lfp = env.step(a)
total_serve += ssp
total_leave += lfp
# get state and reward
s1P, r, featurep,score,r2 = env.get_state()
s1 = network.processState(s1P, N_station)
total_steps += 1
if total_steps > pre_train_steps and j > warmup_time:
# start training here
if e > endE:
e-=stepDrop
# episode buffer
# we don't store the initial 200 steps of the simulation, as warm up periods
newr=r*np.ones((N_station))
v1=np.reshape(np.array([s, a, newr, s1,feature,score,featurep,e,total_train_iter]), [1,9])
global_epi_buffer.append(v1)
global_bandit_buffer.append(v1)
#exp replay
# buffer_count+=1
# if buffer_count>=2*trace_length:
# #pop the first trace length items
# newbufferArray=[]
# bufferArray=global_epi_buffer[:trace_length]
# for bf in range(trace_length):
# #recalibrate the rewards
# reward_vec=sum([(y**id)*global_epi_buffer[bf+id][0][2] for id in range(trace_length)])
# bufferArray[bf][0][2]=reward_vec
# exp_replay.add(bufferArray)
# global_epi_buffer=global_epi_buffer[trace_length:]
# buffer_count-=trace_length
buffer_count+=1
if buffer_count>=2*trace_length:
for it in range(trace_length):
|
global_epi_buffer=global_epi_buffer[trace_length:]
buffer_count-=trace_length
if total_steps % (500) == 0 and i>4:
linubc_train = bandit_buffer.sample(batch_size * 40)
linucb_agent.update(linubc_train[:, 4], linubc_train[:, 1], linubc_train[:, 5])
linucb_agent_backup.update(linubc_train[:, 4], linubc_train[:, 1], linubc_train[:, 5])
#use a single buffer
if total_steps > pre_train_steps and j > warmup_time:
#train the linear multi-armed bandit first; we update it periodically (every 10*update_frequency steps)
t1 = time.time()
if total_steps % (update_freq) == 0:
agent.update_target_net()
# train_predict_score[train_predict_score<0.312]=100
# train_predict_score[train_predict_score>=0.312] =0
# predict_in=np.zeros((batch_size*trace_length,N_station+1))
# predict_in[:,:-1]=train_predict_score;
# print('LINUCB predict time:', time.time() - t1)
#get targetQ
total_train_iter+=1/5000
trainBatch_list = [exp_replay.sample(batch_size, trace_length) for st in range(N_station)]
for station in range(N_station):
trainBatch= trainBatch_list[station]
# generate the linucb score for each batch
# train_predict_score= train_predict_score_list[(station)*batch_size*trace_length:(station+1)*batch_size*trace_length,:] * exp_dist
train_predict_score = linucb_agent.return_upper_bound_batch(trainBatch[:, 6]) * exp_dist[hour]
past_train_eps=np.vstack(trainBatch[:,7])
past_train_iter = np.vstack(trainBatch[:, 8])
current_action=np.vstack(trainBatch[:,1])
tr, t_action = agent.train_prepare(trainBatch, station)
tp = train_predict_score/ distance[station, :]
af = tp < e_threshold
bf = tp >= e_threshold
tp[af] = 100
tp[bf] = 0
tp[:, station] = 0
tempt = time.time()
tz=sess.run(agent.targetZ[station],feed_dict={agent.scalarInput:np.vstack(trainBatch[:, 3]),agent.iter_holder:past_train_iter, agent.eps_holder:past_train_eps, agent.predict_score[station]:tp,agent.rewards[station]:tr,agent.trainLength:trace_length,agent.batch_size:batch_size})
targetz_time += time.time() - tempt
tempt = time.time()
sess.run(agent.updateModel[station], feed_dict={agent.targetQ[station]: tz, agent.rewards[station]: tr, agent.actions[station]: t_action, agent.scalarInput: np.vstack(trainBatch[:, 0]),agent.iter_holder:past_train_iter, agent.eps_holder:past_train_eps,agent.trainLength: trace_length, agent.batch_size: batch_size})
training_time += time.time() - tempt
# train
# sess.run(agent.updateModel[station],feed_dict={agent.targetQ[station]:tz,agent.rewards[station]:tr,agent.actions[station]:t_action,agent.scalarInput:np.vstack(trainBatch[:, 0]),agent.trainLength:trace_length,agent.batch_size:batch_size})
rAll += r
rAll_unshape+=r2
# swap state
s = s1
sP = s1P
feature=featurep
#process bandit buffer
future_steps=2*trace_length
tmask = np.linspace(0, 1, num=future_steps + 1)
pdeta=0.5;
quantile_mask=scipy.stats.norm.cdf(scipy.stats.norm.ppf(tmask)-pdeta)
quantile_mask = np.diff(quantile_mask) # rescale the distribution to favor risk neutral or risk-averse behavior
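# Note (added): quantile_mask holds the increments of the distorted CDF Phi(Phi^{-1}(t) - pdeta)
# over a uniform grid, so its entries sum to 1; the dot product below uses it to collapse the next
# 2*trace_length per-step scores into a single risk-adjusted score that is written back into the
# bandit buffer records.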
for epi in range(len(global_bandit_buffer)-future_steps-1):
# print(global_bandit_buffer[i])
score=np.array([global_bandit_buffer[epi+k][0][5] for k in range(future_steps)]).T.dot(quantile_mask)
record=global_bandit_buffer[epi]
record[0][5]=score; #replace the score with the risk-weighted future score
bandit_buffer.add(record)
jList.append(j)
rList.append(rAll) # reward in this episode
sys_tracker.record_time(env)
print('Episode:', i, ', totalreward:', rAll, ', old reward:',rAll_unshape,', total serve:', total_serve, ', total leave:', total_leave, ', total_cpu_time:',time.time()-tinit,
', terminal_taxi_distribution:', [len(v) for v in env.taxi_in_q], ', terminal_passenger:',
[len(v) for v in env.passenger_qtime], e,agent.conf)
n_vars=len(tf.trainable_variables())
print('TargetZ_time:',targetz_time,', Training time:',training_time, ', Prediction time:',prediction_time,'Number of tensorflow variables',n_vars)
reward_out.write(str(i) + ',' + str(rAll) + '\n')
outf.writelines(str(i)+','+str(rAll)+','+str(total_serve)+','+str(total_leave)+'\n')
# Periodically save the model.
if i % 20 == 0 and i != 0:
saver.save(sess, path + '/model-' + str(i) + '.cptk')
print("Saved Model")
# if len(rList) % summaryLength == 0 and len(rList) != 0:
# print(total_steps, np.mean(rList[-summaryLength:]), e)
# saveToCenter(i,rList,jList,np.reshape(np.array(episodeBuffer),[len(episodeBuffer),5]),\
# summaryLength,h_size,sess,mainQN,time_per_step)
# saver.save(sess,path+'/model-'+str(i)+'.cptk')
outf.close()
reward_out.close()
sys_tracker.save('IDRQN')
sys_tracker.playback(-1)
| bufferArray=np.array(global_epi_buffer)
exp_replay.add(bufferArray[it:it+trace_length]) | conditional_block |
new_IDRQN_main.py | # Xinwu Qian 2019-02-06
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
# This implements independent q learning approach
use_gpu = 1
import os
import config
from multiprocessing import Pool
import taxi_env as te
import scipy
import taxi_util as tu
import time
from datetime import datetime
import pickle
from collections import deque
import tensorflow as tf
import numpy as np
import network | from system_tracker import system_tracker
import bandit
from tensorflow.python.client import timeline
np.set_printoptions(precision=2)
if use_gpu == 0:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# force on gpu
config1 = tf.ConfigProto()
config1.gpu_options.allow_growth = True
reward_out = open('log/IDRQN_reward_log_' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.csv', 'w+')
with open('simulation_input.dat', 'rb') as fp:
simulation_input = pickle.load(fp)
# ------------------Parameter setting-----------------------
N_station = simulation_input['N_station']
OD_mat = simulation_input['OD_mat']
distance = simulation_input['distance']
travel_time = simulation_input['travel_time']
arrival_rate = simulation_input['arrival_rate']
taxi_input = simulation_input['taxi_input']
exp_dist=simulation_input['exp_dist']
# Setting the training parameters
batch_size = config.TRAIN_CONFIG['batch_size']
trace_length = config.TRAIN_CONFIG['trace_length'] # How long each experience trace will be when training
update_freq = config.TRAIN_CONFIG['update_freq'] # How often to perform a training step.
lstm_units=config.TRAIN_CONFIG['lstm_unit']
e_threshold=config.TRAIN_CONFIG['elimination_threshold']
y = config.TRAIN_CONFIG['y'] # Discount factor on the target Q-values
startE = config.TRAIN_CONFIG['startE'] # Starting chance of random action
endE = config.TRAIN_CONFIG['endE'] # Final chance of random action
anneling_steps = config.TRAIN_CONFIG['anneling_steps'] # How many steps of training to reduce startE to endE.
num_episodes = config.TRAIN_CONFIG['num_episodes'] # How many episodes of game environment to train network with.
load_model = config.TRAIN_CONFIG['load_model'] # Whether to load a saved model.
warmup_time = config.TRAIN_CONFIG['warmup_time'];
path = "./large_case_model" # The path to save our model to.
h_size = config.TRAIN_CONFIG['h_size']
max_epLength = config.TRAIN_CONFIG['max_epLength']
pre_train_steps = max_epLength * 10 # How many steps of random actions before training begins.
softmax_action = config.TRAIN_CONFIG['softmax_action']
silent = config.TRAIN_CONFIG['silent'] # do not print training time
prioritized = config.TRAIN_CONFIG['prioritized']
rng_seed=config.TRAIN_CONFIG['random_seed']
#set rng seed
np.random.seed(rng_seed)
tau = 0.1
# --------------Simulation initialization
sys_tracker = system_tracker()
sys_tracker.initialize(config, distance, travel_time, arrival_rate, int(taxi_input), N_station, num_episodes, max_epLength)
env = te.taxi_simulator(arrival_rate, OD_mat, distance, travel_time, taxi_input)
env.reset()
print('System Successfully Initialized!')
# ------------------Train the network-----------------------
#--------------Output record-------------------#
outf=open('temp_record.txt','w')
# Set the rate of random action decrease.
e = startE
stepDrop = (startE-endE)/anneling_steps
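# Note (added): e is annealed linearly from startE down to endE; stepDrop is subtracted once per
# training step inside the episode loop, so the decay completes after roughly anneling_steps steps.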
# create lists to contain total rewards and steps per episode
jList = []
rList = []
total_steps = 0
# network number
nn = 0
# Make a path for our model to be saved in.
if not os.path.exists(path):
os.makedirs(path)
linucb_agent=bandit.linucb_agent(N_station,N_station*4)
exp_replay = network.experience_buffer(15000) # a single buffer holds everything
bandit_buffer = network.bandit_buffer(15000)
bandit_swap_e=1;
linucb_agent_backup=bandit.linucb_agent(N_station, N_station * 4)
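# Note (added): two LinUCB agents are kept on purpose. linucb_agent is the one actually used for
# action elimination, while linucb_agent_backup keeps training in parallel from a fresh start;
# inside the episode loop the backup replaces the active agent every time the exploration rate e
# has dropped by another 0.1 (the "bandit swapping scheme" below).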
# # this step loads the model from the model that has been saved
# if load_model == True:
# print('Loading Model...')
# ckpt = tf.train.get_checkpoint_state(path)
# saver.restore(sess, ckpt.model_checkpoint_path)
# this example sets the target network equal to the main network after every few episodes
# we may want to modify this
with tf.Session(config=config1) as sess:
# one DRQN per station is needed, different network requires a different scope (name)
stand_agent = []
# targetOps=[]
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
agent=DRQN_agent.drqn_agent_efficient(N_station, h_size, lstm_units,tau, sess, batch_size, trace_length,is_gpu=use_gpu)
agent.drqn_build()
global_init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=10)
# writer = tf.summary.FileWriter('./graphs', sess.graph)
# writer.close()
sess.run(global_init)
Qp_in=[]
Qp_value_in=[]
Q1_in=[]
Q2_in=[]
Q_train=[]
Q_input_dict = dict()
Q_train_dict = dict()
Qp_input_dict=dict()
for station in range(N_station):
Qp_in.append(agent.mainPredict[station])
Qp_value_in.append(agent.mainQout[station])
Qp_input_dict[agent.trainLength] = 1
Qp_input_dict[agent.batch_size] = 1
Q1_in.append(agent.targetZ[station])
Q2_in.append(agent.targetQout[station])
Q_train.append(agent.updateModel[station])
total_train_iter=0;
for i in range(num_episodes):
global_epi_buffer=[]
global_bandit_buffer=[]
sys_tracker.new_episode()
# Reset environment and get first new observation
env.reset()
# return the current state of the system
sP, tempr, featurep,score,tr2 = env.get_state()
# process the state into a list
# replace the state action with future states
feature=featurep
s = network.processState(sP, N_station)
pres=s
prea=np.zeros((N_station))
within_frame_reward = 0
frame_skipping = 1
prediction_time=0
targetz_time=0
training_time=0
rAll = 0
rAll_unshape=0
j = 0
total_serve = 0
total_leave = 0
buffer_count=0;
# We train one station in one single episode, and hold it unchanged for other stations, and we keep rotating.
tinit=time.time()
a = [st for st in range(N_station)]
#bandit swapping scheme
if bandit_swap_e - e >.1: # swap bandits once e has declined by another 0.1
linucb_agent=linucb_agent_backup
linucb_agent_backup=bandit.linucb_agent(N_station, N_station * 4)
bandit_swap_e=e
print('we swap bandit here')
state_predict=deque()
initial_rnn_cstate = np.zeros((1, 1, config.TRAIN_CONFIG['lstm_unit']))
initial_rnn_hstate = np.zeros((1, 1, config.TRAIN_CONFIG['lstm_unit']))
while j < max_epLength:
# agent.update_conf(1,1.5*anneling_steps)
tall=time.time()
j += 1
hour=(j-1)//120
# for all the stations, act greedily
# Choose an action by greedily (with e chance of random action) from the Q-network
a = [-1] * N_station
tempt = time.time()
if config.TRAIN_CONFIG['use_linear'] == 0: # not using action elimination
if np.random.rand(1) < e or total_steps < pre_train_steps:
for station in range(N_station):
if env.taxi_in_q[station]:
a[station] = np.random.randint(0, N_station) # random actions for each station
else:
for station in range(N_station):
if env.taxi_in_q[station]:
a1 = agent.predict_regular(s, station)
a[station] = a1[0] # action performed by DRQN
if a[station] == N_station:
a[station] = station
else: # use e-greedy
# predict_score = sess.run(linear_model.linear_Yh, feed_dict={linear_model.linear_X: [feature]})
predict_score=linucb_agent.return_upper_bound(feature)
predict_score=predict_score*exp_dist[hour]/distance
invalid=predict_score<e_threshold
valid=predict_score>=e_threshold
rand_num=np.random.rand(1)
state_predict.append(s)
if len(state_predict)>1:
state_predict.popleft()
if rand_num < e:
rnn_value = 0
all_actions = 0
else:
rnn_value,initial_rnn_state = sess.run([agent.main_rnn_value,agent.rnn_out_state],feed_dict={agent.scalarInput: np.vstack(state_predict), agent.rnn_cstate_holder:initial_rnn_cstate,agent.rnn_hstate_holder:initial_rnn_hstate,agent.iter_holder:[np.array([e])], agent.eps_holder:[np.array([total_train_iter])], agent.trainLength: len(state_predict), agent.batch_size: 1})
initial_rnn_cstate=initial_rnn_state[0]
initial_rnn_hstate=initial_rnn_state[1]
for station in range(N_station):
if env.taxi_in_q[station]:
a1 = agent.predict(rnn_value, predict_score[station, :], e, station, e_threshold, rand_num,
valid[station, :], invalid[station, :])
a[station] = a1 # action performed by DRQN
if a[station] == N_station:
a[station] = station
prediction_time += time.time() - tempt
if config.TRAIN_CONFIG['use_tracker']:
sys_tracker.record(s, a)
# move to the next step based on action selected
ssp, lfp = env.step(a)
total_serve += ssp
total_leave += lfp
# get state and reward
s1P, r, featurep,score,r2 = env.get_state()
s1 = network.processState(s1P, N_station)
total_steps += 1
if total_steps > pre_train_steps and j > warmup_time:
# start training here
if e > endE:
e-=stepDrop
# episode buffer
# we don't store the initial 200 steps of the simulation, as these are warm-up periods
newr=r*np.ones((N_station))
v1=np.reshape(np.array([s, a, newr, s1,feature,score,featurep,e,total_train_iter]), [1,9])
global_epi_buffer.append(v1)
global_bandit_buffer.append(v1)
#exp replay
# buffer_count+=1
# if buffer_count>=2*trace_length:
# #pop the first trace length items
# newbufferArray=[]
# bufferArray=global_epi_buffer[:trace_length]
# for bf in range(trace_length):
# #recalibrate the rewards
# reward_vec=sum([(y**id)*global_epi_buffer[bf+id][0][2] for id in range(trace_length)])
# bufferArray[bf][0][2]=reward_vec
# exp_replay.add(bufferArray)
# global_epi_buffer=global_epi_buffer[trace_length:]
# buffer_count-=trace_length
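# Note (added): once at least 2*trace_length transitions have accumulated, the block below stores
# trace_length overlapping windows (each of length trace_length, shifted by one step) into the
# experience replay buffer and then discards the oldest trace_length transitions.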
buffer_count+=1
if buffer_count>=2*trace_length:
for it in range(trace_length):
bufferArray=np.array(global_epi_buffer)
exp_replay.add(bufferArray[it:it+trace_length])
global_epi_buffer=global_epi_buffer[trace_length:]
buffer_count-=trace_length
if total_steps % (500) == 0 and i>4:
linubc_train = bandit_buffer.sample(batch_size * 40)
linucb_agent.update(linubc_train[:, 4], linubc_train[:, 1], linubc_train[:, 5])
linucb_agent_backup.update(linubc_train[:, 4], linubc_train[:, 1], linubc_train[:, 5])
#use a single buffer
if total_steps > pre_train_steps and j > warmup_time:
#train the linear multi-armed bandit first; we update it periodically (every 10*update_frequency steps)
t1 = time.time()
if total_steps % (update_freq) == 0:
agent.update_target_net()
# train_predict_score[train_predict_score<0.312]=100
# train_predict_score[train_predict_score>=0.312] =0
# predict_in=np.zeros((batch_size*trace_length,N_station+1))
# predict_in[:,:-1]=train_predict_score;
# print('LINUCB predict time:', time.time() - t1)
#get targetQ
total_train_iter+=1/5000
trainBatch_list = [exp_replay.sample(batch_size, trace_length) for st in range(N_station)]
for station in range(N_station):
trainBatch= trainBatch_list[station]
# generate the linucb score for each batch
# train_predict_score= train_predict_score_list[(station)*batch_size*trace_length:(station+1)*batch_size*trace_length,:] * exp_dist
train_predict_score = linucb_agent.return_upper_bound_batch(trainBatch[:, 6]) * exp_dist[hour]
past_train_eps=np.vstack(trainBatch[:,7])
past_train_iter = np.vstack(trainBatch[:, 8])
current_action=np.vstack(trainBatch[:,1])
tr, t_action = agent.train_prepare(trainBatch, station)
tp = train_predict_score/ distance[station, :]
af = tp < e_threshold
bf = tp >= e_threshold
tp[af] = 100
tp[bf] = 0
tp[:, station] = 0
tempt = time.time()
tz=sess.run(agent.targetZ[station],feed_dict={agent.scalarInput:np.vstack(trainBatch[:, 3]),agent.iter_holder:past_train_iter, agent.eps_holder:past_train_eps, agent.predict_score[station]:tp,agent.rewards[station]:tr,agent.trainLength:trace_length,agent.batch_size:batch_size})
targetz_time += time.time() - tempt
tempt = time.time()
sess.run(agent.updateModel[station], feed_dict={agent.targetQ[station]: tz, agent.rewards[station]: tr, agent.actions[station]: t_action, agent.scalarInput: np.vstack(trainBatch[:, 0]),agent.iter_holder:past_train_iter, agent.eps_holder:past_train_eps,agent.trainLength: trace_length, agent.batch_size: batch_size})
training_time += time.time() - tempt
# train
# sess.run(agent.updateModel[station],feed_dict={agent.targetQ[station]:tz,agent.rewards[station]:tr,agent.actions[station]:t_action,agent.scalarInput:np.vstack(trainBatch[:, 0]),agent.trainLength:trace_length,agent.batch_size:batch_size})
rAll += r
rAll_unshape+=r2
# swap state
s = s1
sP = s1P
feature=featurep
#process bandit buffer
future_steps=2*trace_length
tmask = np.linspace(0, 1, num=future_steps + 1)
pdeta=0.5;
quantile_mask=scipy.stats.norm.cdf(scipy.stats.norm.ppf(tmask)-pdeta)
quantile_mask = np.diff(quantile_mask) # rescale the distribution to favor risk neutral or risk-averse behavior
for epi in range(len(global_bandit_buffer)-future_steps-1):
# print(global_bandit_buffer[i])
score=np.array([global_bandit_buffer[epi+k][0][5] for k in range(future_steps)]).T.dot(quantile_mask)
record=global_bandit_buffer[epi]
record[0][5]=score; #replace the score with the risk-weighted future score
bandit_buffer.add(record)
jList.append(j)
rList.append(rAll) # reward in this episode
sys_tracker.record_time(env)
print('Episode:', i, ', totalreward:', rAll, ', old reward:',rAll_unshape,', total serve:', total_serve, ', total leave:', total_leave, ', total_cpu_time:',time.time()-tinit,
', terminal_taxi_distribution:', [len(v) for v in env.taxi_in_q], ', terminal_passenger:',
[len(v) for v in env.passenger_qtime], e,agent.conf)
n_vars=len(tf.trainable_variables())
print('TargetZ_time:',targetz_time,', Training time:',training_time, ', Prediction time:',prediction_time,'Number of tensorflow variables',n_vars)
reward_out.write(str(i) + ',' + str(rAll) + '\n')
outf.writelines(str(i)+','+str(rAll)+','+str(total_serve)+','+str(total_leave)+'\n')
# Periodically save the model.
if i % 20 == 0 and i != 0:
saver.save(sess, path + '/model-' + str(i) + '.cptk')
print("Saved Model")
# if len(rList) % summaryLength == 0 and len(rList) != 0:
# print(total_steps, np.mean(rList[-summaryLength:]), e)
# saveToCenter(i,rList,jList,np.reshape(np.array(episodeBuffer),[len(episodeBuffer),5]),\
# summaryLength,h_size,sess,mainQN,time_per_step)
# saver.save(sess,path+'/model-'+str(i)+'.cptk')
outf.close()
reward_out.close()
sys_tracker.save('IDRQN')
sys_tracker.playback(-1) | import DRQN_agent | random_line_split |
update.rs | //! Types used to describe updates on graphs.
use std::convert::TryInto;
use std::fs::File;
use std::sync::Mutex;
use crate::errors::{GraphAnnisCoreError, Result};
use crate::serializer::KeySerializer;
use bincode::Options;
use serde::de::Error as DeserializeError;
use serde::de::{MapAccess, Visitor};
use serde::ser::{Error as SerializeError, SerializeMap};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sstable::{SSIterator, Table, TableBuilder, TableIterator};
use tempfile::NamedTempFile;
/// Describes a single update on the graph.
#[derive(Serialize, Deserialize, Clone, Debug, MallocSizeOf)]
pub enum UpdateEvent {
/// Add a node with a name and type.
AddNode {
node_name: String,
node_type: String,
},
/// Delete a node given by the name.
DeleteNode { node_name: String },
/// Add a label to the node given by its name.
AddNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label of a node, given the name of the node and the qualified label name.
DeleteNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
},
/// Add an edge between two nodes given by their name.
AddEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Delete an existing edge between two nodes given by their name.
DeleteEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Add a label to an edge between two nodes.
AddEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label from an edge between two nodes.
DeleteEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
},
}
enum ChangeSet {
InProgress {
table_builder: Box<TableBuilder<File>>,
outfile: NamedTempFile,
},
Finished {
table: Table,
},
}
/// A list of changes to apply to a graph.
pub struct GraphUpdate {
changesets: Mutex<Vec<ChangeSet>>,
event_counter: u64,
serialization: bincode::config::DefaultOptions,
}
impl Default for GraphUpdate {
fn default() -> Self {
GraphUpdate::new()
}
}
impl GraphUpdate {
/// Create a new empty list of updates.
pub fn new() -> GraphUpdate {
GraphUpdate {
event_counter: 0,
changesets: Mutex::new(Vec::new()),
serialization: bincode::options(),
}
}
/// Add the given event to the update list.
pub fn add_event(&mut self, event: UpdateEvent) -> Result<()> {
let new_event_counter = self.event_counter + 1;
let key = new_event_counter.create_key();
let value = self.serialization.serialize(&event)?;
let mut changeset = self.changesets.lock()?;
if let ChangeSet::InProgress { table_builder, .. } =
current_inprogress_changeset(&mut changeset)?
{
table_builder.add(&key, &value)?;
self.event_counter = new_event_counter;
}
Ok(())
}
/// Get all changes
pub fn iter(&self) -> Result<GraphUpdateIterator> {
let it = GraphUpdateIterator::new(self)?;
Ok(it)
}
/// Returns `true` if the update list is empty.
pub fn is_empty(&self) -> Result<bool> {
Ok(self.event_counter == 0)
}
/// Returns the number of updates.
pub fn len(&self) -> Result<usize> {
let result = self.event_counter.try_into()?;
Ok(result)
}
}
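// Illustrative usage sketch (added for this document; not part of the original file). It only
// uses the API defined in this module; the node names are made-up examples.
#[allow(dead_code)]
fn graph_update_usage_example() -> Result<()> {
    let mut updates = GraphUpdate::new();
    // Create a node and attach an annotation to it.
    updates.add_event(UpdateEvent::AddNode {
        node_name: "corpus/doc1#tok_1".to_string(),
        node_type: "node".to_string(),
    })?;
    updates.add_event(UpdateEvent::AddNodeLabel {
        node_name: "corpus/doc1#tok_1".to_string(),
        anno_ns: "annis".to_string(),
        anno_name: "tok".to_string(),
        anno_value: "hello".to_string(),
    })?;
    // Iterating yields the events in insertion order, keyed by their event ID.
    for entry in updates.iter()? {
        let (id, event) = entry?;
        println!("{}: {:?}", id, event);
    }
    Ok(())
}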
fn finish_all_changesets(changesets: &mut Vec<ChangeSet>) -> Result<()> {
// Remove all changesets from the vector and finish them
let finished: Result<Vec<ChangeSet>> = changesets
.drain(..)
.map(|c| match c {
ChangeSet::InProgress {
table_builder,
outfile,
} => {
table_builder.finish()?;
// Re-open as table
let file = outfile.reopen()?;
let size = file.metadata()?.len();
let table = Table::new(sstable::Options::default(), Box::new(file), size as usize)?;
Ok(ChangeSet::Finished { table })
}
ChangeSet::Finished { table } => Ok(ChangeSet::Finished { table }),
})
.collect();
// Re-add the finished changesets
changesets.extend(finished?);
Ok(())
}
fn current_inprogress_changeset(changesets: &mut Vec<ChangeSet>) -> Result<&mut ChangeSet> {
let needs_new_changeset = if let Some(c) = changesets.last_mut() {
match c {
ChangeSet::InProgress { .. } => false,
ChangeSet::Finished { .. } => true,
}
} else {
true
};
if needs_new_changeset {
// Create a new changeset
let outfile = NamedTempFile::new()?;
let table_builder = TableBuilder::new(sstable::Options::default(), outfile.reopen()?);
let c = ChangeSet::InProgress {
table_builder: Box::new(table_builder),
outfile,
};
changesets.push(c);
}
// Get the last changeset, which must be in the InProgress state
changesets
.last_mut()
.ok_or(GraphAnnisCoreError::GraphUpdatePersistanceFileMissing)
}
pub struct GraphUpdateIterator {
iterators: Vec<TableIterator>,
size_hint: u64,
serialization: bincode::config::DefaultOptions,
}
impl GraphUpdateIterator {
fn new(g: &GraphUpdate) -> Result<GraphUpdateIterator> {
let mut changesets = g.changesets.lock()?;
finish_all_changesets(&mut changesets)?;
let iterators: Vec<_> = changesets
.iter()
.filter_map(|c| match c {
ChangeSet::InProgress { .. } => None,
ChangeSet::Finished { table } => {
let mut it = table.iter();
it.seek_to_first();
Some(it)
}
})
.collect();
Ok(GraphUpdateIterator {
size_hint: g.event_counter,
iterators,
serialization: g.serialization,
})
}
}
impl std::iter::Iterator for GraphUpdateIterator {
type Item = Result<(u64, UpdateEvent)>;
fn next(&mut self) -> Option<Self::Item> {
// Remove all empty table iterators.
self.iterators.retain(|it| it.valid());
if let Some(it) = self.iterators.first_mut() {
// Get the current values
if let Some((key, value)) = sstable::current_key_val(it) {
// Create the actual types
let id = match u64::parse_key(&key) {
Ok(id) => id,
Err(e) => return Some(Err(e.into())),
};
let event: UpdateEvent = match self.serialization.deserialize(&value) {
Ok(event) => event,
Err(e) => return Some(Err(e.into())),
};
// Advance for next iteration
it.advance();
return Some(Ok((id, event)));
}
}
None
}
fn size_hint(&self) -> (usize, Option<usize>) {
if let Ok(s) = self.size_hint.try_into() | else {
(0, None)
}
}
}
impl Serialize for GraphUpdate {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let iter = self.iter().map_err(S::Error::custom)?;
let number_of_updates = self.len().map_err(S::Error::custom)?;
let mut map_serializer = serializer.serialize_map(Some(number_of_updates))?;
for entry in iter {
let (key, value) = entry.map_err(S::Error::custom)?;
map_serializer
.serialize_entry(&key, &value)
.map_err(S::Error::custom)?;
}
map_serializer.end()
}
}
struct GraphUpdateVisitor {}
impl<'de> Visitor<'de> for GraphUpdateVisitor {
type Value = GraphUpdate;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a list of graph updates")
}
fn visit_map<M>(self, mut access: M) -> std::result::Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let serialization = bincode::options();
let outfile = NamedTempFile::new().map_err(M::Error::custom)?;
let mut table_builder = TableBuilder::new(
sstable::Options::default(),
outfile.reopen().map_err(M::Error::custom)?,
);
let mut event_counter = 0;
while let Some((id, event)) = access
.next_entry::<u64, UpdateEvent>()
.map_err(M::Error::custom)?
{
event_counter = id;
let key = id.create_key();
let value = serialization.serialize(&event).map_err(M::Error::custom)?;
table_builder.add(&key, &value).map_err(M::Error::custom)?
}
let c = ChangeSet::InProgress {
outfile,
table_builder: Box::new(table_builder),
};
let mut changesets = vec![c];
finish_all_changesets(&mut changesets).map_err(M::Error::custom)?;
let g = GraphUpdate {
changesets: Mutex::new(changesets),
event_counter,
serialization,
};
Ok(g)
}
}
impl<'de> Deserialize<'de> for GraphUpdate {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(GraphUpdateVisitor {})
}
}
| {
(s, Some(s))
} | conditional_block |
update.rs | //! Types used to describe updates on graphs.
use std::convert::TryInto;
use std::fs::File;
use std::sync::Mutex;
use crate::errors::{GraphAnnisCoreError, Result};
use crate::serializer::KeySerializer;
use bincode::Options;
use serde::de::Error as DeserializeError;
use serde::de::{MapAccess, Visitor};
use serde::ser::{Error as SerializeError, SerializeMap};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sstable::{SSIterator, Table, TableBuilder, TableIterator};
use tempfile::NamedTempFile;
/// Describes a single update on the graph.
#[derive(Serialize, Deserialize, Clone, Debug, MallocSizeOf)]
pub enum UpdateEvent {
/// Add a node with a name and type.
AddNode {
node_name: String,
node_type: String,
},
/// Delete a node given by the name.
DeleteNode { node_name: String },
/// Add a label to the node given by its name.
AddNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label of a node, given the name of the node and the qualified label name.
DeleteNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
},
/// Add an edge between two nodes given by their name.
AddEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Delete an existing edge between two nodes given by their name.
DeleteEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Add a label to an edge between two nodes.
AddEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label from an edge between two nodes.
DeleteEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
},
}
enum ChangeSet {
InProgress {
table_builder: Box<TableBuilder<File>>,
outfile: NamedTempFile,
},
Finished {
table: Table,
},
}
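// Note (added; interpretation of the code below): while a ChangeSet is InProgress, events are
// spooled into an on-disk SSTable backed by a NamedTempFile, and finishing it re-opens the same
// file as a read-only Table. Presumably this keeps even very large update lists out of main
// memory at the cost of some temp-file I/O.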
/// A list of changes to apply to a graph.
pub struct GraphUpdate {
changesets: Mutex<Vec<ChangeSet>>,
event_counter: u64,
serialization: bincode::config::DefaultOptions,
}
impl Default for GraphUpdate {
fn default() -> Self {
GraphUpdate::new()
}
}
impl GraphUpdate {
/// Create a new empty list of updates.
pub fn new() -> GraphUpdate {
GraphUpdate {
event_counter: 0,
changesets: Mutex::new(Vec::new()),
serialization: bincode::options(),
}
}
/// Add the given event to the update list.
pub fn add_event(&mut self, event: UpdateEvent) -> Result<()> {
let new_event_counter = self.event_counter + 1;
let key = new_event_counter.create_key();
let value = self.serialization.serialize(&event)?;
let mut changeset = self.changesets.lock()?;
if let ChangeSet::InProgress { table_builder, .. } =
current_inprogress_changeset(&mut changeset)?
{
table_builder.add(&key, &value)?;
self.event_counter = new_event_counter;
}
Ok(())
}
/// Get all changes
pub fn iter(&self) -> Result<GraphUpdateIterator> {
let it = GraphUpdateIterator::new(self)?;
Ok(it)
}
/// Returns `true` if the update list is empty.
pub fn is_empty(&self) -> Result<bool> {
Ok(self.event_counter == 0)
}
/// Returns the number of updates.
pub fn len(&self) -> Result<usize> {
let result = self.event_counter.try_into()?;
Ok(result)
}
}
fn finish_all_changesets(changesets: &mut Vec<ChangeSet>) -> Result<()> {
// Remove all changesets from the vector and finish them
let finished: Result<Vec<ChangeSet>> = changesets
.drain(..)
.map(|c| match c {
ChangeSet::InProgress {
table_builder,
outfile,
} => {
table_builder.finish()?;
// Re-open as table
let file = outfile.reopen()?;
let size = file.metadata()?.len();
let table = Table::new(sstable::Options::default(), Box::new(file), size as usize)?;
Ok(ChangeSet::Finished { table })
}
ChangeSet::Finished { table } => Ok(ChangeSet::Finished { table }),
})
.collect();
// Re-add the finished changesets
changesets.extend(finished?);
Ok(())
}
fn current_inprogress_changeset(changesets: &mut Vec<ChangeSet>) -> Result<&mut ChangeSet> {
let needs_new_changeset = if let Some(c) = changesets.last_mut() {
match c {
ChangeSet::InProgress { .. } => false,
ChangeSet::Finished { .. } => true,
}
} else {
true
};
if needs_new_changeset {
// Create a new changeset
let outfile = NamedTempFile::new()?;
let table_builder = TableBuilder::new(sstable::Options::default(), outfile.reopen()?);
let c = ChangeSet::InProgress {
table_builder: Box::new(table_builder),
outfile,
};
changesets.push(c);
}
// Get the last changeset, which must be in the InProgress state
changesets
.last_mut()
.ok_or(GraphAnnisCoreError::GraphUpdatePersistanceFileMissing)
}
pub struct GraphUpdateIterator {
iterators: Vec<TableIterator>,
size_hint: u64,
serialization: bincode::config::DefaultOptions,
}
impl GraphUpdateIterator {
fn new(g: &GraphUpdate) -> Result<GraphUpdateIterator> {
let mut changesets = g.changesets.lock()?;
finish_all_changesets(&mut changesets)?;
let iterators: Vec<_> = changesets
.iter()
.filter_map(|c| match c {
ChangeSet::InProgress { .. } => None,
ChangeSet::Finished { table } => {
let mut it = table.iter();
it.seek_to_first();
Some(it)
}
})
.collect();
Ok(GraphUpdateIterator {
size_hint: g.event_counter,
iterators,
serialization: g.serialization,
})
}
}
impl std::iter::Iterator for GraphUpdateIterator {
type Item = Result<(u64, UpdateEvent)>;
fn next(&mut self) -> Option<Self::Item> {
// Remove all empty table iterators.
self.iterators.retain(|it| it.valid());
if let Some(it) = self.iterators.first_mut() {
// Get the current values
if let Some((key, value)) = sstable::current_key_val(it) {
// Create the actual types
let id = match u64::parse_key(&key) {
Ok(id) => id,
Err(e) => return Some(Err(e.into())),
};
let event: UpdateEvent = match self.serialization.deserialize(&value) {
Ok(event) => event,
Err(e) => return Some(Err(e.into())),
};
// Advance for next iteration
it.advance();
return Some(Ok((id, event)));
}
}
None
}
| if let Ok(s) = self.size_hint.try_into() {
(s, Some(s))
} else {
(0, None)
}
}
}
impl Serialize for GraphUpdate {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let iter = self.iter().map_err(S::Error::custom)?;
let number_of_updates = self.len().map_err(S::Error::custom)?;
let mut map_serializer = serializer.serialize_map(Some(number_of_updates))?;
for entry in iter {
let (key, value) = entry.map_err(S::Error::custom)?;
map_serializer
.serialize_entry(&key, &value)
.map_err(S::Error::custom)?;
}
map_serializer.end()
}
}
struct GraphUpdateVisitor {}
impl<'de> Visitor<'de> for GraphUpdateVisitor {
type Value = GraphUpdate;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a list of graph updates")
}
fn visit_map<M>(self, mut access: M) -> std::result::Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let serialization = bincode::options();
let outfile = NamedTempFile::new().map_err(M::Error::custom)?;
let mut table_builder = TableBuilder::new(
sstable::Options::default(),
outfile.reopen().map_err(M::Error::custom)?,
);
let mut event_counter = 0;
while let Some((id, event)) = access
.next_entry::<u64, UpdateEvent>()
.map_err(M::Error::custom)?
{
event_counter = id;
let key = id.create_key();
let value = serialization.serialize(&event).map_err(M::Error::custom)?;
table_builder.add(&key, &value).map_err(M::Error::custom)?
}
let c = ChangeSet::InProgress {
outfile,
table_builder: Box::new(table_builder),
};
let mut changesets = vec![c];
finish_all_changesets(&mut changesets).map_err(M::Error::custom)?;
let g = GraphUpdate {
changesets: Mutex::new(changesets),
event_counter,
serialization,
};
Ok(g)
}
}
impl<'de> Deserialize<'de> for GraphUpdate {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(GraphUpdateVisitor {})
}
} | fn size_hint(&self) -> (usize, Option<usize>) { | random_line_split |
update.rs | //! Types used to describe updates on graphs.
use std::convert::TryInto;
use std::fs::File;
use std::sync::Mutex;
use crate::errors::{GraphAnnisCoreError, Result};
use crate::serializer::KeySerializer;
use bincode::Options;
use serde::de::Error as DeserializeError;
use serde::de::{MapAccess, Visitor};
use serde::ser::{Error as SerializeError, SerializeMap};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sstable::{SSIterator, Table, TableBuilder, TableIterator};
use tempfile::NamedTempFile;
/// Describes a single update on the graph.
#[derive(Serialize, Deserialize, Clone, Debug, MallocSizeOf)]
pub enum UpdateEvent {
/// Add a node with a name and type.
AddNode {
node_name: String,
node_type: String,
},
/// Delete a node given by the name.
DeleteNode { node_name: String },
/// Add a label to the node given by its name.
AddNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label of a node, given the name of the node and the qualified label name.
DeleteNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
},
/// Add an edge between two nodes given by their name.
AddEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Delete an existing edge between two nodes given by their name.
DeleteEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Add a label to an edge between two nodes.
AddEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label from an edge between two nodes.
DeleteEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
},
}
enum | {
InProgress {
table_builder: Box<TableBuilder<File>>,
outfile: NamedTempFile,
},
Finished {
table: Table,
},
}
/// A list of changes to apply to a graph.
pub struct GraphUpdate {
changesets: Mutex<Vec<ChangeSet>>,
event_counter: u64,
serialization: bincode::config::DefaultOptions,
}
impl Default for GraphUpdate {
fn default() -> Self {
GraphUpdate::new()
}
}
impl GraphUpdate {
/// Create a new empty list of updates.
pub fn new() -> GraphUpdate {
GraphUpdate {
event_counter: 0,
changesets: Mutex::new(Vec::new()),
serialization: bincode::options(),
}
}
/// Add the given event to the update list.
pub fn add_event(&mut self, event: UpdateEvent) -> Result<()> {
let new_event_counter = self.event_counter + 1;
let key = new_event_counter.create_key();
let value = self.serialization.serialize(&event)?;
let mut changeset = self.changesets.lock()?;
if let ChangeSet::InProgress { table_builder, .. } =
current_inprogress_changeset(&mut changeset)?
{
table_builder.add(&key, &value)?;
self.event_counter = new_event_counter;
}
Ok(())
}
/// Get all changes
pub fn iter(&self) -> Result<GraphUpdateIterator> {
let it = GraphUpdateIterator::new(self)?;
Ok(it)
}
/// Returns `true` if the update list is empty.
pub fn is_empty(&self) -> Result<bool> {
Ok(self.event_counter == 0)
}
/// Returns the number of updates.
pub fn len(&self) -> Result<usize> {
let result = self.event_counter.try_into()?;
Ok(result)
}
}
fn finish_all_changesets(changesets: &mut Vec<ChangeSet>) -> Result<()> {
// Remove all changesets from the vector and finish them
let finished: Result<Vec<ChangeSet>> = changesets
.drain(..)
.map(|c| match c {
ChangeSet::InProgress {
table_builder,
outfile,
} => {
table_builder.finish()?;
// Re-open as table
let file = outfile.reopen()?;
let size = file.metadata()?.len();
let table = Table::new(sstable::Options::default(), Box::new(file), size as usize)?;
Ok(ChangeSet::Finished { table })
}
ChangeSet::Finished { table } => Ok(ChangeSet::Finished { table }),
})
.collect();
// Re-add the finished changesets
changesets.extend(finished?);
Ok(())
}
fn current_inprogress_changeset(changesets: &mut Vec<ChangeSet>) -> Result<&mut ChangeSet> {
let needs_new_changeset = if let Some(c) = changesets.last_mut() {
match c {
ChangeSet::InProgress { .. } => false,
ChangeSet::Finished { .. } => true,
}
} else {
true
};
if needs_new_changeset {
// Create a new changeset
let outfile = NamedTempFile::new()?;
let table_builder = TableBuilder::new(sstable::Options::default(), outfile.reopen()?);
let c = ChangeSet::InProgress {
table_builder: Box::new(table_builder),
outfile,
};
changesets.push(c);
}
// Get the last changeset, which must be in the InProgress state
changesets
.last_mut()
.ok_or(GraphAnnisCoreError::GraphUpdatePersistanceFileMissing)
}
pub struct GraphUpdateIterator {
iterators: Vec<TableIterator>,
size_hint: u64,
serialization: bincode::config::DefaultOptions,
}
impl GraphUpdateIterator {
fn new(g: &GraphUpdate) -> Result<GraphUpdateIterator> {
let mut changesets = g.changesets.lock()?;
finish_all_changesets(&mut changesets)?;
let iterators: Vec<_> = changesets
.iter()
.filter_map(|c| match c {
ChangeSet::InProgress { .. } => None,
ChangeSet::Finished { table } => {
let mut it = table.iter();
it.seek_to_first();
Some(it)
}
})
.collect();
Ok(GraphUpdateIterator {
size_hint: g.event_counter,
iterators,
serialization: g.serialization,
})
}
}
impl std::iter::Iterator for GraphUpdateIterator {
type Item = Result<(u64, UpdateEvent)>;
fn next(&mut self) -> Option<Self::Item> {
// Remove all empty table iterators.
self.iterators.retain(|it| it.valid());
if let Some(it) = self.iterators.first_mut() {
// Get the current values
if let Some((key, value)) = sstable::current_key_val(it) {
// Create the actual types
let id = match u64::parse_key(&key) {
Ok(id) => id,
Err(e) => return Some(Err(e.into())),
};
let event: UpdateEvent = match self.serialization.deserialize(&value) {
Ok(event) => event,
Err(e) => return Some(Err(e.into())),
};
// Advance for next iteration
it.advance();
return Some(Ok((id, event)));
}
}
None
}
fn size_hint(&self) -> (usize, Option<usize>) {
if let Ok(s) = self.size_hint.try_into() {
(s, Some(s))
} else {
(0, None)
}
}
}
impl Serialize for GraphUpdate {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let iter = self.iter().map_err(S::Error::custom)?;
let number_of_updates = self.len().map_err(S::Error::custom)?;
let mut map_serializer = serializer.serialize_map(Some(number_of_updates))?;
for entry in iter {
let (key, value) = entry.map_err(S::Error::custom)?;
map_serializer
.serialize_entry(&key, &value)
.map_err(S::Error::custom)?;
}
map_serializer.end()
}
}
struct GraphUpdateVisitor {}
impl<'de> Visitor<'de> for GraphUpdateVisitor {
type Value = GraphUpdate;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a list of graph updates")
}
fn visit_map<M>(self, mut access: M) -> std::result::Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let serialization = bincode::options();
let outfile = NamedTempFile::new().map_err(M::Error::custom)?;
let mut table_builder = TableBuilder::new(
sstable::Options::default(),
outfile.reopen().map_err(M::Error::custom)?,
);
let mut event_counter = 0;
while let Some((id, event)) = access
.next_entry::<u64, UpdateEvent>()
.map_err(M::Error::custom)?
{
event_counter = id;
let key = id.create_key();
let value = serialization.serialize(&event).map_err(M::Error::custom)?;
table_builder.add(&key, &value).map_err(M::Error::custom)?
}
let c = ChangeSet::InProgress {
outfile,
table_builder: Box::new(table_builder),
};
let mut changesets = vec![c];
finish_all_changesets(&mut changesets).map_err(M::Error::custom)?;
let g = GraphUpdate {
changesets: Mutex::new(changesets),
event_counter,
serialization,
};
Ok(g)
}
}
impl<'de> Deserialize<'de> for GraphUpdate {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(GraphUpdateVisitor {})
}
}
| ChangeSet | identifier_name |
update.rs | //! Types used to describe updates on graphs.
use std::convert::TryInto;
use std::fs::File;
use std::sync::Mutex;
use crate::errors::{GraphAnnisCoreError, Result};
use crate::serializer::KeySerializer;
use bincode::Options;
use serde::de::Error as DeserializeError;
use serde::de::{MapAccess, Visitor};
use serde::ser::{Error as SerializeError, SerializeMap};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use sstable::{SSIterator, Table, TableBuilder, TableIterator};
use tempfile::NamedTempFile;
/// Describes a single update on the graph.
#[derive(Serialize, Deserialize, Clone, Debug, MallocSizeOf)]
pub enum UpdateEvent {
/// Add a node with a name and type.
AddNode {
node_name: String,
node_type: String,
},
/// Delete a node given by the name.
DeleteNode { node_name: String },
/// Add a label to the node given by its name.
AddNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label of a node, given the name of the node and the qualified label name.
DeleteNodeLabel {
node_name: String,
anno_ns: String,
anno_name: String,
},
/// Add an edge between two nodes given by their name.
AddEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Delete an existing edge between two nodes given by their name.
DeleteEdge {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
},
/// Add a label to an edge between two nodes.
AddEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
anno_value: String,
},
/// Delete a label from an edge between two nodes.
DeleteEdgeLabel {
source_node: String,
target_node: String,
layer: String,
component_type: String,
component_name: String,
anno_ns: String,
anno_name: String,
},
}
enum ChangeSet {
InProgress {
table_builder: Box<TableBuilder<File>>,
outfile: NamedTempFile,
},
Finished {
table: Table,
},
}
/// A list of changes to apply to a graph.
pub struct GraphUpdate {
changesets: Mutex<Vec<ChangeSet>>,
event_counter: u64,
serialization: bincode::config::DefaultOptions,
}
impl Default for GraphUpdate {
fn default() -> Self {
GraphUpdate::new()
}
}
impl GraphUpdate {
/// Create a new empty list of updates.
pub fn new() -> GraphUpdate {
GraphUpdate {
event_counter: 0,
changesets: Mutex::new(Vec::new()),
serialization: bincode::options(),
}
}
/// Add the given event to the update list.
pub fn add_event(&mut self, event: UpdateEvent) -> Result<()> {
let new_event_counter = self.event_counter + 1;
let key = new_event_counter.create_key();
let value = self.serialization.serialize(&event)?;
let mut changeset = self.changesets.lock()?;
if let ChangeSet::InProgress { table_builder, .. } =
current_inprogress_changeset(&mut changeset)?
{
table_builder.add(&key, &value)?;
self.event_counter = new_event_counter;
}
Ok(())
}
/// Get all changes
pub fn iter(&self) -> Result<GraphUpdateIterator> {
let it = GraphUpdateIterator::new(self)?;
Ok(it)
}
/// Returns `true` if the update list is empty.
pub fn is_empty(&self) -> Result<bool> {
Ok(self.event_counter == 0)
}
/// Returns the number of updates.
pub fn len(&self) -> Result<usize> {
let result = self.event_counter.try_into()?;
Ok(result)
}
}
fn finish_all_changesets(changesets: &mut Vec<ChangeSet>) -> Result<()> {
// Remove all changesets from the vector and finish them
let finished: Result<Vec<ChangeSet>> = changesets
.drain(..)
.map(|c| match c {
ChangeSet::InProgress {
table_builder,
outfile,
} => {
table_builder.finish()?;
// Re-open as table
let file = outfile.reopen()?;
let size = file.metadata()?.len();
let table = Table::new(sstable::Options::default(), Box::new(file), size as usize)?;
Ok(ChangeSet::Finished { table })
}
ChangeSet::Finished { table } => Ok(ChangeSet::Finished { table }),
})
.collect();
// Re-add the finished changesets
changesets.extend(finished?);
Ok(())
}
fn current_inprogress_changeset(changesets: &mut Vec<ChangeSet>) -> Result<&mut ChangeSet> {
let needs_new_changeset = if let Some(c) = changesets.last_mut() {
match c {
ChangeSet::InProgress { .. } => false,
ChangeSet::Finished { .. } => true,
}
} else {
true
};
if needs_new_changeset {
// Create a new changeset
let outfile = NamedTempFile::new()?;
let table_builder = TableBuilder::new(sstable::Options::default(), outfile.reopen()?);
let c = ChangeSet::InProgress {
table_builder: Box::new(table_builder),
outfile,
};
changesets.push(c);
}
// Get the last changeset, which must be in the InProgress state
changesets
.last_mut()
.ok_or(GraphAnnisCoreError::GraphUpdatePersistanceFileMissing)
}
pub struct GraphUpdateIterator {
iterators: Vec<TableIterator>,
size_hint: u64,
serialization: bincode::config::DefaultOptions,
}
impl GraphUpdateIterator {
fn new(g: &GraphUpdate) -> Result<GraphUpdateIterator> {
let mut changesets = g.changesets.lock()?;
finish_all_changesets(&mut changesets)?;
let iterators: Vec<_> = changesets
.iter()
.filter_map(|c| match c {
ChangeSet::InProgress { .. } => None,
ChangeSet::Finished { table } => {
let mut it = table.iter();
it.seek_to_first();
Some(it)
}
})
.collect();
Ok(GraphUpdateIterator {
size_hint: g.event_counter,
iterators,
serialization: g.serialization,
})
}
}
impl std::iter::Iterator for GraphUpdateIterator {
type Item = Result<(u64, UpdateEvent)>;
fn next(&mut self) -> Option<Self::Item> |
fn size_hint(&self) -> (usize, Option<usize>) {
if let Ok(s) = self.size_hint.try_into() {
(s, Some(s))
} else {
(0, None)
}
}
}
impl Serialize for GraphUpdate {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
let iter = self.iter().map_err(S::Error::custom)?;
let number_of_updates = self.len().map_err(S::Error::custom)?;
let mut map_serializer = serializer.serialize_map(Some(number_of_updates))?;
for entry in iter {
let (key, value) = entry.map_err(S::Error::custom)?;
map_serializer
.serialize_entry(&key, &value)
.map_err(S::Error::custom)?;
}
map_serializer.end()
}
}
struct GraphUpdateVisitor {}
impl<'de> Visitor<'de> for GraphUpdateVisitor {
type Value = GraphUpdate;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a list of graph updates")
}
fn visit_map<M>(self, mut access: M) -> std::result::Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let serialization = bincode::options();
let outfile = NamedTempFile::new().map_err(M::Error::custom)?;
let mut table_builder = TableBuilder::new(
sstable::Options::default(),
outfile.reopen().map_err(M::Error::custom)?,
);
let mut event_counter = 0;
while let Some((id, event)) = access
.next_entry::<u64, UpdateEvent>()
.map_err(M::Error::custom)?
{
event_counter = id;
let key = id.create_key();
let value = serialization.serialize(&event).map_err(M::Error::custom)?;
table_builder.add(&key, &value).map_err(M::Error::custom)?
}
let c = ChangeSet::InProgress {
outfile,
table_builder: Box::new(table_builder),
};
let mut changesets = vec![c];
finish_all_changesets(&mut changesets).map_err(M::Error::custom)?;
let g = GraphUpdate {
changesets: Mutex::new(changesets),
event_counter,
serialization,
};
Ok(g)
}
}
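// Note (added; interpretation): the visitor above streams each incoming (id, event) entry straight
// into a temp-file-backed TableBuilder instead of collecting the events into a Vec, mirroring how
// `add_event` persists events when an update list is built incrementally.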
impl<'de> Deserialize<'de> for GraphUpdate {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(GraphUpdateVisitor {})
}
}
| {
// Remove all empty table iterators.
self.iterators.retain(|it| it.valid());
if let Some(it) = self.iterators.first_mut() {
// Get the current values
if let Some((key, value)) = sstable::current_key_val(it) {
// Create the actual types
let id = match u64::parse_key(&key) {
Ok(id) => id,
Err(e) => return Some(Err(e.into())),
};
let event: UpdateEvent = match self.serialization.deserialize(&value) {
Ok(event) => event,
Err(e) => return Some(Err(e.into())),
};
// Advance for next iteration
it.advance();
return Some(Ok((id, event)));
}
}
None
} | identifier_body |
main.rs | #![warn(clippy::all)]
#![forbid(unsafe_code)]
// Import from other crates.
use csv::ByteRecord;
use humansize::{file_size_opts, FileSize};
use lazy_static::lazy_static;
use log::debug;
use regex::bytes::Regex;
use std::{
borrow::Cow,
fs,
io::{self, prelude::*},
path::PathBuf,
process,
};
use structopt::StructOpt;
// Modules defined in separate files.
#[macro_use]
mod errors;
mod uniquifier;
mod util;
// Import from our own crates.
use crate::errors::*;
use crate::uniquifier::Uniquifier;
use crate::util::{now, CharSpecifier};
/// Use reasonably large input and output buffers. This seems to give us a
/// performance boost of around 5-10% compared to the standard 8 KiB buffer used
/// by `csv`.
const BUFFER_SIZE: usize = 256 * 1024;
/// Our command-line arguments.
#[derive(Debug, StructOpt)]
#[structopt(
name = "scrubcsv",
about = "Clean and normalize a CSV file.",
after_help = "Read a CSV file, normalize the \"good\" lines, and print them to standard
output. Discard any lines with the wrong number of columns.
Regular expressions use Rust syntax, as described here:
https://doc.rust-lang.org/regex/regex/index.html#syntax
scrubcsv should work with any ASCII-compatible encoding, but it will not
attempt to transcode.
Exit code:
0 on success
1 on error
2 if more than 10% of rows were bad"
)]
struct | {
/// Input file (uses stdin if omitted).
input: Option<PathBuf>,
/// Character used to separate fields in a row (must be a single ASCII
/// byte, or "tab").
#[structopt(
value_name = "CHAR",
short = "d",
long = "delimiter",
default_value = ","
)]
delimiter: CharSpecifier,
/// Convert values matching NULL_REGEX to an empty string. For a case-insensitive
/// match, use `(?i)`: `--null '(?i)NULL'`.
#[structopt(value_name = "NULL_REGEX", short = "n", long = "null")]
null: Option<String>,
/// Replace LF and CRLF sequences in values with spaces. This should improve
/// compatibility with systems like BigQuery that don't expect newlines
/// inside escaped strings.
#[structopt(long = "replace-newlines")]
replace_newlines: bool,
/// Remove whitespace at beginning and end of each cell.
#[structopt(long = "trim-whitespace")]
trim_whitespace: bool,
/// Make sure column names are unique, and use only lowercase letters, numbers
/// and underscores.
#[structopt(long = "clean-column-names")]
clean_column_names: bool,
/// Drop any rows where the specified column is empty or NULL. Can be passed
/// more than once. Useful for cleaning primary key columns before
/// upserting. Uses the cleaned form of column names.
#[structopt(value_name = "COL", long = "drop-row-if-null")]
drop_row_if_null: Vec<String>,
/// Do not print performance information.
#[structopt(short = "q", long = "quiet")]
quiet: bool,
/// Character used to quote entries. May be set to "none" to ignore all
/// quoting.
#[structopt(value_name = "CHAR", long = "quote", default_value = "\"")]
quote: CharSpecifier,
}
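// Example invocation (added; illustrative only — the column name `id` and the file names are made
// up, the flags are the ones defined above):
//
//     scrubcsv --null '(?i)null' --replace-newlines --trim-whitespace \
//         --clean-column-names --drop-row-if-null id input.csv > cleaned.csv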
lazy_static! {
/// Either a CRLF newline, a LF newline, or a CR newline. Any of these
/// will break certain CSV parsers, including BigQuery's CSV importer.
static ref NEWLINE_RE: Regex = Regex::new(r#"\n|\r\n?"#)
.expect("regex in source code is unparseable");
}
/// This is a helper function called by our `main` function. Unlike
/// `main`, we return a `Result`, which means that we can use `?` and other
/// standard error-handling machinery.
fn run() -> Result<()> {
// Set up logging.
env_logger::init();
// Parse our command-line arguments using `docopt`.
let opt: Opt = Opt::from_args();
debug!("Options: {:#?}", opt);
// Remember the time we started.
let start_time = now();
// Build a regex containing our `--null` value.
let null_re = if let Some(null_re_str) = opt.null.as_ref() {
// Always match the full CSV value.
let s = format!("^{}$", null_re_str);
let re = Regex::new(&s).context("can't compile regular expression")?;
Some(re)
} else {
None
};
// Fetch our input from either standard input or a file. The only tricky
// detail here is that we use a `Box<dyn Read>` to represent "some object
// implementing `Read`, stored on the heap." This allows us to do runtime
// dispatch (as if Rust were object oriented). But because `csv` wraps a
// `BufReader` around the box, we only do that dispatch once per buffer
// flush, not on every tiny write.
let stdin = io::stdin();
let input: Box<dyn Read> = if let Some(ref path) = opt.input {
Box::new(
fs::File::open(path)
.with_context(|_| format!("cannot open {}", path.display()))?,
)
} else {
Box::new(stdin.lock())
};
// Create our CSV reader.
let mut rdr_builder = csv::ReaderBuilder::new();
// Set a reasonable buffer size.
rdr_builder.buffer_capacity(BUFFER_SIZE);
// We need headers so that we can honor --drop-row-if-null.
rdr_builder.has_headers(true);
// Allow records with the wrong number of columns.
rdr_builder.flexible(true);
// Configure our delimiter.
if let Some(delimiter) = opt.delimiter.char() {
rdr_builder.delimiter(delimiter);
} else {
return Err(format_err!("field delimiter is required"));
}
// Configure our quote character.
if let Some(quote) = opt.quote.char() {
rdr_builder.quote(quote);
} else {
rdr_builder.quoting(false);
}
let mut rdr = rdr_builder.from_reader(input);
// We lock `stdout`, giving us exclusive access. In the past, this has made
// an enormous difference in performance.
let stdout = io::stdout();
let output = stdout.lock();
// Create our CSV writer. Note that we _don't_ allow variable numbers
// of columns, non-standard delimiters, or other nonsense: We want our
// output to be highly normalized.
let mut wtr = csv::WriterBuilder::new()
.buffer_capacity(BUFFER_SIZE)
.from_writer(output);
// Get our header and, if we were asked, make sure all the column names are unique.
let mut hdr = rdr
.byte_headers()
.context("cannot read headers")?
.to_owned();
if opt.clean_column_names {
let mut uniquifier = Uniquifier::default();
let mut new_hdr = ByteRecord::default();
for col in hdr.into_iter() {
// Convert from bytes to UTF-8, make unique (and clean), and convert back to bytes.
let col = String::from_utf8_lossy(col);
let col = uniquifier.unique_id_for(&col)?.to_owned();
new_hdr.push_field(col.as_bytes());
}
hdr = new_hdr;
}
// Write our header to our output.
wtr.write_byte_record(&hdr)
.context("cannot write headers")?;
// Calculate the number of expected columns.
let expected_cols = hdr.len();
// Just in case --drop-row-if-null was passed, precompute which columns are
// required to contain a value.
let required_cols = hdr
.iter()
.map(|name| -> bool {
opt.drop_row_if_null
.iter()
.any(|required_name| required_name.as_bytes() == name)
})
.collect::<Vec<bool>>();
// Keep track of total rows and malformed rows seen. We count the header as
// a row for backwards compatibility.
let mut rows: u64 = 1;
let mut bad_rows: u64 = 0;
// Can we use the fast path and copy the data through unchanged? Or do we
// need to clean up embedded newlines in our data? (These break BigQuery,
// for example.)
let use_fast_path = null_re.is_none()
&& !opt.replace_newlines
&& !opt.trim_whitespace
&& opt.drop_row_if_null.is_empty();
// Iterate over all the rows, checking to make sure they look reasonable.
//
// If we use the lowest-level, zero-copy API for `csv`, we can process about
// 225 MB/s. But it turns out we can't do that, because we need to count
// all the row's fields before deciding whether or not to write it out.
'next_row: for record in rdr.byte_records() {
let record = record.context("cannot read record")?;
// Keep track of how many rows we've seen.
rows += 1;
// Check if we have the right number of columns in this row.
if record.len() != expected_cols {
bad_rows += 1;
debug!(
"row {}: expected {} columns, found {}",
rows,
expected_cols,
record.len(),
);
continue 'next_row;
}
// Decide how to handle this row.
if use_fast_path {
// We don't need to do anything fancy, so just pass it through.
// I'm not sure how much this actually buys us in current Rust
// versions, but it seemed like a good idea at the time.
wtr.write_record(record.into_iter())
.context("cannot write record")?;
} else {
// We need to apply one or more cleanups, so run the slow path.
let cleaned = record.into_iter().map(|mut val: &[u8]| -> Cow<[u8]> {
// Convert values matching `--null` regex to empty strings.
if let Some(ref null_re) = null_re {
if null_re.is_match(val) {
val = &[]
}
}
// Remove whitespace from our cells.
if opt.trim_whitespace {
// We do this manually, because the built-in `trim` only
// works on UTF-8 strings, and we work on any
// "ASCII-compatible" encoding.
let first = val.iter().position(|c| !c.is_ascii_whitespace());
let last = val.iter().rposition(|c| !c.is_ascii_whitespace());
val = match (first, last) {
(Some(first), Some(last)) if first <= last => {
&val[first..=last]
}
(None, None) => &[],
_ => panic!(
"tried to trim {:?}, got impossible indices {:?} {:?}",
val, first, last,
),
};
}
// Fix newlines.
if opt.replace_newlines
&& (val.contains(&b'\n') || val.contains(&b'\r'))
{
NEWLINE_RE.replace_all(val, &b" "[..])
} else {
Cow::Borrowed(val)
}
});
if opt.drop_row_if_null.is_empty() {
// Still somewhat fast!
wtr.write_record(cleaned).context("cannot write record")?;
} else {
// We need to rebuild the record, check for null columns,
// and only output the record if everything's OK.
let row = cleaned.collect::<Vec<Cow<[u8]>>>();
for (value, &is_required_col) in row.iter().zip(required_cols.iter()) {
// If the column is NULL but shouldn't be, bail on this row.
if is_required_col && value.is_empty() {
bad_rows += 1;
debug!("row {}: required column is empty", rows);
continue 'next_row;
}
}
wtr.write_record(row).context("cannot write record")?;
}
}
}
// Flush all our buffers.
wtr.flush().context("error writing records")?;
// Print out some information about our run.
if !opt.quiet {
let elapsed = (now() - start_time).as_seconds_f64();
let bytes_per_second = (rdr.position().byte() as f64 / elapsed) as i64;
eprintln!(
"{} rows ({} bad) in {:.2} seconds, {}/sec",
rows,
bad_rows,
elapsed,
bytes_per_second.file_size(file_size_opts::BINARY)?,
);
}
// If more than 10% of rows are bad, assume something has gone horribly
// wrong.
if bad_rows.checked_mul(10).expect("multiplication overflow") > rows {
eprintln!("Too many rows ({} of {}) were bad", bad_rows, rows);
process::exit(2);
}
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("ERROR: {}", err);
let mut source = err.source();
while let Some(cause) = source {
eprintln!(" caused by: {}", cause);
source = cause.source();
}
process::exit(1);
}
}
| Opt | identifier_name |
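A minimal standalone sketch (not part of scrubcsv; it assumes the `regex` crate and a hypothetical `user_pattern` value) of the `--null` anchoring shown in `run()` above: the user-supplied pattern is wrapped in `^...$`, so only a cell that consists entirely of the pattern gets blanked out.

use regex::bytes::Regex;

fn main() {
    // Hypothetical value a user might pass via --null.
    let user_pattern = r"(?i)null";
    // Anchor the pattern so it must match the full CSV value, as run() does.
    let anchored = format!("^{}$", user_pattern);
    let null_re = Regex::new(&anchored).expect("pattern should compile");
    // A cell that is exactly NULL (any case) would be emptied...
    assert!(null_re.is_match(b"NULL"));
    // ...but a cell that merely contains the pattern is left alone.
    assert!(!null_re.is_match(b"NULLABLE"));
    println!("anchored pattern: {}", anchored);
}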
main.rs | #![warn(clippy::all)]
#![forbid(unsafe_code)]
// Import from other crates.
use csv::ByteRecord;
use humansize::{file_size_opts, FileSize};
use lazy_static::lazy_static;
use log::debug;
use regex::bytes::Regex;
use std::{
borrow::Cow,
fs,
io::{self, prelude::*},
path::PathBuf,
process,
};
use structopt::StructOpt;
// Modules defined in separate files.
#[macro_use]
mod errors;
mod uniquifier;
mod util;
// Import from our own crates.
use crate::errors::*;
use crate::uniquifier::Uniquifier;
use crate::util::{now, CharSpecifier};
/// Use reasonably large input and output buffers. This seems to give us a
/// performance boost of around 5-10% compared to the standard 8 KiB buffer used
/// by `csv`.
const BUFFER_SIZE: usize = 256 * 1024;
/// Our command-line arguments.
#[derive(Debug, StructOpt)]
#[structopt(
name = "scrubcsv",
about = "Clean and normalize a CSV file.",
after_help = "Read a CSV file, normalize the \"good\" lines, and print them to standard
output. Discard any lines with the wrong number of columns.
Regular expressions use Rust syntax, as described here:
https://doc.rust-lang.org/regex/regex/index.html#syntax
scrubcsv should work with any ASCII-compatible encoding, but it will not
attempt to transcode.
Exit code:
0 on success
1 on error
2 if more than 10% of rows were bad"
)]
struct Opt {
/// Input file (uses stdin if omitted).
input: Option<PathBuf>,
/// Character used to separate fields in a row (must be a single ASCII
/// byte, or "tab").
#[structopt(
value_name = "CHAR",
short = "d",
long = "delimiter",
default_value = ","
)]
delimiter: CharSpecifier,
/// Convert values matching NULL_REGEX to an empty string. For a case-insensitive
/// match, use `(?i)`: `--null '(?i)NULL'`.
#[structopt(value_name = "NULL_REGEX", short = "n", long = "null")]
null: Option<String>,
/// Replace LF and CRLF sequences in values with spaces. This should improve
/// compatibility with systems like BigQuery that don't expect newlines
/// inside escaped strings.
#[structopt(long = "replace-newlines")]
replace_newlines: bool,
/// Remove whitespace at beginning and end of each cell.
#[structopt(long = "trim-whitespace")]
trim_whitespace: bool,
/// Make sure column names are unique, and use only lowercase letters, numbers
/// and underscores.
#[structopt(long = "clean-column-names")]
clean_column_names: bool,
/// Drop any rows where the specified column is empty or NULL. Can be passed
/// more than once. Useful for cleaning primary key columns before
/// upserting. Uses the cleaned form of column names.
#[structopt(value_name = "COL", long = "drop-row-if-null")]
drop_row_if_null: Vec<String>,
/// Do not print performance information.
#[structopt(short = "q", long = "quiet")]
quiet: bool,
/// Character used to quote entries. May be set to "none" to ignore all
/// quoting.
#[structopt(value_name = "CHAR", long = "quote", default_value = "\"")]
quote: CharSpecifier,
}
lazy_static! {
/// Either a CRLF newline, a LF newline, or a CR newline. Any of these
/// will break certain CSV parsers, including BigQuery's CSV importer.
static ref NEWLINE_RE: Regex = Regex::new(r#"\n|\r\n?"#)
.expect("regex in source code is unparseable");
}
/// This is a helper function called by our `main` function. Unlike
/// `main`, we return a `Result`, which means that we can use `?` and other
/// standard error-handling machinery.
fn run() -> Result<()> {
// Set up logging.
env_logger::init();
// Parse our command-line arguments using `structopt`.
let opt: Opt = Opt::from_args();
debug!("Options: {:#?}", opt);
// Remember the time we started.
let start_time = now();
// Build a regex containing our `--null` value.
let null_re = if let Some(null_re_str) = opt.null.as_ref() {
// Always match the full CSV value.
let s = format!("^{}$", null_re_str);
let re = Regex::new(&s).context("can't compile regular expression")?;
Some(re)
} else {
None
};
// Fetch our input from either standard input or a file. The only tricky
// detail here is that we use a `Box<dyn Read>` to represent "some object | // flush, not on every tiny write.
let stdin = io::stdin();
let input: Box<dyn Read> = if let Some(ref path) = opt.input {
Box::new(
fs::File::open(path)
.with_context(|_| format!("cannot open {}", path.display()))?,
)
} else {
Box::new(stdin.lock())
};
// Create our CSV reader.
let mut rdr_builder = csv::ReaderBuilder::new();
// Set a reasonable buffer size.
rdr_builder.buffer_capacity(BUFFER_SIZE);
// We need headers so that we can honor --drop-row-if-null.
rdr_builder.has_headers(true);
// Allow records with the wrong number of columns.
rdr_builder.flexible(true);
// Configure our delimiter.
if let Some(delimiter) = opt.delimiter.char() {
rdr_builder.delimiter(delimiter);
} else {
return Err(format_err!("field delimiter is required"));
}
// Configure our quote character.
if let Some(quote) = opt.quote.char() {
rdr_builder.quote(quote);
} else {
rdr_builder.quoting(false);
}
let mut rdr = rdr_builder.from_reader(input);
// We lock `stdout`, giving us exclusive access. In the past, this has made
// an enormous difference in performance.
let stdout = io::stdout();
let output = stdout.lock();
// Create our CSV writer. Note that we _don't_ allow variable numbers
// of columns, non-standard delimiters, or other nonsense: We want our
// output to be highly normalized.
let mut wtr = csv::WriterBuilder::new()
.buffer_capacity(BUFFER_SIZE)
.from_writer(output);
// Get our header and, if we were asked, make sure all the column names are unique.
let mut hdr = rdr
.byte_headers()
.context("cannot read headers")?
.to_owned();
if opt.clean_column_names {
let mut uniquifier = Uniquifier::default();
let mut new_hdr = ByteRecord::default();
for col in hdr.into_iter() {
// Convert from bytes to UTF-8, make unique (and clean), and convert back to bytes.
let col = String::from_utf8_lossy(col);
let col = uniquifier.unique_id_for(&col)?.to_owned();
new_hdr.push_field(col.as_bytes());
}
hdr = new_hdr;
}
// Write our header to our output.
wtr.write_byte_record(&hdr)
.context("cannot write headers")?;
// Calculate the number of expected columns.
let expected_cols = hdr.len();
// Just in case --drop-row-if-null was passed, precompute which columns are
// required to contain a value.
let required_cols = hdr
.iter()
.map(|name| -> bool {
opt.drop_row_if_null
.iter()
.any(|required_name| required_name.as_bytes() == name)
})
.collect::<Vec<bool>>();
// Keep track of total rows and malformed rows seen. We count the header as
// a row for backwards compatibility.
let mut rows: u64 = 1;
let mut bad_rows: u64 = 0;
// Can we use the fast path and copy the data through unchanged? Or do we
// need to clean up embedded newlines in our data? (These break BigQuery,
// for example.)
let use_fast_path = null_re.is_none()
&& !opt.replace_newlines
&& !opt.trim_whitespace
&& opt.drop_row_if_null.is_empty();
// Iterate over all the rows, checking to make sure they look reasonable.
//
// If we use the lowest-level, zero-copy API for `csv`, we can process about
// 225 MB/s. But it turns out we can't do that, because we need to count
// all the row's fields before deciding whether or not to write it out.
'next_row: for record in rdr.byte_records() {
let record = record.context("cannot read record")?;
// Keep track of how many rows we've seen.
rows += 1;
// Check if we have the right number of columns in this row.
if record.len() != expected_cols {
bad_rows += 1;
debug!(
"row {}: expected {} columns, found {}",
rows,
expected_cols,
record.len(),
);
continue 'next_row;
}
// Decide how to handle this row.
if use_fast_path {
// We don't need to do anything fancy, so just pass it through.
// I'm not sure how much this actually buys us in current Rust
// versions, but it seemed like a good idea at the time.
wtr.write_record(record.into_iter())
.context("cannot write record")?;
} else {
// We need to apply one or more cleanups, so run the slow path.
let cleaned = record.into_iter().map(|mut val: &[u8]| -> Cow<[u8]> {
// Convert values matching `--null` regex to empty strings.
if let Some(ref null_re) = null_re {
if null_re.is_match(val) {
val = &[]
}
}
// Remove whitespace from our cells.
if opt.trim_whitespace {
// We do this manually, because the built-in `trim` only
// works on UTF-8 strings, and we work on any
// "ASCII-compatible" encoding.
let first = val.iter().position(|c| !c.is_ascii_whitespace());
let last = val.iter().rposition(|c| !c.is_ascii_whitespace());
val = match (first, last) {
(Some(first), Some(last)) if first <= last => {
&val[first..=last]
}
(None, None) => &[],
_ => panic!(
"tried to trim {:?}, got impossible indices {:?} {:?}",
val, first, last,
),
};
}
// Fix newlines.
if opt.replace_newlines
&& (val.contains(&b'\n') || val.contains(&b'\r'))
{
NEWLINE_RE.replace_all(val, &b" "[..])
} else {
Cow::Borrowed(val)
}
});
if opt.drop_row_if_null.is_empty() {
// Still somewhat fast!
wtr.write_record(cleaned).context("cannot write record")?;
} else {
// We need to rebuild the record, check for null columns,
// and only output the record if everything's OK.
let row = cleaned.collect::<Vec<Cow<[u8]>>>();
for (value, &is_required_col) in row.iter().zip(required_cols.iter()) {
// If the column is NULL but shouldn't be, bail on this row.
if is_required_col && value.is_empty() {
bad_rows += 1;
debug!("row {}: required column is empty", rows);
continue 'next_row;
}
}
wtr.write_record(row).context("cannot write record")?;
}
}
}
// Flush all our buffers.
wtr.flush().context("error writing records")?;
// Print out some information about our run.
if !opt.quiet {
let elapsed = (now() - start_time).as_seconds_f64();
let bytes_per_second = (rdr.position().byte() as f64 / elapsed) as i64;
eprintln!(
"{} rows ({} bad) in {:.2} seconds, {}/sec",
rows,
bad_rows,
elapsed,
bytes_per_second.file_size(file_size_opts::BINARY)?,
);
}
// If more than 10% of rows are bad, assume something has gone horribly
// wrong.
if bad_rows.checked_mul(10).expect("multiplication overflow") > rows {
eprintln!("Too many rows ({} of {}) were bad", bad_rows, rows);
process::exit(2);
}
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("ERROR: {}", err);
let mut source = err.source();
while let Some(cause) = source {
eprintln!(" caused by: {}", cause);
source = cause.source();
}
process::exit(1);
}
} | // implementing `Read`, stored on the heap." This allows us to do runtime
// dispatch (as if Rust were object oriented). But because `csv` wraps a
// `BufReader` around the box, we only do that dispatch once per buffer | random_line_split |
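A standalone sketch (standard library only, not part of scrubcsv) of the byte-level trimming used on the slow path above; it works on any ASCII-compatible encoding instead of requiring valid UTF-8 the way `str::trim` does.

fn trim_ascii_bytes(val: &[u8]) -> &[u8] {
    // Find the first and last non-whitespace bytes, mirroring the logic in run().
    let first = val.iter().position(|c| !c.is_ascii_whitespace());
    let last = val.iter().rposition(|c| !c.is_ascii_whitespace());
    match (first, last) {
        (Some(first), Some(last)) if first <= last => &val[first..=last],
        // An all-whitespace (or empty) cell becomes an empty value.
        _ => &[],
    }
}

fn main() {
    assert_eq!(trim_ascii_bytes(b"  hello \t"), &b"hello"[..]);
    assert_eq!(trim_ascii_bytes(b"   "), &b""[..]);
    println!("byte-level trim ok");
}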
main.rs | #![warn(clippy::all)]
#![forbid(unsafe_code)]
// Import from other crates.
use csv::ByteRecord;
use humansize::{file_size_opts, FileSize};
use lazy_static::lazy_static;
use log::debug;
use regex::bytes::Regex;
use std::{
borrow::Cow,
fs,
io::{self, prelude::*},
path::PathBuf,
process,
};
use structopt::StructOpt;
// Modules defined in separate files.
#[macro_use]
mod errors;
mod uniquifier;
mod util;
// Import from our own crates.
use crate::errors::*;
use crate::uniquifier::Uniquifier;
use crate::util::{now, CharSpecifier};
/// Use reasonably large input and output buffers. This seems to give us a
/// performance boost of around 5-10% compared to the standard 8 KiB buffer used
/// by `csv`.
const BUFFER_SIZE: usize = 256 * 1024;
/// Our command-line arguments.
#[derive(Debug, StructOpt)]
#[structopt(
name = "scrubcsv",
about = "Clean and normalize a CSV file.",
after_help = "Read a CSV file, normalize the \"good\" lines, and print them to standard
output. Discard any lines with the wrong number of columns.
Regular expressions use Rust syntax, as described here:
https://doc.rust-lang.org/regex/regex/index.html#syntax
scrubcsv should work with any ASCII-compatible encoding, but it will not
attempt to transcode.
Exit code:
0 on success
1 on error
2 if more than 10% of rows were bad"
)]
struct Opt {
/// Input file (uses stdin if omitted).
input: Option<PathBuf>,
/// Character used to separate fields in a row (must be a single ASCII
/// byte, or "tab").
#[structopt(
value_name = "CHAR",
short = "d",
long = "delimiter",
default_value = ","
)]
delimiter: CharSpecifier,
/// Convert values matching NULL_REGEX to an empty string. For a case-insensitive
/// match, use `(?i)`: `--null '(?i)NULL'`.
#[structopt(value_name = "NULL_REGEX", short = "n", long = "null")]
null: Option<String>,
/// Replace LF and CRLF sequences in values with spaces. This should improve
/// compatibility with systems like BigQuery that don't expect newlines
/// inside escaped strings.
#[structopt(long = "replace-newlines")]
replace_newlines: bool,
/// Remove whitespace at beginning and end of each cell.
#[structopt(long = "trim-whitespace")]
trim_whitespace: bool,
/// Make sure column names are unique, and use only lowercase letters, numbers
/// and underscores.
#[structopt(long = "clean-column-names")]
clean_column_names: bool,
/// Drop any rows where the specified column is empty or NULL. Can be passed
/// more than once. Useful for cleaning primary key columns before
/// upserting. Uses the cleaned form of column names.
#[structopt(value_name = "COL", long = "drop-row-if-null")]
drop_row_if_null: Vec<String>,
/// Do not print performance information.
#[structopt(short = "q", long = "quiet")]
quiet: bool,
/// Character used to quote entries. May be set to "none" to ignore all
/// quoting.
#[structopt(value_name = "CHAR", long = "quote", default_value = "\"")]
quote: CharSpecifier,
}
lazy_static! {
/// Either a CRLF newline, a LF newline, or a CR newline. Any of these
/// will break certain CSV parsers, including BigQuery's CSV importer.
static ref NEWLINE_RE: Regex = Regex::new(r#"\n|\r\n?"#)
.expect("regex in source code is unparseable");
}
/// This is a helper function called by our `main` function. Unlike
/// `main`, we return a `Result`, which means that we can use `?` and other
/// standard error-handling machinery.
fn run() -> Result<()> {
// Set up logging.
env_logger::init();
// Parse our command-line arguments using `structopt`.
let opt: Opt = Opt::from_args();
debug!("Options: {:#?}", opt);
// Remember the time we started.
let start_time = now();
// Build a regex containing our `--null` value.
let null_re = if let Some(null_re_str) = opt.null.as_ref() {
// Always match the full CSV value.
let s = format!("^{}$", null_re_str);
let re = Regex::new(&s).context("can't compile regular expression")?;
Some(re)
} else {
None
};
// Fetch our input from either standard input or a file. The only tricky
// detail here is that we use a `Box<dyn Read>` to represent "some object
// implementing `Read`, stored on the heap." This allows us to do runtime
// dispatch (as if Rust were object oriented). But because `csv` wraps a
// `BufReader` around the box, we only do that dispatch once per buffer
// flush, not on every tiny write.
let stdin = io::stdin();
let input: Box<dyn Read> = if let Some(ref path) = opt.input {
Box::new(
fs::File::open(path)
.with_context(|_| format!("cannot open {}", path.display()))?,
)
} else {
Box::new(stdin.lock())
};
// Create our CSV reader.
let mut rdr_builder = csv::ReaderBuilder::new();
// Set a reasonable buffer size.
rdr_builder.buffer_capacity(BUFFER_SIZE);
// We need headers so that we can honor --drop-row-if-null.
rdr_builder.has_headers(true);
// Allow records with the wrong number of columns.
rdr_builder.flexible(true);
// Configure our delimiter.
if let Some(delimiter) = opt.delimiter.char() {
rdr_builder.delimiter(delimiter);
} else {
return Err(format_err!("field delimiter is required"));
}
// Configure our quote character.
if let Some(quote) = opt.quote.char() {
rdr_builder.quote(quote);
} else {
rdr_builder.quoting(false);
}
let mut rdr = rdr_builder.from_reader(input);
// We lock `stdout`, giving us exclusive access. In the past, this has made
// an enormous difference in performance.
let stdout = io::stdout();
let output = stdout.lock();
// Create our CSV writer. Note that we _don't_ allow variable numbers
// of columns, non-standard delimiters, or other nonsense: We want our
// output to be highly normalized.
let mut wtr = csv::WriterBuilder::new()
.buffer_capacity(BUFFER_SIZE)
.from_writer(output);
// Get our header and, if we were asked, make sure all the column names are unique.
let mut hdr = rdr
.byte_headers()
.context("cannot read headers")?
.to_owned();
if opt.clean_column_names {
let mut uniquifier = Uniquifier::default();
let mut new_hdr = ByteRecord::default();
for col in hdr.into_iter() {
// Convert from bytes to UTF-8, make unique (and clean), and convert back to bytes.
let col = String::from_utf8_lossy(col);
let col = uniquifier.unique_id_for(&col)?.to_owned();
new_hdr.push_field(col.as_bytes());
}
hdr = new_hdr;
}
// Write our header to our output.
wtr.write_byte_record(&hdr)
.context("cannot write headers")?;
// Calculate the number of expected columns.
let expected_cols = hdr.len();
// Just in case --drop-row-if-null was passed, precompute which columns are
// required to contain a value.
let required_cols = hdr
.iter()
.map(|name| -> bool {
opt.drop_row_if_null
.iter()
.any(|required_name| required_name.as_bytes() == name)
})
.collect::<Vec<bool>>();
// Keep track of total rows and malformed rows seen. We count the header as
// a row for backwards compatibility.
let mut rows: u64 = 1;
let mut bad_rows: u64 = 0;
// Can we use the fast path and copy the data through unchanged? Or do we
// need to clean up embedded newlines in our data? (These break BigQuery,
// for example.)
let use_fast_path = null_re.is_none()
&& !opt.replace_newlines
&& !opt.trim_whitespace
&& opt.drop_row_if_null.is_empty();
// Iterate over all the rows, checking to make sure they look reasonable.
//
// If we use the lowest-level, zero-copy API for `csv`, we can process about
// 225 MB/s. But it turns out we can't do that, because we need to count
// all the row's fields before deciding whether or not to write it out.
'next_row: for record in rdr.byte_records() {
let record = record.context("cannot read record")?;
// Keep track of how many rows we've seen.
rows += 1;
// Check if we have the right number of columns in this row.
if record.len() != expected_cols {
bad_rows += 1;
debug!(
"row {}: expected {} columns, found {}",
rows,
expected_cols,
record.len(),
);
continue 'next_row;
}
// Decide how to handle this row.
if use_fast_path {
// We don't need to do anything fancy, so just pass it through.
// I'm not sure how much this actually buys us in current Rust
// versions, but it seemed like a good idea at the time.
wtr.write_record(record.into_iter())
.context("cannot write record")?;
} else {
// We need to apply one or more cleanups, so run the slow path.
let cleaned = record.into_iter().map(|mut val: &[u8]| -> Cow<[u8]> {
// Convert values matching `--null` regex to empty strings.
if let Some(ref null_re) = null_re |
// Remove whitespace from our cells.
if opt.trim_whitespace {
// We do this manually, because the built-in `trim` only
// works on UTF-8 strings, and we work on any
// "ASCII-compatible" encoding.
let first = val.iter().position(|c| !c.is_ascii_whitespace());
let last = val.iter().rposition(|c| !c.is_ascii_whitespace());
val = match (first, last) {
(Some(first), Some(last)) if first <= last => {
&val[first..=last]
}
(None, None) => &[],
_ => panic!(
"tried to trim {:?}, got impossible indices {:?} {:?}",
val, first, last,
),
};
}
// Fix newlines.
if opt.replace_newlines
&& (val.contains(&b'\n') || val.contains(&b'\r'))
{
NEWLINE_RE.replace_all(val, &b" "[..])
} else {
Cow::Borrowed(val)
}
});
if opt.drop_row_if_null.is_empty() {
// Still somewhat fast!
wtr.write_record(cleaned).context("cannot write record")?;
} else {
// We need to rebuild the record, check for null columns,
// and only output the record if everything's OK.
let row = cleaned.collect::<Vec<Cow<[u8]>>>();
for (value, &is_required_col) in row.iter().zip(required_cols.iter()) {
// If the column is NULL but shouldn't be, bail on this row.
if is_required_col && value.is_empty() {
bad_rows += 1;
debug!("row {}: required column is empty", rows);
continue 'next_row;
}
}
wtr.write_record(row).context("cannot write record")?;
}
}
}
// Flush all our buffers.
wtr.flush().context("error writing records")?;
// Print out some information about our run.
if !opt.quiet {
let elapsed = (now() - start_time).as_seconds_f64();
let bytes_per_second = (rdr.position().byte() as f64 / elapsed) as i64;
eprintln!(
"{} rows ({} bad) in {:.2} seconds, {}/sec",
rows,
bad_rows,
elapsed,
bytes_per_second.file_size(file_size_opts::BINARY)?,
);
}
// If more than 10% of rows are bad, assume something has gone horribly
// wrong.
if bad_rows.checked_mul(10).expect("multiplication overflow") > rows {
eprintln!("Too many rows ({} of {}) were bad", bad_rows, rows);
process::exit(2);
}
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("ERROR: {}", err);
let mut source = err.source();
while let Some(cause) = source {
eprintln!(" caused by: {}", cause);
source = cause.source();
}
process::exit(1);
}
}
| {
if null_re.is_match(val) {
val = &[]
}
} | conditional_block |
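A small sketch (it assumes the `regex` crate) of the `--replace-newlines` step above: `replace_all` on `regex::bytes::Regex` returns a `Cow<[u8]>`, so cells without newlines stay borrowed and only cells that actually need fixing are copied.

use regex::bytes::Regex;
use std::borrow::Cow;

fn main() {
    let newline_re = Regex::new(r"\n|\r\n?").expect("static regex compiles");
    // A CRLF inside a quoted cell is collapsed to a single space.
    let cleaned = newline_re.replace_all(b"line1\r\nline2", &b" "[..]);
    assert_eq!(cleaned.as_ref(), &b"line1 line2"[..]);
    // Input without newlines stays borrowed, avoiding an allocation per cell.
    let untouched = newline_re.replace_all(b"no newlines here", &b" "[..]);
    assert!(matches!(untouched, Cow::Borrowed(_)));
    println!("{}", String::from_utf8_lossy(&cleaned));
}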
cmd.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"net"
"net/netip"
"os/exec"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/cmd/pilot-agent/config"
"istio.io/istio/pilot/cmd/pilot-agent/options"
"istio.io/istio/pilot/cmd/pilot-agent/status"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pkg/bootstrap"
"istio.io/istio/pkg/cmd"
"istio.io/istio/pkg/collateral"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/envoy"
istio_agent "istio.io/istio/pkg/istio-agent"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/version"
stsserver "istio.io/istio/security/pkg/stsservice/server"
"istio.io/istio/security/pkg/stsservice/tokenmanager"
cleaniptables "istio.io/istio/tools/istio-clean-iptables/pkg/cmd"
iptables "istio.io/istio/tools/istio-iptables/pkg/cmd"
iptableslog "istio.io/istio/tools/istio-iptables/pkg/log"
)
const (
localHostIPv4 = "127.0.0.1"
localHostIPv6 = "::1"
)
var (
loggingOptions = log.DefaultOptions()
proxyArgs options.ProxyArgs
)
func NewRootCommand() *cobra.Command {
rootCmd := &cobra.Command{
Use: "pilot-agent",
Short: "Istio Pilot agent.",
Long: "Istio Pilot agent runs in the sidecar or gateway container and bootstraps Envoy.",
SilenceUsage: true,
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
}
// Attach the Istio logging options to the command.
loggingOptions.AttachCobraFlags(rootCmd)
cmd.AddFlags(rootCmd)
proxyCmd := newProxyCommand()
addFlags(proxyCmd)
rootCmd.AddCommand(proxyCmd)
rootCmd.AddCommand(requestCmd)
rootCmd.AddCommand(waitCmd)
rootCmd.AddCommand(version.CobraCommand())
rootCmd.AddCommand(iptables.GetCommand())
rootCmd.AddCommand(cleaniptables.GetCommand())
rootCmd.AddCommand(collateral.CobraCommand(rootCmd, &doc.GenManHeader{
Title: "Istio Pilot Agent",
Section: "pilot-agent CLI",
Manual: "Istio Pilot Agent",
}))
return rootCmd
}
func newProxyCommand() *cobra.Command {
return &cobra.Command{
Use: "proxy",
Short: "XDS proxy agent",
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
PersistentPreRunE: configureLogging,
RunE: func(c *cobra.Command, args []string) error {
cmd.PrintFlags(c.Flags())
log.Infof("Version %s", version.Info.String())
logLimits()
proxy, err := initProxy(args)
if err != nil {
return err
}
proxyConfig, err := config.ConstructProxyConfig(proxyArgs.MeshConfigFile, proxyArgs.ServiceCluster, options.ProxyConfigEnv, proxyArgs.Concurrency, proxy)
if err != nil {
return fmt.Errorf("failed to get proxy config: %v", err)
}
if out, err := protomarshal.ToYAML(proxyConfig); err != nil {
log.Infof("Failed to serialize to YAML: %v", err)
} else {
log.Infof("Effective config: %s", out)
}
secOpts, err := options.NewSecurityOptions(proxyConfig, proxyArgs.StsPort, proxyArgs.TokenManagerPlugin)
if err != nil {
return err
}
// If security token service (STS) port is not zero, start STS server and
// listen on STS port for STS requests. For STS, see
// https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16.
// STS is used for stackdriver or other Envoy services using google gRPC.
if proxyArgs.StsPort > 0 {
stsServer, err := initStsServer(proxy, secOpts.TokenManager)
if err != nil {
return err
}
defer stsServer.Stop()
}
// If we are using a custom template file (for control plane proxy, for example), configure this.
if proxyArgs.TemplateFile != "" && proxyConfig.CustomConfigFile == "" {
proxyConfig.ProxyBootstrapTemplatePath = proxyArgs.TemplateFile
}
envoyOptions := envoy.ProxyConfig{
LogLevel: proxyArgs.ProxyLogLevel,
ComponentLogLevel: proxyArgs.ProxyComponentLogLevel,
LogAsJSON: loggingOptions.JSONEncoding,
NodeIPs: proxy.IPAddresses,
Sidecar: proxy.Type == model.SidecarProxy,
OutlierLogPath: proxyArgs.OutlierLogPath,
}
agentOptions := options.NewAgentOptions(proxy, proxyConfig)
agent := istio_agent.NewAgent(proxyConfig, agentOptions, secOpts, envoyOptions)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
defer agent.Close()
// If a status port was provided, start handling status probes.
if proxyConfig.StatusPort > 0 {
if err := initStatusServer(ctx, proxy, proxyConfig,
agentOptions.EnvoyPrometheusPort, proxyArgs.EnableProfiling, agent); err != nil {
return err
}
}
go iptableslog.ReadNFLOGSocket(ctx)
// On SIGINT or SIGTERM, cancel the context, triggering a graceful shutdown
go cmd.WaitSignalFunc(cancel)
// Start in process SDS, dns server, xds proxy, and Envoy.
wait, err := agent.Run(ctx)
if err != nil {
return err
}
wait()
return nil
},
}
}
func addFlags(proxyCmd *cobra.Command) {
proxyArgs = options.NewProxyArgs()
proxyCmd.PersistentFlags().StringVar(&proxyArgs.DNSDomain, "domain", "",
"DNS domain suffix. If not provided uses ${POD_NAMESPACE}.svc.cluster.local")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.MeshConfigFile, "meshConfig", "./etc/istio/config/mesh",
"File name for Istio mesh configuration. If not specified, a default mesh will be used. This may be overridden by "+
"PROXY_CONFIG environment variable or proxy.istio.io/config annotation.")
proxyCmd.PersistentFlags().IntVar(&proxyArgs.StsPort, "stsPort", 0,
"HTTP Port on which to serve Security Token Service (STS). If zero, STS service will not be provided.")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TokenManagerPlugin, "tokenManagerPlugin", tokenmanager.GoogleTokenExchange,
"Token provider specific plugin name.")
// DEPRECATED. Flags for proxy configuration
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ServiceCluster, "serviceCluster", constants.ServiceClusterName, "Service cluster")
// Log levels are provided by the library https://github.com/gabime/spdlog, used by Envoy.
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyLogLevel, "proxyLogLevel", "warning,misc:error",
fmt.Sprintf("The log level used to start the Envoy proxy (choose from {%s, %s, %s, %s, %s, %s, %s})."+
"Level may also include one or more scopes, such as 'info,misc:error,upstream:debug'",
"trace", "debug", "info", "warning", "error", "critical", "off"))
proxyCmd.PersistentFlags().IntVar(&proxyArgs.Concurrency, "concurrency", 0, "number of worker threads to run")
// See https://www.envoyproxy.io/docs/envoy/latest/operations/cli#cmdoption-component-log-level
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyComponentLogLevel, "proxyComponentLogLevel", "",
"The component log level used to start the Envoy proxy. Deprecated, use proxyLogLevel instead")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TemplateFile, "templateFile", "",
"Go template bootstrap config")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.OutlierLogPath, "outlierLogPath", "",
"The log path for outlier detection")
proxyCmd.PersistentFlags().BoolVar(&proxyArgs.EnableProfiling, "profiling", true,
"Enable profiling via web interface host:port/debug/pprof/.")
}
func initStatusServer(ctx context.Context, proxy *model.Proxy, proxyConfig *meshconfig.ProxyConfig,
envoyPrometheusPort int, enableProfiling bool, agent *istio_agent.Agent,
) error |
func initStsServer(proxy *model.Proxy, tokenManager security.TokenManager) (*stsserver.Server, error) {
localHostAddr := localHostIPv4
if proxy.IsIPv6() {
localHostAddr = localHostIPv6
} else {
// if not ipv6-only, it can be ipv4-only or dual-stack
// let InstanceIP decide the localhost
netIP, _ := netip.ParseAddr(options.InstanceIPVar.Get())
if netIP.Is6() && !netIP.IsLinkLocalUnicast() {
localHostAddr = localHostIPv6
}
}
stsServer, err := stsserver.NewServer(stsserver.Config{
LocalHostAddr: localHostAddr,
LocalPort: proxyArgs.StsPort,
}, tokenManager)
if err != nil {
return nil, err
}
return stsServer, nil
}
func getDNSDomain(podNamespace, domain string) string {
if len(domain) == 0 {
domain = podNamespace + ".svc." + constants.DefaultClusterLocalDomain
}
return domain
}
func configureLogging(_ *cobra.Command, _ []string) error {
if err := log.Configure(loggingOptions); err != nil {
return err
}
return nil
}
func initProxy(args []string) (*model.Proxy, error) {
proxy := &model.Proxy{
Type: model.SidecarProxy,
}
if len(args) > 0 {
proxy.Type = model.NodeType(args[0])
if !model.IsApplicationNodeType(proxy.Type) {
return nil, fmt.Errorf("invalid proxy type: %v", proxy.Type)
}
}
podIP, _ := netip.ParseAddr(options.InstanceIPVar.Get()) // protobuf encoding of IP_ADDRESS type
if podIP.IsValid() {
proxy.IPAddresses = []string{podIP.String()}
}
// Obtain all the IPs from the node
if ipAddrs, ok := network.GetPrivateIPs(context.Background()); ok {
if len(proxy.IPAddresses) == 1 {
for _, ip := range ipAddrs {
// prevent duplicate ips, the first one must be the pod ip
// as we pick the first ip as pod ip in istiod
if proxy.IPAddresses[0] != ip {
proxy.IPAddresses = append(proxy.IPAddresses, ip)
}
}
} else {
proxy.IPAddresses = append(proxy.IPAddresses, ipAddrs...)
}
}
// No IP addresses provided, append 127.0.0.1 for ipv4 and ::1 for ipv6
if len(proxy.IPAddresses) == 0 {
proxy.IPAddresses = append(proxy.IPAddresses, localHostIPv4, localHostIPv6)
}
// Apply exclusions from traffic.sidecar.istio.io/excludeInterfaces
proxy.IPAddresses = applyExcludeInterfaces(proxy.IPAddresses)
// After IP addresses are set, let us discover IPMode.
proxy.DiscoverIPMode()
// Extract pod variables.
proxy.ID = proxyArgs.PodName + "." + proxyArgs.PodNamespace
// If not set, set a default based on platform - podNamespace.svc.cluster.local for
// K8S
proxy.DNSDomain = getDNSDomain(proxyArgs.PodNamespace, proxyArgs.DNSDomain)
log.WithLabels("ips", proxy.IPAddresses, "type", proxy.Type, "id", proxy.ID, "domain", proxy.DNSDomain).Info("Proxy role")
return proxy, nil
}
func applyExcludeInterfaces(ifaces []string) []string {
// Get list of excluded interfaces from pod annotation
// TODO: Discuss other input methods such as env, flag (ssuvasanth)
annotations, err := bootstrap.ReadPodAnnotations("")
if err != nil {
log.Debugf("Reading podInfoAnnotations file to get excludeInterfaces was unsuccessful. Continuing without exclusions. msg: %v", err)
return ifaces
}
value, ok := annotations[annotation.SidecarTrafficExcludeInterfaces.Name]
if !ok {
log.Debugf("ExcludeInterfaces annotation is not present. Proxy IPAddresses: %v", ifaces)
return ifaces
}
exclusions := strings.Split(value, ",")
// Find IP addr of excluded interfaces and add to a map for instant lookup
exclusionMap := sets.New[string]()
for _, ifaceName := range exclusions {
iface, err := net.InterfaceByName(ifaceName)
if err != nil {
log.Warnf("Unable to get interface %s: %v", ifaceName, err)
continue
}
addrs, err := iface.Addrs()
if err != nil {
log.Warnf("Unable to get IP addr(s) of interface %s: %v", ifaceName, err)
continue
}
for _, addr := range addrs {
// Get IP only
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
default:
continue
}
// handling ipv4 wrapping in ipv6
ipAddr, okay := netip.AddrFromSlice(ip)
if !okay {
continue
}
unwrapAddr := ipAddr.Unmap()
if !unwrapAddr.IsValid() || unwrapAddr.IsLoopback() || unwrapAddr.IsLinkLocalUnicast() || unwrapAddr.IsLinkLocalMulticast() || unwrapAddr.IsUnspecified() {
continue
}
// Add to map
exclusionMap.Insert(unwrapAddr.String())
}
}
// Remove excluded IP addresses from the input IP addresses list.
var selectedInterfaces []string
for _, ip := range ifaces {
if exclusionMap.Contains(ip) {
log.Infof("Excluding ip %s from proxy IPaddresses list", ip)
continue
}
selectedInterfaces = append(selectedInterfaces, ip)
}
return selectedInterfaces
}
func logLimits() {
out, err := exec.Command("bash", "-c", "ulimit -n").Output()
outStr := strings.TrimSpace(string(out))
if err != nil {
log.Warnf("failed running ulimit command: %v", outStr)
} else {
log.Infof("Maximum file descriptors (ulimit -n): %v", outStr)
}
}
| {
o := options.NewStatusServerOptions(proxy, proxyConfig, agent)
o.EnvoyPrometheusPort = envoyPrometheusPort
o.EnableProfiling = enableProfiling
o.Context = ctx
statusServer, err := status.NewServer(*o)
if err != nil {
return err
}
go statusServer.Run(ctx)
return nil
} | identifier_body |
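A standalone sketch (standard library only, not Istio code) of the shutdown pattern used in `newProxyCommand` above: a signal handler cancels the root context, and long-running components watch `ctx.Done()` to exit gracefully, much as `go cmd.WaitSignalFunc(cancel)` does for the agent.

package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Cancel the context on SIGINT or SIGTERM, triggering a graceful shutdown.
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
    go func() {
        <-sigCh
        cancel()
    }()

    // In the real agent, agent.Run(ctx) and the status server block on this context.
    <-ctx.Done()
    fmt.Println("context cancelled, shutting down")
}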
cmd.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"net"
"net/netip"
"os/exec"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/cmd/pilot-agent/config"
"istio.io/istio/pilot/cmd/pilot-agent/options"
"istio.io/istio/pilot/cmd/pilot-agent/status"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pkg/bootstrap"
"istio.io/istio/pkg/cmd"
"istio.io/istio/pkg/collateral"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/envoy"
istio_agent "istio.io/istio/pkg/istio-agent"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/version"
stsserver "istio.io/istio/security/pkg/stsservice/server"
"istio.io/istio/security/pkg/stsservice/tokenmanager"
cleaniptables "istio.io/istio/tools/istio-clean-iptables/pkg/cmd"
iptables "istio.io/istio/tools/istio-iptables/pkg/cmd"
iptableslog "istio.io/istio/tools/istio-iptables/pkg/log"
)
const (
localHostIPv4 = "127.0.0.1"
localHostIPv6 = "::1"
)
var (
loggingOptions = log.DefaultOptions()
proxyArgs options.ProxyArgs
)
func | () *cobra.Command {
rootCmd := &cobra.Command{
Use: "pilot-agent",
Short: "Istio Pilot agent.",
Long: "Istio Pilot agent runs in the sidecar or gateway container and bootstraps Envoy.",
SilenceUsage: true,
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
}
// Attach the Istio logging options to the command.
loggingOptions.AttachCobraFlags(rootCmd)
cmd.AddFlags(rootCmd)
proxyCmd := newProxyCommand()
addFlags(proxyCmd)
rootCmd.AddCommand(proxyCmd)
rootCmd.AddCommand(requestCmd)
rootCmd.AddCommand(waitCmd)
rootCmd.AddCommand(version.CobraCommand())
rootCmd.AddCommand(iptables.GetCommand())
rootCmd.AddCommand(cleaniptables.GetCommand())
rootCmd.AddCommand(collateral.CobraCommand(rootCmd, &doc.GenManHeader{
Title: "Istio Pilot Agent",
Section: "pilot-agent CLI",
Manual: "Istio Pilot Agent",
}))
return rootCmd
}
func newProxyCommand() *cobra.Command {
return &cobra.Command{
Use: "proxy",
Short: "XDS proxy agent",
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
PersistentPreRunE: configureLogging,
RunE: func(c *cobra.Command, args []string) error {
cmd.PrintFlags(c.Flags())
log.Infof("Version %s", version.Info.String())
logLimits()
proxy, err := initProxy(args)
if err != nil {
return err
}
proxyConfig, err := config.ConstructProxyConfig(proxyArgs.MeshConfigFile, proxyArgs.ServiceCluster, options.ProxyConfigEnv, proxyArgs.Concurrency, proxy)
if err != nil {
return fmt.Errorf("failed to get proxy config: %v", err)
}
if out, err := protomarshal.ToYAML(proxyConfig); err != nil {
log.Infof("Failed to serialize to YAML: %v", err)
} else {
log.Infof("Effective config: %s", out)
}
secOpts, err := options.NewSecurityOptions(proxyConfig, proxyArgs.StsPort, proxyArgs.TokenManagerPlugin)
if err != nil {
return err
}
// If security token service (STS) port is not zero, start STS server and
// listen on STS port for STS requests. For STS, see
// https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16.
// STS is used for stackdriver or other Envoy services using google gRPC.
if proxyArgs.StsPort > 0 {
stsServer, err := initStsServer(proxy, secOpts.TokenManager)
if err != nil {
return err
}
defer stsServer.Stop()
}
// If we are using a custom template file (for control plane proxy, for example), configure this.
if proxyArgs.TemplateFile != "" && proxyConfig.CustomConfigFile == "" {
proxyConfig.ProxyBootstrapTemplatePath = proxyArgs.TemplateFile
}
envoyOptions := envoy.ProxyConfig{
LogLevel: proxyArgs.ProxyLogLevel,
ComponentLogLevel: proxyArgs.ProxyComponentLogLevel,
LogAsJSON: loggingOptions.JSONEncoding,
NodeIPs: proxy.IPAddresses,
Sidecar: proxy.Type == model.SidecarProxy,
OutlierLogPath: proxyArgs.OutlierLogPath,
}
agentOptions := options.NewAgentOptions(proxy, proxyConfig)
agent := istio_agent.NewAgent(proxyConfig, agentOptions, secOpts, envoyOptions)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
defer agent.Close()
// If a status port was provided, start handling status probes.
if proxyConfig.StatusPort > 0 {
if err := initStatusServer(ctx, proxy, proxyConfig,
agentOptions.EnvoyPrometheusPort, proxyArgs.EnableProfiling, agent); err != nil {
return err
}
}
go iptableslog.ReadNFLOGSocket(ctx)
// On SIGINT or SIGTERM, cancel the context, triggering a graceful shutdown
go cmd.WaitSignalFunc(cancel)
// Start in process SDS, dns server, xds proxy, and Envoy.
wait, err := agent.Run(ctx)
if err != nil {
return err
}
wait()
return nil
},
}
}
func addFlags(proxyCmd *cobra.Command) {
proxyArgs = options.NewProxyArgs()
proxyCmd.PersistentFlags().StringVar(&proxyArgs.DNSDomain, "domain", "",
"DNS domain suffix. If not provided uses ${POD_NAMESPACE}.svc.cluster.local")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.MeshConfigFile, "meshConfig", "./etc/istio/config/mesh",
"File name for Istio mesh configuration. If not specified, a default mesh will be used. This may be overridden by "+
"PROXY_CONFIG environment variable or proxy.istio.io/config annotation.")
proxyCmd.PersistentFlags().IntVar(&proxyArgs.StsPort, "stsPort", 0,
"HTTP Port on which to serve Security Token Service (STS). If zero, STS service will not be provided.")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TokenManagerPlugin, "tokenManagerPlugin", tokenmanager.GoogleTokenExchange,
"Token provider specific plugin name.")
// DEPRECATED. Flags for proxy configuration
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ServiceCluster, "serviceCluster", constants.ServiceClusterName, "Service cluster")
// Log levels are provided by the library https://github.com/gabime/spdlog, used by Envoy.
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyLogLevel, "proxyLogLevel", "warning,misc:error",
fmt.Sprintf("The log level used to start the Envoy proxy (choose from {%s, %s, %s, %s, %s, %s, %s})."+
"Level may also include one or more scopes, such as 'info,misc:error,upstream:debug'",
"trace", "debug", "info", "warning", "error", "critical", "off"))
proxyCmd.PersistentFlags().IntVar(&proxyArgs.Concurrency, "concurrency", 0, "number of worker threads to run")
// See https://www.envoyproxy.io/docs/envoy/latest/operations/cli#cmdoption-component-log-level
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyComponentLogLevel, "proxyComponentLogLevel", "",
"The component log level used to start the Envoy proxy. Deprecated, use proxyLogLevel instead")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TemplateFile, "templateFile", "",
"Go template bootstrap config")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.OutlierLogPath, "outlierLogPath", "",
"The log path for outlier detection")
proxyCmd.PersistentFlags().BoolVar(&proxyArgs.EnableProfiling, "profiling", true,
"Enable profiling via web interface host:port/debug/pprof/.")
}
func initStatusServer(ctx context.Context, proxy *model.Proxy, proxyConfig *meshconfig.ProxyConfig,
envoyPrometheusPort int, enableProfiling bool, agent *istio_agent.Agent,
) error {
o := options.NewStatusServerOptions(proxy, proxyConfig, agent)
o.EnvoyPrometheusPort = envoyPrometheusPort
o.EnableProfiling = enableProfiling
o.Context = ctx
statusServer, err := status.NewServer(*o)
if err != nil {
return err
}
go statusServer.Run(ctx)
return nil
}
func initStsServer(proxy *model.Proxy, tokenManager security.TokenManager) (*stsserver.Server, error) {
localHostAddr := localHostIPv4
if proxy.IsIPv6() {
localHostAddr = localHostIPv6
} else {
// if not ipv6-only, it can be ipv4-only or dual-stack
// let InstanceIP decide the localhost
netIP, _ := netip.ParseAddr(options.InstanceIPVar.Get())
if netIP.Is6() && !netIP.IsLinkLocalUnicast() {
localHostAddr = localHostIPv6
}
}
stsServer, err := stsserver.NewServer(stsserver.Config{
LocalHostAddr: localHostAddr,
LocalPort: proxyArgs.StsPort,
}, tokenManager)
if err != nil {
return nil, err
}
return stsServer, nil
}
func getDNSDomain(podNamespace, domain string) string {
if len(domain) == 0 {
domain = podNamespace + ".svc." + constants.DefaultClusterLocalDomain
}
return domain
}
func configureLogging(_ *cobra.Command, _ []string) error {
if err := log.Configure(loggingOptions); err != nil {
return err
}
return nil
}
func initProxy(args []string) (*model.Proxy, error) {
proxy := &model.Proxy{
Type: model.SidecarProxy,
}
if len(args) > 0 {
proxy.Type = model.NodeType(args[0])
if !model.IsApplicationNodeType(proxy.Type) {
return nil, fmt.Errorf("invalid proxy type: %v", proxy.Type)
}
}
podIP, _ := netip.ParseAddr(options.InstanceIPVar.Get()) // protobuf encoding of IP_ADDRESS type
if podIP.IsValid() {
proxy.IPAddresses = []string{podIP.String()}
}
// Obtain all the IPs from the node
if ipAddrs, ok := network.GetPrivateIPs(context.Background()); ok {
if len(proxy.IPAddresses) == 1 {
for _, ip := range ipAddrs {
// prevent duplicate ips, the first one must be the pod ip
// as we pick the first ip as pod ip in istiod
if proxy.IPAddresses[0] != ip {
proxy.IPAddresses = append(proxy.IPAddresses, ip)
}
}
} else {
proxy.IPAddresses = append(proxy.IPAddresses, ipAddrs...)
}
}
// No IP addresses provided, append 127.0.0.1 for ipv4 and ::1 for ipv6
if len(proxy.IPAddresses) == 0 {
proxy.IPAddresses = append(proxy.IPAddresses, localHostIPv4, localHostIPv6)
}
// Apply exclusions from traffic.sidecar.istio.io/excludeInterfaces
proxy.IPAddresses = applyExcludeInterfaces(proxy.IPAddresses)
// After IP addresses are set, let us discover IPMode.
proxy.DiscoverIPMode()
// Extract pod variables.
proxy.ID = proxyArgs.PodName + "." + proxyArgs.PodNamespace
// If not set, set a default based on platform - podNamespace.svc.cluster.local for
// K8S
proxy.DNSDomain = getDNSDomain(proxyArgs.PodNamespace, proxyArgs.DNSDomain)
log.WithLabels("ips", proxy.IPAddresses, "type", proxy.Type, "id", proxy.ID, "domain", proxy.DNSDomain).Info("Proxy role")
return proxy, nil
}
func applyExcludeInterfaces(ifaces []string) []string {
// Get list of excluded interfaces from pod annotation
// TODO: Discuss other input methods such as env, flag (ssuvasanth)
annotations, err := bootstrap.ReadPodAnnotations("")
if err != nil {
log.Debugf("Reading podInfoAnnotations file to get excludeInterfaces was unsuccessful. Continuing without exclusions. msg: %v", err)
return ifaces
}
value, ok := annotations[annotation.SidecarTrafficExcludeInterfaces.Name]
if !ok {
log.Debugf("ExcludeInterfaces annotation is not present. Proxy IPAddresses: %v", ifaces)
return ifaces
}
exclusions := strings.Split(value, ",")
// Find IP addr of excluded interfaces and add to a map for instant lookup
exclusionMap := sets.New[string]()
for _, ifaceName := range exclusions {
iface, err := net.InterfaceByName(ifaceName)
if err != nil {
log.Warnf("Unable to get interface %s: %v", ifaceName, err)
continue
}
addrs, err := iface.Addrs()
if err != nil {
log.Warnf("Unable to get IP addr(s) of interface %s: %v", ifaceName, err)
continue
}
for _, addr := range addrs {
// Get IP only
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
default:
continue
}
// handling ipv4 wrapping in ipv6
ipAddr, okay := netip.AddrFromSlice(ip)
if !okay {
continue
}
unwrapAddr := ipAddr.Unmap()
if !unwrapAddr.IsValid() || unwrapAddr.IsLoopback() || unwrapAddr.IsLinkLocalUnicast() || unwrapAddr.IsLinkLocalMulticast() || unwrapAddr.IsUnspecified() {
continue
}
// Add to map
exclusionMap.Insert(unwrapAddr.String())
}
}
// Remove excluded IP addresses from the input IP addresses list.
var selectedInterfaces []string
for _, ip := range ifaces {
if exclusionMap.Contains(ip) {
log.Infof("Excluding ip %s from proxy IPaddresses list", ip)
continue
}
selectedInterfaces = append(selectedInterfaces, ip)
}
return selectedInterfaces
}
func logLimits() {
out, err := exec.Command("bash", "-c", "ulimit -n").Output()
outStr := strings.TrimSpace(string(out))
if err != nil {
log.Warnf("failed running ulimit command: %v", outStr)
} else {
log.Infof("Maximum file descriptors (ulimit -n): %v", outStr)
}
}
| NewRootCommand | identifier_name |
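A minimal sketch (standard library only, not Istio code) of the IPv4-in-IPv6 unwrapping done in `applyExcludeInterfaces` above: addresses obtained through the `net` package often come back in the 16-byte `::ffff:a.b.c.d` form, and `Unmap` normalizes them before they are compared against the proxy's IP addresses.

package main

import (
    "fmt"
    "net"
    "net/netip"
)

func main() {
    // net.ParseIP stores IPv4 addresses in the 16-byte 4-in-6 representation.
    ip := net.ParseIP("192.0.2.10")
    addr, ok := netip.AddrFromSlice(ip)
    if !ok {
        panic("unexpected IP slice length")
    }
    fmt.Println(addr)         // ::ffff:192.0.2.10
    fmt.Println(addr.Unmap()) // 192.0.2.10
}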
cmd.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"net"
"net/netip"
"os/exec"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/cmd/pilot-agent/config"
"istio.io/istio/pilot/cmd/pilot-agent/options"
"istio.io/istio/pilot/cmd/pilot-agent/status"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pkg/bootstrap"
"istio.io/istio/pkg/cmd"
"istio.io/istio/pkg/collateral"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/envoy"
istio_agent "istio.io/istio/pkg/istio-agent"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/version"
stsserver "istio.io/istio/security/pkg/stsservice/server"
"istio.io/istio/security/pkg/stsservice/tokenmanager"
cleaniptables "istio.io/istio/tools/istio-clean-iptables/pkg/cmd"
iptables "istio.io/istio/tools/istio-iptables/pkg/cmd"
iptableslog "istio.io/istio/tools/istio-iptables/pkg/log"
)
const (
localHostIPv4 = "127.0.0.1"
localHostIPv6 = "::1"
)
var (
loggingOptions = log.DefaultOptions()
proxyArgs options.ProxyArgs
)
func NewRootCommand() *cobra.Command {
rootCmd := &cobra.Command{
Use: "pilot-agent",
Short: "Istio Pilot agent.",
Long: "Istio Pilot agent runs in the sidecar or gateway container and bootstraps Envoy.",
SilenceUsage: true,
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
}
// Attach the Istio logging options to the command.
loggingOptions.AttachCobraFlags(rootCmd)
cmd.AddFlags(rootCmd)
proxyCmd := newProxyCommand()
addFlags(proxyCmd)
rootCmd.AddCommand(proxyCmd)
rootCmd.AddCommand(requestCmd)
rootCmd.AddCommand(waitCmd)
rootCmd.AddCommand(version.CobraCommand())
rootCmd.AddCommand(iptables.GetCommand())
rootCmd.AddCommand(cleaniptables.GetCommand())
rootCmd.AddCommand(collateral.CobraCommand(rootCmd, &doc.GenManHeader{
Title: "Istio Pilot Agent",
Section: "pilot-agent CLI",
Manual: "Istio Pilot Agent",
}))
return rootCmd
}
func newProxyCommand() *cobra.Command {
return &cobra.Command{
Use: "proxy",
Short: "XDS proxy agent",
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
PersistentPreRunE: configureLogging,
RunE: func(c *cobra.Command, args []string) error {
cmd.PrintFlags(c.Flags())
log.Infof("Version %s", version.Info.String())
logLimits()
proxy, err := initProxy(args)
if err != nil {
return err
}
proxyConfig, err := config.ConstructProxyConfig(proxyArgs.MeshConfigFile, proxyArgs.ServiceCluster, options.ProxyConfigEnv, proxyArgs.Concurrency, proxy)
if err != nil {
return fmt.Errorf("failed to get proxy config: %v", err)
}
if out, err := protomarshal.ToYAML(proxyConfig); err != nil {
log.Infof("Failed to serialize to YAML: %v", err)
} else {
log.Infof("Effective config: %s", out)
}
secOpts, err := options.NewSecurityOptions(proxyConfig, proxyArgs.StsPort, proxyArgs.TokenManagerPlugin)
if err != nil {
return err
}
// If security token service (STS) port is not zero, start STS server and
// listen on STS port for STS requests. For STS, see
// https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16.
// STS is used for stackdriver or other Envoy services using google gRPC.
if proxyArgs.StsPort > 0 {
stsServer, err := initStsServer(proxy, secOpts.TokenManager)
if err != nil {
return err
}
defer stsServer.Stop()
}
// If we are using a custom template file (for control plane proxy, for example), configure this.
if proxyArgs.TemplateFile != "" && proxyConfig.CustomConfigFile == "" {
proxyConfig.ProxyBootstrapTemplatePath = proxyArgs.TemplateFile
}
envoyOptions := envoy.ProxyConfig{
LogLevel: proxyArgs.ProxyLogLevel,
ComponentLogLevel: proxyArgs.ProxyComponentLogLevel,
LogAsJSON: loggingOptions.JSONEncoding,
NodeIPs: proxy.IPAddresses,
Sidecar: proxy.Type == model.SidecarProxy,
OutlierLogPath: proxyArgs.OutlierLogPath,
}
agentOptions := options.NewAgentOptions(proxy, proxyConfig)
agent := istio_agent.NewAgent(proxyConfig, agentOptions, secOpts, envoyOptions)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
defer agent.Close()
// If a status port was provided, start handling status probes.
if proxyConfig.StatusPort > 0 {
if err := initStatusServer(ctx, proxy, proxyConfig,
agentOptions.EnvoyPrometheusPort, proxyArgs.EnableProfiling, agent); err != nil {
return err
}
}
go iptableslog.ReadNFLOGSocket(ctx)
// On SIGINT or SIGTERM, cancel the context, triggering a graceful shutdown
go cmd.WaitSignalFunc(cancel)
// Start in process SDS, dns server, xds proxy, and Envoy.
wait, err := agent.Run(ctx)
if err != nil {
return err
}
wait()
return nil
},
}
}
func addFlags(proxyCmd *cobra.Command) {
proxyArgs = options.NewProxyArgs()
proxyCmd.PersistentFlags().StringVar(&proxyArgs.DNSDomain, "domain", "",
"DNS domain suffix. If not provided uses ${POD_NAMESPACE}.svc.cluster.local")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.MeshConfigFile, "meshConfig", "./etc/istio/config/mesh",
"File name for Istio mesh configuration. If not specified, a default mesh will be used. This may be overridden by "+
"PROXY_CONFIG environment variable or proxy.istio.io/config annotation.")
proxyCmd.PersistentFlags().IntVar(&proxyArgs.StsPort, "stsPort", 0,
"HTTP Port on which to serve Security Token Service (STS). If zero, STS service will not be provided.")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TokenManagerPlugin, "tokenManagerPlugin", tokenmanager.GoogleTokenExchange,
"Token provider specific plugin name.")
// DEPRECATED. Flags for proxy configuration
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ServiceCluster, "serviceCluster", constants.ServiceClusterName, "Service cluster")
// Log levels are provided by the library https://github.com/gabime/spdlog, used by Envoy.
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyLogLevel, "proxyLogLevel", "warning,misc:error",
fmt.Sprintf("The log level used to start the Envoy proxy (choose from {%s, %s, %s, %s, %s, %s, %s})."+
"Level may also include one or more scopes, such as 'info,misc:error,upstream:debug'",
"trace", "debug", "info", "warning", "error", "critical", "off"))
proxyCmd.PersistentFlags().IntVar(&proxyArgs.Concurrency, "concurrency", 0, "number of worker threads to run")
// See https://www.envoyproxy.io/docs/envoy/latest/operations/cli#cmdoption-component-log-level
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyComponentLogLevel, "proxyComponentLogLevel", "",
"The component log level used to start the Envoy proxy. Deprecated, use proxyLogLevel instead")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TemplateFile, "templateFile", "",
"Go template bootstrap config")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.OutlierLogPath, "outlierLogPath", "",
"The log path for outlier detection")
proxyCmd.PersistentFlags().BoolVar(&proxyArgs.EnableProfiling, "profiling", true,
"Enable profiling via web interface host:port/debug/pprof/.")
}
func initStatusServer(ctx context.Context, proxy *model.Proxy, proxyConfig *meshconfig.ProxyConfig,
envoyPrometheusPort int, enableProfiling bool, agent *istio_agent.Agent,
) error {
o := options.NewStatusServerOptions(proxy, proxyConfig, agent)
o.EnvoyPrometheusPort = envoyPrometheusPort
o.EnableProfiling = enableProfiling
o.Context = ctx
statusServer, err := status.NewServer(*o)
if err != nil {
return err
}
go statusServer.Run(ctx)
return nil
}
func initStsServer(proxy *model.Proxy, tokenManager security.TokenManager) (*stsserver.Server, error) {
localHostAddr := localHostIPv4
if proxy.IsIPv6() {
localHostAddr = localHostIPv6
} else {
// if not ipv6-only, it can be ipv4-only or dual-stack
// let InstanceIP decide the localhost
netIP, _ := netip.ParseAddr(options.InstanceIPVar.Get())
if netIP.Is6() && !netIP.IsLinkLocalUnicast() {
localHostAddr = localHostIPv6
}
}
stsServer, err := stsserver.NewServer(stsserver.Config{
LocalHostAddr: localHostAddr,
LocalPort: proxyArgs.StsPort,
}, tokenManager)
if err != nil {
return nil, err
}
return stsServer, nil
}
func getDNSDomain(podNamespace, domain string) string {
if len(domain) == 0 {
domain = podNamespace + ".svc." + constants.DefaultClusterLocalDomain
}
return domain
}
func configureLogging(_ *cobra.Command, _ []string) error {
if err := log.Configure(loggingOptions); err != nil {
return err
}
return nil
}
func initProxy(args []string) (*model.Proxy, error) {
proxy := &model.Proxy{
Type: model.SidecarProxy,
}
if len(args) > 0 {
proxy.Type = model.NodeType(args[0])
if !model.IsApplicationNodeType(proxy.Type) |
}
podIP, _ := netip.ParseAddr(options.InstanceIPVar.Get()) // protobuf encoding of IP_ADDRESS type
if podIP.IsValid() {
proxy.IPAddresses = []string{podIP.String()}
}
// Obtain all the IPs from the node
if ipAddrs, ok := network.GetPrivateIPs(context.Background()); ok {
if len(proxy.IPAddresses) == 1 {
for _, ip := range ipAddrs {
// prevent duplicate ips, the first one must be the pod ip
// as we pick the first ip as pod ip in istiod
if proxy.IPAddresses[0] != ip {
proxy.IPAddresses = append(proxy.IPAddresses, ip)
}
}
} else {
proxy.IPAddresses = append(proxy.IPAddresses, ipAddrs...)
}
}
// No IP addresses provided, append 127.0.0.1 for ipv4 and ::1 for ipv6
if len(proxy.IPAddresses) == 0 {
proxy.IPAddresses = append(proxy.IPAddresses, localHostIPv4, localHostIPv6)
}
// Apply exclusions from traffic.sidecar.istio.io/excludeInterfaces
proxy.IPAddresses = applyExcludeInterfaces(proxy.IPAddresses)
// After IP addresses are set, let us discover IPMode.
proxy.DiscoverIPMode()
// Extract pod variables.
proxy.ID = proxyArgs.PodName + "." + proxyArgs.PodNamespace
// If not set, set a default based on platform - podNamespace.svc.cluster.local for
// K8S
proxy.DNSDomain = getDNSDomain(proxyArgs.PodNamespace, proxyArgs.DNSDomain)
log.WithLabels("ips", proxy.IPAddresses, "type", proxy.Type, "id", proxy.ID, "domain", proxy.DNSDomain).Info("Proxy role")
return proxy, nil
}
func applyExcludeInterfaces(ifaces []string) []string {
// Get list of excluded interfaces from pod annotation
// TODO: Discuss other input methods such as env, flag (ssuvasanth)
annotations, err := bootstrap.ReadPodAnnotations("")
if err != nil {
log.Debugf("Reading podInfoAnnotations file to get excludeInterfaces was unsuccessful. Continuing without exclusions. msg: %v", err)
return ifaces
}
value, ok := annotations[annotation.SidecarTrafficExcludeInterfaces.Name]
if !ok {
log.Debugf("ExcludeInterfaces annotation is not present. Proxy IPAddresses: %v", ifaces)
return ifaces
}
exclusions := strings.Split(value, ",")
// Find IP addr of excluded interfaces and add to a map for instant lookup
exclusionMap := sets.New[string]()
for _, ifaceName := range exclusions {
iface, err := net.InterfaceByName(ifaceName)
if err != nil {
log.Warnf("Unable to get interface %s: %v", ifaceName, err)
continue
}
addrs, err := iface.Addrs()
if err != nil {
log.Warnf("Unable to get IP addr(s) of interface %s: %v", ifaceName, err)
continue
}
for _, addr := range addrs {
// Get IP only
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
default:
continue
}
// handling ipv4 wrapping in ipv6
ipAddr, okay := netip.AddrFromSlice(ip)
if !okay {
continue
}
unwrapAddr := ipAddr.Unmap()
if !unwrapAddr.IsValid() || unwrapAddr.IsLoopback() || unwrapAddr.IsLinkLocalUnicast() || unwrapAddr.IsLinkLocalMulticast() || unwrapAddr.IsUnspecified() {
continue
}
// Add to map
exclusionMap.Insert(unwrapAddr.String())
}
}
// Remove excluded IP addresses from the input IP addresses list.
var selectedInterfaces []string
for _, ip := range ifaces {
if exclusionMap.Contains(ip) {
log.Infof("Excluding ip %s from proxy IPaddresses list", ip)
continue
}
selectedInterfaces = append(selectedInterfaces, ip)
}
return selectedInterfaces
}
func logLimits() {
out, err := exec.Command("bash", "-c", "ulimit -n").Output()
outStr := strings.TrimSpace(string(out))
if err != nil {
log.Warnf("failed running ulimit command: %v", outStr)
} else {
log.Infof("Maximum file descriptors (ulimit -n): %v", outStr)
}
}
| {
return nil, fmt.Errorf("Invalid proxy Type: " + string(proxy.Type))
} | conditional_block |
cmd.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
"net"
"net/netip"
"os/exec"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"istio.io/api/annotation" | "istio.io/istio/pilot/cmd/pilot-agent/config"
"istio.io/istio/pilot/cmd/pilot-agent/options"
"istio.io/istio/pilot/cmd/pilot-agent/status"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pkg/bootstrap"
"istio.io/istio/pkg/cmd"
"istio.io/istio/pkg/collateral"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/envoy"
istio_agent "istio.io/istio/pkg/istio-agent"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/version"
stsserver "istio.io/istio/security/pkg/stsservice/server"
"istio.io/istio/security/pkg/stsservice/tokenmanager"
cleaniptables "istio.io/istio/tools/istio-clean-iptables/pkg/cmd"
iptables "istio.io/istio/tools/istio-iptables/pkg/cmd"
iptableslog "istio.io/istio/tools/istio-iptables/pkg/log"
)
const (
localHostIPv4 = "127.0.0.1"
localHostIPv6 = "::1"
)
var (
loggingOptions = log.DefaultOptions()
proxyArgs options.ProxyArgs
)
func NewRootCommand() *cobra.Command {
rootCmd := &cobra.Command{
Use: "pilot-agent",
Short: "Istio Pilot agent.",
Long: "Istio Pilot agent runs in the sidecar or gateway container and bootstraps Envoy.",
SilenceUsage: true,
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
}
// Attach the Istio logging options to the command.
loggingOptions.AttachCobraFlags(rootCmd)
cmd.AddFlags(rootCmd)
proxyCmd := newProxyCommand()
addFlags(proxyCmd)
rootCmd.AddCommand(proxyCmd)
rootCmd.AddCommand(requestCmd)
rootCmd.AddCommand(waitCmd)
rootCmd.AddCommand(version.CobraCommand())
rootCmd.AddCommand(iptables.GetCommand())
rootCmd.AddCommand(cleaniptables.GetCommand())
rootCmd.AddCommand(collateral.CobraCommand(rootCmd, &doc.GenManHeader{
Title: "Istio Pilot Agent",
Section: "pilot-agent CLI",
Manual: "Istio Pilot Agent",
}))
return rootCmd
}
func newProxyCommand() *cobra.Command {
return &cobra.Command{
Use: "proxy",
Short: "XDS proxy agent",
FParseErrWhitelist: cobra.FParseErrWhitelist{
// Allow unknown flags for backward-compatibility.
UnknownFlags: true,
},
PersistentPreRunE: configureLogging,
RunE: func(c *cobra.Command, args []string) error {
cmd.PrintFlags(c.Flags())
log.Infof("Version %s", version.Info.String())
logLimits()
proxy, err := initProxy(args)
if err != nil {
return err
}
proxyConfig, err := config.ConstructProxyConfig(proxyArgs.MeshConfigFile, proxyArgs.ServiceCluster, options.ProxyConfigEnv, proxyArgs.Concurrency, proxy)
if err != nil {
return fmt.Errorf("failed to get proxy config: %v", err)
}
if out, err := protomarshal.ToYAML(proxyConfig); err != nil {
log.Infof("Failed to serialize to YAML: %v", err)
} else {
log.Infof("Effective config: %s", out)
}
secOpts, err := options.NewSecurityOptions(proxyConfig, proxyArgs.StsPort, proxyArgs.TokenManagerPlugin)
if err != nil {
return err
}
// If security token service (STS) port is not zero, start STS server and
// listen on STS port for STS requests. For STS, see
// https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16.
// STS is used for stackdriver or other Envoy services using google gRPC.
if proxyArgs.StsPort > 0 {
stsServer, err := initStsServer(proxy, secOpts.TokenManager)
if err != nil {
return err
}
defer stsServer.Stop()
}
// If we are using a custom template file (for control plane proxy, for example), configure this.
if proxyArgs.TemplateFile != "" && proxyConfig.CustomConfigFile == "" {
proxyConfig.ProxyBootstrapTemplatePath = proxyArgs.TemplateFile
}
envoyOptions := envoy.ProxyConfig{
LogLevel: proxyArgs.ProxyLogLevel,
ComponentLogLevel: proxyArgs.ProxyComponentLogLevel,
LogAsJSON: loggingOptions.JSONEncoding,
NodeIPs: proxy.IPAddresses,
Sidecar: proxy.Type == model.SidecarProxy,
OutlierLogPath: proxyArgs.OutlierLogPath,
}
agentOptions := options.NewAgentOptions(proxy, proxyConfig)
agent := istio_agent.NewAgent(proxyConfig, agentOptions, secOpts, envoyOptions)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
defer agent.Close()
// If a status port was provided, start handling status probes.
if proxyConfig.StatusPort > 0 {
if err := initStatusServer(ctx, proxy, proxyConfig,
agentOptions.EnvoyPrometheusPort, proxyArgs.EnableProfiling, agent); err != nil {
return err
}
}
go iptableslog.ReadNFLOGSocket(ctx)
// On SIGINT or SIGTERM, cancel the context, triggering a graceful shutdown
go cmd.WaitSignalFunc(cancel)
// Start in process SDS, dns server, xds proxy, and Envoy.
wait, err := agent.Run(ctx)
if err != nil {
return err
}
wait()
return nil
},
}
}
func addFlags(proxyCmd *cobra.Command) {
proxyArgs = options.NewProxyArgs()
proxyCmd.PersistentFlags().StringVar(&proxyArgs.DNSDomain, "domain", "",
"DNS domain suffix. If not provided uses ${POD_NAMESPACE}.svc.cluster.local")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.MeshConfigFile, "meshConfig", "./etc/istio/config/mesh",
"File name for Istio mesh configuration. If not specified, a default mesh will be used. This may be overridden by "+
"PROXY_CONFIG environment variable or proxy.istio.io/config annotation.")
proxyCmd.PersistentFlags().IntVar(&proxyArgs.StsPort, "stsPort", 0,
"HTTP Port on which to serve Security Token Service (STS). If zero, STS service will not be provided.")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TokenManagerPlugin, "tokenManagerPlugin", tokenmanager.GoogleTokenExchange,
"Token provider specific plugin name.")
// DEPRECATED. Flags for proxy configuration
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ServiceCluster, "serviceCluster", constants.ServiceClusterName, "Service cluster")
// Log levels are provided by the library https://github.com/gabime/spdlog, used by Envoy.
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyLogLevel, "proxyLogLevel", "warning,misc:error",
fmt.Sprintf("The log level used to start the Envoy proxy (choose from {%s, %s, %s, %s, %s, %s, %s})."+
"Level may also include one or more scopes, such as 'info,misc:error,upstream:debug'",
"trace", "debug", "info", "warning", "error", "critical", "off"))
proxyCmd.PersistentFlags().IntVar(&proxyArgs.Concurrency, "concurrency", 0, "number of worker threads to run")
// See https://www.envoyproxy.io/docs/envoy/latest/operations/cli#cmdoption-component-log-level
proxyCmd.PersistentFlags().StringVar(&proxyArgs.ProxyComponentLogLevel, "proxyComponentLogLevel", "",
"The component log level used to start the Envoy proxy. Deprecated, use proxyLogLevel instead")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.TemplateFile, "templateFile", "",
"Go template bootstrap config")
proxyCmd.PersistentFlags().StringVar(&proxyArgs.OutlierLogPath, "outlierLogPath", "",
"The log path for outlier detection")
proxyCmd.PersistentFlags().BoolVar(&proxyArgs.EnableProfiling, "profiling", true,
"Enable profiling via web interface host:port/debug/pprof/.")
}
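// Illustrative note (not part of the upstream source): given the flags registered
// above, a typical sidecar invocation might look roughly like
//
//   pilot-agent proxy sidecar --proxyLogLevel debug --domain default.svc.cluster.local
//
// where "sidecar" is the optional node-type argument consumed by initProxy and the
// flags map onto proxyArgs fields; the concrete values here are invented for illustration.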
func initStatusServer(ctx context.Context, proxy *model.Proxy, proxyConfig *meshconfig.ProxyConfig,
envoyPrometheusPort int, enableProfiling bool, agent *istio_agent.Agent,
) error {
o := options.NewStatusServerOptions(proxy, proxyConfig, agent)
o.EnvoyPrometheusPort = envoyPrometheusPort
o.EnableProfiling = enableProfiling
o.Context = ctx
statusServer, err := status.NewServer(*o)
if err != nil {
return err
}
go statusServer.Run(ctx)
return nil
}
func initStsServer(proxy *model.Proxy, tokenManager security.TokenManager) (*stsserver.Server, error) {
localHostAddr := localHostIPv4
if proxy.IsIPv6() {
localHostAddr = localHostIPv6
} else {
// if not ipv6-only, it can be ipv4-only or dual-stack
// let InstanceIP decide the localhost
netIP, _ := netip.ParseAddr(options.InstanceIPVar.Get())
if netIP.Is6() && !netIP.IsLinkLocalUnicast() {
localHostAddr = localHostIPv6
}
}
stsServer, err := stsserver.NewServer(stsserver.Config{
LocalHostAddr: localHostAddr,
LocalPort: proxyArgs.StsPort,
}, tokenManager)
if err != nil {
return nil, err
}
return stsServer, nil
}
func getDNSDomain(podNamespace, domain string) string {
if len(domain) == 0 {
domain = podNamespace + ".svc." + constants.DefaultClusterLocalDomain
}
return domain
}
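// Illustrative note (not part of the upstream source): with the --domain flag left
// empty and a pod namespace of "default", getDNSDomain returns
// "default.svc." + constants.DefaultClusterLocalDomain, i.e. typically
// "default.svc.cluster.local" assuming the usual cluster-local default.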
func configureLogging(_ *cobra.Command, _ []string) error {
if err := log.Configure(loggingOptions); err != nil {
return err
}
return nil
}
func initProxy(args []string) (*model.Proxy, error) {
proxy := &model.Proxy{
Type: model.SidecarProxy,
}
if len(args) > 0 {
proxy.Type = model.NodeType(args[0])
if !model.IsApplicationNodeType(proxy.Type) {
return nil, fmt.Errorf("Invalid proxy Type: " + string(proxy.Type))
}
}
podIP, _ := netip.ParseAddr(options.InstanceIPVar.Get()) // protobuf encoding of IP_ADDRESS type
if podIP.IsValid() {
proxy.IPAddresses = []string{podIP.String()}
}
// Obtain all the IPs from the node
if ipAddrs, ok := network.GetPrivateIPs(context.Background()); ok {
if len(proxy.IPAddresses) == 1 {
for _, ip := range ipAddrs {
// prevent duplicate ips, the first one must be the pod ip
// as we pick the first ip as pod ip in istiod
if proxy.IPAddresses[0] != ip {
proxy.IPAddresses = append(proxy.IPAddresses, ip)
}
}
} else {
proxy.IPAddresses = append(proxy.IPAddresses, ipAddrs...)
}
}
// No IP addresses provided, append 127.0.0.1 for ipv4 and ::1 for ipv6
if len(proxy.IPAddresses) == 0 {
proxy.IPAddresses = append(proxy.IPAddresses, localHostIPv4, localHostIPv6)
}
// Apply exclusions from traffic.sidecar.istio.io/excludeInterfaces
proxy.IPAddresses = applyExcludeInterfaces(proxy.IPAddresses)
// After IP addresses are set, let us discover IPMode.
proxy.DiscoverIPMode()
// Extract pod variables.
proxy.ID = proxyArgs.PodName + "." + proxyArgs.PodNamespace
// If not set, set a default based on platform - podNamespace.svc.cluster.local for
// K8S
proxy.DNSDomain = getDNSDomain(proxyArgs.PodNamespace, proxyArgs.DNSDomain)
log.WithLabels("ips", proxy.IPAddresses, "type", proxy.Type, "id", proxy.ID, "domain", proxy.DNSDomain).Info("Proxy role")
return proxy, nil
}
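// Illustrative note (not part of the upstream source): for example, if
// options.InstanceIPVar resolves to 10.0.0.5 (a made-up value) and the node reports
// private IPs [10.0.0.5, 192.168.1.7], proxy.IPAddresses becomes
// ["10.0.0.5", "192.168.1.7"]: the pod IP stays first and duplicates of it are
// skipped. The 127.0.0.1/::1 fallback is used only when nothing is discovered.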
func applyExcludeInterfaces(ifaces []string) []string {
// Get list of excluded interfaces from pod annotation
// TODO: Discuss other input methods such as env, flag (ssuvasanth)
annotations, err := bootstrap.ReadPodAnnotations("")
if err != nil {
log.Debugf("Reading podInfoAnnotations file to get excludeInterfaces was unsuccessful. Continuing without exclusions. msg: %v", err)
return ifaces
}
value, ok := annotations[annotation.SidecarTrafficExcludeInterfaces.Name]
if !ok {
log.Debugf("ExcludeInterfaces annotation is not present. Proxy IPAddresses: %v", ifaces)
return ifaces
}
exclusions := strings.Split(value, ",")
// Find IP addr of excluded interfaces and add to a map for instant lookup
exclusionMap := sets.New[string]()
for _, ifaceName := range exclusions {
iface, err := net.InterfaceByName(ifaceName)
if err != nil {
log.Warnf("Unable to get interface %s: %v", ifaceName, err)
continue
}
addrs, err := iface.Addrs()
if err != nil {
log.Warnf("Unable to get IP addr(s) of interface %s: %v", ifaceName, err)
continue
}
for _, addr := range addrs {
// Get IP only
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
default:
continue
}
// handling ipv4 wrapping in ipv6
ipAddr, okay := netip.AddrFromSlice(ip)
if !okay {
continue
}
unwrapAddr := ipAddr.Unmap()
if !unwrapAddr.IsValid() || unwrapAddr.IsLoopback() || unwrapAddr.IsLinkLocalUnicast() || unwrapAddr.IsLinkLocalMulticast() || unwrapAddr.IsUnspecified() {
continue
}
// Add to map
exclusionMap.Insert(unwrapAddr.String())
}
}
// Remove excluded IP addresses from the input IP addresses list.
var selectedInterfaces []string
for _, ip := range ifaces {
if exclusionMap.Contains(ip) {
log.Infof("Excluding ip %s from proxy IPaddresses list", ip)
continue
}
selectedInterfaces = append(selectedInterfaces, ip)
}
return selectedInterfaces
}
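// Illustrative note (not part of the upstream source): if the pod annotation
// traffic.sidecar.istio.io/excludeInterfaces is set to, say, "docker0,dummy0"
// (interface names invented here), the addresses bound to those interfaces are
// resolved, unmapped from any IPv4-in-IPv6 form, and dropped from the proxy
// address list; loopback, link-local, and unspecified addresses never enter the
// exclusion set.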
func logLimits() {
out, err := exec.Command("bash", "-c", "ulimit -n").Output()
outStr := strings.TrimSpace(string(out))
if err != nil {
log.Warnf("failed running ulimit command: %v", outStr)
} else {
log.Infof("Maximum file descriptors (ulimit -n): %v", outStr)
}
} | meshconfig "istio.io/api/mesh/v1alpha1" | random_line_split |
EventForm.js | import React from "react";
import { useHistory } from "react-router";
import * as Yup from "yup";
import { Formik } from "formik";
import Map from "../mapbox/mapbox";
import axios from "axios";
import {
Box,
Button,
Card,
CardContent,
CardHeader,
Divider,
FormHelperText,
Grid,
TextField,
Typography,
makeStyles,
} from "@material-ui/core";
import "date-fns";
import DateFnsUtils from "@date-io/date-fns";
import {
MuiPickersUtilsProvider,
KeyboardTimePicker,
KeyboardDatePicker,
} from "@material-ui/pickers";
// <==========AutoComplete imports and functions ===========>
import Autocomplete from "@material-ui/lab/Autocomplete";
import LocationOnIcon from "@material-ui/icons/LocationOn";
import parse from "autosuggest-highlight/parse";
import throttle from "lodash/throttle";
function | (src, position, id) {
if (!position) {
return;
}
const script = document.createElement("script");
script.setAttribute("async", "");
script.setAttribute("id", id);
script.src = src;
position.appendChild(script);
}
const autocompleteService = { current: null };
// <===================>
const useStyles = makeStyles(theme => ({
root: {},
icon: {
color: theme.palette.text.secondary,
marginRight: theme.spacing(2),
},
}));
function EventForm({ className, ...rest }) {
const classes = useStyles();
const history = useHistory();
// const { enqueueSnackbar } = useSnackbar();
const [selectedDate, setSelectedDate] = React.useState(new Date());
// <====================Helper Functions for AutoFill===========>
// eslint-disable-next-line
const [value, setValue] = React.useState(null);
const [inputValue, setInputValue] = React.useState("");
const [options, setOptions] = React.useState([]);
const loaded = React.useRef(false);
if (typeof window !== "undefined" && !loaded.current) {
if (!document.querySelector("#google-maps")) {
loadScript(
`https://maps.googleapis.com/maps/api/js?key=${process.env.REACT_APP_GOOGLE_MAP_APP_API_KEY}&libraries=places`,
document.querySelector("head"),
"google-maps"
);
}
loaded.current = true;
}
const fetch = React.useMemo(
() =>
throttle((request, callback) => {
autocompleteService.current.getPlacePredictions(request, callback);
}, 200),
[]
);
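// Illustrative note (not part of the original source): lodash's throttle(fn, 200)
// means getPlacePredictions fires at most about once every 200 ms while the user
// types, so a burst of keystrokes collapses into a handful of Places requests
// rather than one request per character.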
React.useEffect(() => {
let active = true;
if (!autocompleteService.current && window.google) {
autocompleteService.current = new window.google.maps.places.AutocompleteService();
}
if (!autocompleteService.current) {
return undefined;
}
if (inputValue === "") {
setOptions(value ? [value] : []);
return undefined;
}
fetch({ input: inputValue }, results => {
if (active) {
let newOptions = [];
if (value) {
newOptions = [value];
}
if (results) {
newOptions = [...newOptions, ...results];
}
setOptions(newOptions);
}
});
return () => {
active = false;
};
}, [value, inputValue, fetch]);
// <===========================================>
const handleDateChange = date => {
setSelectedDate(date);
};
return (
<Formik
enableReinitialize
initialValues={{
name: "",
description: "",
location: "",
date: null,
time: null,
img: "",
}}
validationSchema={Yup.object().shape({
description: Yup.string().max(5000),
name: Yup.string().max(255).required(),
})}
onSubmit={(
values,
{ resetForm, setErrors, setStatus, setSubmitting }
) => {
const { description, date, location, name, time, img } = values;
axios
.post("/api/event", { description, location, date, time, name, img })
.then(res => {
resetForm();
setStatus({ success: true });
history.push("/events");
// enqueueSnackbar("Profile updated", {
// // variant: "success",
// // });
})
.catch(error => {
setStatus({ success: false });
setErrors({ submit: error.message });
setSubmitting(false);
});
}}
>
{({
errors,
handleBlur,
handleChange,
handleSubmit,
isSubmitting,
touched,
values,
setFieldValue,
}) => (
<form onSubmit={handleSubmit} className={classes.root}>
<Grid container spacing={3}>
<Grid item xs={12} lg={8}>
<Card>
<CardContent>
<Box mt={2}>
<TextField
error={Boolean(touched.name && errors.name)}
fullWidth
helperText={touched.name && errors.name}
label="Name"
name="name"
onBlur={handleBlur}
onChange={handleChange}
value={values.name}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
id="description"
error={Boolean(touched.description && errors.description)}
fullWidth
helperText={touched.description && errors.description}
multiline
rows={5}
label="Event Description"
name="description"
onBlur={handleBlur}
onChange={handleChange}
value={values.description}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
fullWidth
label="Image Url"
name="img"
onBlur={handleBlur}
onChange={handleChange}
value={values.img}
variant="outlined"
/>
</Box>
</CardContent>
</Card>
<Box mt={3}>
<Card>
<CardHeader title="Time & Date" />
<Divider />
<CardContent>
<MuiPickersUtilsProvider utils={DateFnsUtils}>
<Grid container justify="space-evenly">
<KeyboardDatePicker
disableToolbar
disablePast
variant="inline"
format="MM/dd/yyyy"
margin="normal"
name="date"
value={values.date}
id="date-picker-inline"
label="Pick a Date"
onChange={(event, newValue) => {
handleDateChange();
setFieldValue("date", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change date",
}}
/>
<KeyboardTimePicker
margin="normal"
id="time-picker"
label="Select a time"
name="time"
minutesStep={5}
value={selectedDate}
color="secondary"
onChange={(event, newValue) => {
setFieldValue("time", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change time",
}}
/>
</Grid>
</MuiPickersUtilsProvider>
</CardContent>
</Card>
</Box>
<Box mt={3}>
<Card>
<CardHeader title="Location" />
<Divider />
<CardContent>
<Autocomplete
fullWidth
name="location"
getOptionLabel={option =>
typeof option === "string" ? option : option.description
}
filterOptions={x => x}
options={options}
autoComplete
includeInputInList
filterSelectedOptions
onChange={(event, newValue) => {
setOptions(newValue ? [newValue, ...options] : options);
// setValue(newValue.description);
setFieldValue("location", newValue.description);
}}
onInputChange={(event, newInputValue) => {
setInputValue(newInputValue);
}}
renderInput={params => (
<TextField
{...params}
label="Add a location"
variant="outlined"
fullWidth
/>
)}
renderOption={option => {
const matches =
option.structured_formatting
.main_text_matched_substrings;
const parts = parse(
option.structured_formatting.main_text,
matches.map(match => [
match.offset,
match.offset + match.length,
])
);
return (
<Grid container alignItems="center">
<Grid item>
<LocationOnIcon className={classes.icon} />
</Grid>
<Grid item xs>
{parts.map((part, index) => (
<span
key={index}
style={{
fontWeight: part.highlight ? 700 : 400,
}}
>
{part.text}
</span>
))}
<Typography variant="body2" color="textSecondary">
{option.structured_formatting.secondary_text}
</Typography>
</Grid>
</Grid>
);
}}
/>
</CardContent>
</Card>
</Box>
</Grid>
<Grid item xs={12} lg={4}>
<Card>
<CardHeader title="Map" />
<Divider />
<CardContent>
<Box>
<Map />
</Box>
</CardContent>
</Card>
</Grid>
</Grid>
{errors.submit && (
<Box mt={3}>
<FormHelperText error>{errors.submit}</FormHelperText>
</Box>
)}
<Box mt={2}>
<Button
color="secondary"
variant="contained"
type="submit"
disabled={isSubmitting}
>
Create event
</Button>
</Box>
</form>
)}
</Formik>
);
}
export default EventForm;
| loadScript | identifier_name |
EventForm.js | import React from "react";
import { useHistory } from "react-router";
import * as Yup from "yup";
import { Formik } from "formik";
import Map from "../mapbox/mapbox";
import axios from "axios";
import {
Box,
Button,
Card,
CardContent,
CardHeader,
Divider,
FormHelperText,
Grid,
TextField,
Typography,
makeStyles,
} from "@material-ui/core";
import "date-fns";
import DateFnsUtils from "@date-io/date-fns";
import {
MuiPickersUtilsProvider,
KeyboardTimePicker,
KeyboardDatePicker,
} from "@material-ui/pickers";
// <==========AutoComplete imports and functions ===========>
import Autocomplete from "@material-ui/lab/Autocomplete";
import LocationOnIcon from "@material-ui/icons/LocationOn";
import parse from "autosuggest-highlight/parse";
import throttle from "lodash/throttle";
function loadScript(src, position, id) {
if (!position) {
return;
}
const script = document.createElement("script");
script.setAttribute("async", "");
script.setAttribute("id", id);
script.src = src;
position.appendChild(script);
}
const autocompleteService = { current: null };
// <===================>
const useStyles = makeStyles(theme => ({
root: {},
icon: {
color: theme.palette.text.secondary,
marginRight: theme.spacing(2),
},
}));
function EventForm({ className, ...rest }) |
export default EventForm;
| {
const classes = useStyles();
const history = useHistory();
// const { enqueueSnackbar } = useSnackbar();
const [selectedDate, setSelectedDate] = React.useState(new Date());
// <====================Helper Functions for AutoFill===========>
// eslint-disable-next-line
const [value, setValue] = React.useState(null);
const [inputValue, setInputValue] = React.useState("");
const [options, setOptions] = React.useState([]);
const loaded = React.useRef(false);
if (typeof window !== "undefined" && !loaded.current) {
if (!document.querySelector("#google-maps")) {
loadScript(
`https://maps.googleapis.com/maps/api/js?key=${process.env.REACT_APP_GOOGLE_MAP_APP_API_KEY}&libraries=places`,
document.querySelector("head"),
"google-maps"
);
}
loaded.current = true;
}
const fetch = React.useMemo(
() =>
throttle((request, callback) => {
autocompleteService.current.getPlacePredictions(request, callback);
}, 200),
[]
);
React.useEffect(() => {
let active = true;
if (!autocompleteService.current && window.google) {
autocompleteService.current = new window.google.maps.places.AutocompleteService();
}
if (!autocompleteService.current) {
return undefined;
}
if (inputValue === "") {
setOptions(value ? [value] : []);
return undefined;
}
fetch({ input: inputValue }, results => {
if (active) {
let newOptions = [];
if (value) {
newOptions = [value];
}
if (results) {
newOptions = [...newOptions, ...results];
}
setOptions(newOptions);
}
});
return () => {
active = false;
};
}, [value, inputValue, fetch]);
// <===========================================>
const handleDateChange = date => {
setSelectedDate(date);
};
return (
<Formik
enableReinitialize
initialValues={{
name: "",
description: "",
location: "",
date: null,
time: null,
img: "",
}}
validationSchema={Yup.object().shape({
description: Yup.string().max(5000),
name: Yup.string().max(255).required(),
})}
onSubmit={(
values,
{ resetForm, setErrors, setStatus, setSubmitting }
) => {
const { description, date, location, name, time, img } = values;
axios
.post("/api/event", { description, location, date, time, name, img })
.then(res => {
resetForm();
setStatus({ success: true });
history.push("/events");
// enqueueSnackbar("Profile updated", {
// // variant: "success",
// // });
})
.catch(error => {
setStatus({ success: false });
setErrors({ submit: error.message });
setSubmitting(false);
});
}}
>
{({
errors,
handleBlur,
handleChange,
handleSubmit,
isSubmitting,
touched,
values,
setFieldValue,
}) => (
<form onSubmit={handleSubmit} className={classes.root}>
<Grid container spacing={3}>
<Grid item xs={12} lg={8}>
<Card>
<CardContent>
<Box mt={2}>
<TextField
error={Boolean(touched.name && errors.name)}
fullWidth
helperText={touched.name && errors.name}
label="Name"
name="name"
onBlur={handleBlur}
onChange={handleChange}
value={values.name}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
id="description"
error={Boolean(touched.description && errors.description)}
fullWidth
helperText={touched.description && errors.description}
multiline
rows={5}
label="Event Description"
name="description"
onBlur={handleBlur}
onChange={handleChange}
value={values.description}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
fullWidth
label="Image Url"
name="img"
onBlur={handleBlur}
onChange={handleChange}
value={values.img}
variant="outlined"
/>
</Box>
</CardContent>
</Card>
<Box mt={3}>
<Card>
<CardHeader title="Time & Date" />
<Divider />
<CardContent>
<MuiPickersUtilsProvider utils={DateFnsUtils}>
<Grid container justify="space-evenly">
<KeyboardDatePicker
disableToolbar
disablePast
variant="inline"
format="MM/dd/yyyy"
margin="normal"
name="date"
value={values.date}
id="date-picker-inline"
label="Pick a Date"
onChange={(event, newValue) => {
handleDateChange();
setFieldValue("date", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change date",
}}
/>
<KeyboardTimePicker
margin="normal"
id="time-picker"
label="Select a time"
name="time"
minutesStep={5}
value={selectedDate}
color="secondary"
onChange={(event, newValue) => {
setFieldValue("time", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change time",
}}
/>
</Grid>
</MuiPickersUtilsProvider>
</CardContent>
</Card>
</Box>
<Box mt={3}>
<Card>
<CardHeader title="Location" />
<Divider />
<CardContent>
<Autocomplete
fullWidth
name="location"
getOptionLabel={option =>
typeof option === "string" ? option : option.description
}
filterOptions={x => x}
options={options}
autoComplete
includeInputInList
filterSelectedOptions
onChange={(event, newValue) => {
setOptions(newValue ? [newValue, ...options] : options);
// setValue(newValue.description);
setFieldValue("location", newValue.description);
}}
onInputChange={(event, newInputValue) => {
setInputValue(newInputValue);
}}
renderInput={params => (
<TextField
{...params}
label="Add a location"
variant="outlined"
fullWidth
/>
)}
renderOption={option => {
const matches =
option.structured_formatting
.main_text_matched_substrings;
const parts = parse(
option.structured_formatting.main_text,
matches.map(match => [
match.offset,
match.offset + match.length,
])
);
return (
<Grid container alignItems="center">
<Grid item>
<LocationOnIcon className={classes.icon} />
</Grid>
<Grid item xs>
{parts.map((part, index) => (
<span
key={index}
style={{
fontWeight: part.highlight ? 700 : 400,
}}
>
{part.text}
</span>
))}
<Typography variant="body2" color="textSecondary">
{option.structured_formatting.secondary_text}
</Typography>
</Grid>
</Grid>
);
}}
/>
</CardContent>
</Card>
</Box>
</Grid>
<Grid item xs={12} lg={4}>
<Card>
<CardHeader title="Map" />
<Divider />
<CardContent>
<Box>
<Map />
</Box>
</CardContent>
</Card>
</Grid>
</Grid>
{errors.submit && (
<Box mt={3}>
<FormHelperText error>{errors.submit}</FormHelperText>
</Box>
)}
<Box mt={2}>
<Button
color="secondary"
variant="contained"
type="submit"
disabled={isSubmitting}
>
Create event
</Button>
</Box>
</form>
)}
</Formik>
);
} | identifier_body |
EventForm.js | import React from "react";
import { useHistory } from "react-router";
import * as Yup from "yup";
import { Formik } from "formik";
import Map from "../mapbox/mapbox";
import axios from "axios";
import {
Box,
Button,
Card,
CardContent,
CardHeader,
Divider,
FormHelperText,
Grid,
TextField,
Typography,
makeStyles,
} from "@material-ui/core";
import "date-fns";
import DateFnsUtils from "@date-io/date-fns";
import {
MuiPickersUtilsProvider,
KeyboardTimePicker,
KeyboardDatePicker,
} from "@material-ui/pickers";
// <==========AutoComplete imports and functions ===========>
import Autocomplete from "@material-ui/lab/Autocomplete";
import LocationOnIcon from "@material-ui/icons/LocationOn";
import parse from "autosuggest-highlight/parse";
import throttle from "lodash/throttle";
function loadScript(src, position, id) {
if (!position) {
return;
}
const script = document.createElement("script");
script.setAttribute("async", "");
script.setAttribute("id", id);
script.src = src;
position.appendChild(script);
}
const autocompleteService = { current: null };
// <===================>
const useStyles = makeStyles(theme => ({
root: {},
icon: {
color: theme.palette.text.secondary,
marginRight: theme.spacing(2),
},
}));
function EventForm({ className, ...rest }) {
const classes = useStyles();
const history = useHistory();
// const { enqueueSnackbar } = useSnackbar();
const [selectedDate, setSelectedDate] = React.useState(new Date());
// <====================Helper Functions for AutoFill===========>
// eslint-disable-next-line
const [value, setValue] = React.useState(null);
const [inputValue, setInputValue] = React.useState("");
const [options, setOptions] = React.useState([]);
const loaded = React.useRef(false);
if (typeof window !== "undefined" && !loaded.current) |
const fetch = React.useMemo(
() =>
throttle((request, callback) => {
autocompleteService.current.getPlacePredictions(request, callback);
}, 200),
[]
);
React.useEffect(() => {
let active = true;
if (!autocompleteService.current && window.google) {
autocompleteService.current = new window.google.maps.places.AutocompleteService();
}
if (!autocompleteService.current) {
return undefined;
}
if (inputValue === "") {
setOptions(value ? [value] : []);
return undefined;
}
fetch({ input: inputValue }, results => {
if (active) {
let newOptions = [];
if (value) {
newOptions = [value];
}
if (results) {
newOptions = [...newOptions, ...results];
}
setOptions(newOptions);
}
});
return () => {
active = false;
};
}, [value, inputValue, fetch]);
// <===========================================>
const handleDateChange = date => {
setSelectedDate(date);
};
return (
<Formik
enableReinitialize
initialValues={{
name: "",
description: "",
location: "",
date: null,
time: null,
img: "",
}}
validationSchema={Yup.object().shape({
description: Yup.string().max(5000),
name: Yup.string().max(255).required(),
})}
onSubmit={(
values,
{ resetForm, setErrors, setStatus, setSubmitting }
) => {
const { description, date, location, name, time, img } = values;
axios
.post("/api/event", { description, location, date, time, name, img })
.then(res => {
resetForm();
setStatus({ success: true });
history.push("/events");
// enqueueSnackbar("Profile updated", {
// // variant: "success",
// // });
})
.catch(error => {
setStatus({ success: false });
setErrors({ submit: error.message });
setSubmitting(false);
});
}}
>
{({
errors,
handleBlur,
handleChange,
handleSubmit,
isSubmitting,
touched,
values,
setFieldValue,
}) => (
<form onSubmit={handleSubmit} className={classes.root}>
<Grid container spacing={3}>
<Grid item xs={12} lg={8}>
<Card>
<CardContent>
<Box mt={2}>
<TextField
error={Boolean(touched.name && errors.name)}
fullWidth
helperText={touched.name && errors.name}
label="Name"
name="name"
onBlur={handleBlur}
onChange={handleChange}
value={values.name}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
id="description"
error={Boolean(touched.description && errors.description)}
fullWidth
helperText={touched.description && errors.description}
multiline
rows={5}
label="Event Description"
name="description"
onBlur={handleBlur}
onChange={handleChange}
value={values.description}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
fullWidth
label="Image Url"
name="img"
onBlur={handleBlur}
onChange={handleChange}
value={values.img}
variant="outlined"
/>
</Box>
</CardContent>
</Card>
<Box mt={3}>
<Card>
<CardHeader title="Time & Date" />
<Divider />
<CardContent>
<MuiPickersUtilsProvider utils={DateFnsUtils}>
<Grid container justify="space-evenly">
<KeyboardDatePicker
disableToolbar
disablePast
variant="inline"
format="MM/dd/yyyy"
margin="normal"
name="date"
value={values.date}
id="date-picker-inline"
label="Pick a Date"
onChange={(event, newValue) => {
handleDateChange();
setFieldValue("date", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change date",
}}
/>
<KeyboardTimePicker
margin="normal"
id="time-picker"
label="Select a time"
name="time"
minutesStep={5}
value={selectedDate}
color="secondary"
onChange={(event, newValue) => {
setFieldValue("time", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change time",
}}
/>
</Grid>
</MuiPickersUtilsProvider>
</CardContent>
</Card>
</Box>
<Box mt={3}>
<Card>
<CardHeader title="Location" />
<Divider />
<CardContent>
<Autocomplete
fullWidth
name="location"
getOptionLabel={option =>
typeof option === "string" ? option : option.description
}
filterOptions={x => x}
options={options}
autoComplete
includeInputInList
filterSelectedOptions
onChange={(event, newValue) => {
setOptions(newValue ? [newValue, ...options] : options);
// setValue(newValue.description);
setFieldValue("location", newValue.description);
}}
onInputChange={(event, newInputValue) => {
setInputValue(newInputValue);
}}
renderInput={params => (
<TextField
{...params}
label="Add a location"
variant="outlined"
fullWidth
/>
)}
renderOption={option => {
const matches =
option.structured_formatting
.main_text_matched_substrings;
const parts = parse(
option.structured_formatting.main_text,
matches.map(match => [
match.offset,
match.offset + match.length,
])
);
return (
<Grid container alignItems="center">
<Grid item>
<LocationOnIcon className={classes.icon} />
</Grid>
<Grid item xs>
{parts.map((part, index) => (
<span
key={index}
style={{
fontWeight: part.highlight ? 700 : 400,
}}
>
{part.text}
</span>
))}
<Typography variant="body2" color="textSecondary">
{option.structured_formatting.secondary_text}
</Typography>
</Grid>
</Grid>
);
}}
/>
</CardContent>
</Card>
</Box>
</Grid>
<Grid item xs={12} lg={4}>
<Card>
<CardHeader title="Map" />
<Divider />
<CardContent>
<Box>
<Map />
</Box>
</CardContent>
</Card>
</Grid>
</Grid>
{errors.submit && (
<Box mt={3}>
<FormHelperText error>{errors.submit}</FormHelperText>
</Box>
)}
<Box mt={2}>
<Button
color="secondary"
variant="contained"
type="submit"
disabled={isSubmitting}
>
Create event
</Button>
</Box>
</form>
)}
</Formik>
);
}
export default EventForm;
| {
if (!document.querySelector("#google-maps")) {
loadScript(
`https://maps.googleapis.com/maps/api/js?key=${process.env.REACT_APP_GOOGLE_MAP_APP_API_KEY}&libraries=places`,
document.querySelector("head"),
"google-maps"
);
}
loaded.current = true;
} | conditional_block |
EventForm.js | import React from "react";
import { useHistory } from "react-router";
import * as Yup from "yup";
import { Formik } from "formik";
import Map from "../mapbox/mapbox";
import axios from "axios";
import {
Box,
Button,
Card,
CardContent,
CardHeader,
Divider,
FormHelperText,
Grid,
TextField,
Typography,
makeStyles,
} from "@material-ui/core";
import "date-fns";
import DateFnsUtils from "@date-io/date-fns";
import {
MuiPickersUtilsProvider,
KeyboardTimePicker,
KeyboardDatePicker,
} from "@material-ui/pickers";
// <==========AutoComplete imports and functions ===========>
import Autocomplete from "@material-ui/lab/Autocomplete";
import LocationOnIcon from "@material-ui/icons/LocationOn";
import parse from "autosuggest-highlight/parse";
import throttle from "lodash/throttle";
function loadScript(src, position, id) {
if (!position) {
return;
}
const script = document.createElement("script");
script.setAttribute("async", "");
script.setAttribute("id", id);
script.src = src;
position.appendChild(script);
}
const autocompleteService = { current: null };
// <===================>
const useStyles = makeStyles(theme => ({
root: {},
icon: {
color: theme.palette.text.secondary,
marginRight: theme.spacing(2),
},
}));
function EventForm({ className, ...rest }) {
const classes = useStyles();
const history = useHistory();
// const { enqueueSnackbar } = useSnackbar();
const [selectedDate, setSelectedDate] = React.useState(new Date());
// <====================Helper Functions for AutoFill===========>
// eslint-disable-next-line
const [value, setValue] = React.useState(null);
const [inputValue, setInputValue] = React.useState("");
const [options, setOptions] = React.useState([]);
const loaded = React.useRef(false);
if (typeof window !== "undefined" && !loaded.current) {
if (!document.querySelector("#google-maps")) {
loadScript(
`https://maps.googleapis.com/maps/api/js?key=${process.env.REACT_APP_GOOGLE_MAP_APP_API_KEY}&libraries=places`,
document.querySelector("head"),
"google-maps"
);
}
loaded.current = true;
}
const fetch = React.useMemo(
() =>
throttle((request, callback) => {
autocompleteService.current.getPlacePredictions(request, callback);
}, 200),
[]
);
React.useEffect(() => {
let active = true;
if (!autocompleteService.current && window.google) {
autocompleteService.current = new window.google.maps.places.AutocompleteService();
}
if (!autocompleteService.current) {
return undefined;
}
if (inputValue === "") {
setOptions(value ? [value] : []);
return undefined;
}
fetch({ input: inputValue }, results => {
if (active) {
let newOptions = [];
if (value) {
newOptions = [value];
}
if (results) {
newOptions = [...newOptions, ...results];
}
setOptions(newOptions);
}
});
return () => {
active = false;
};
}, [value, inputValue, fetch]);
// <===========================================>
const handleDateChange = date => {
setSelectedDate(date);
};
return (
<Formik
enableReinitialize
initialValues={{
name: "",
description: "",
location: "",
date: null,
time: null,
img: "",
}}
validationSchema={Yup.object().shape({
description: Yup.string().max(5000),
name: Yup.string().max(255).required(),
})}
onSubmit={(
values,
{ resetForm, setErrors, setStatus, setSubmitting }
) => {
const { description, date, location, name, time, img } = values;
axios
.post("/api/event", { description, location, date, time, name, img })
.then(res => {
resetForm();
setStatus({ success: true });
history.push("/events");
// enqueueSnackbar("Profile updated", {
// // variant: "success",
// // });
})
.catch(error => {
setStatus({ success: false });
setErrors({ submit: error.message });
setSubmitting(false);
});
}}
>
{({
errors,
handleBlur,
handleChange,
handleSubmit,
isSubmitting,
touched,
values,
setFieldValue,
}) => (
<form onSubmit={handleSubmit} className={classes.root}>
<Grid container spacing={3}>
<Grid item xs={12} lg={8}>
<Card>
<CardContent>
<Box mt={2}>
<TextField
error={Boolean(touched.name && errors.name)}
fullWidth
helperText={touched.name && errors.name}
label="Name"
name="name"
onBlur={handleBlur}
onChange={handleChange}
value={values.name}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
id="description"
error={Boolean(touched.description && errors.description)}
fullWidth
helperText={touched.description && errors.description}
multiline
rows={5}
label="Event Description"
name="description"
onBlur={handleBlur}
onChange={handleChange}
value={values.description}
variant="outlined"
/>
</Box>
<Box mt={2}>
<TextField
fullWidth
label="Image Url"
name="img"
onBlur={handleBlur}
onChange={handleChange}
value={values.img}
variant="outlined"
/>
</Box>
</CardContent>
</Card>
<Box mt={3}>
<Card>
<CardHeader title="Time & Date" />
<Divider />
<CardContent>
<MuiPickersUtilsProvider utils={DateFnsUtils}>
<Grid container justify="space-evenly">
<KeyboardDatePicker
disableToolbar
disablePast
variant="inline"
format="MM/dd/yyyy"
margin="normal"
name="date"
value={values.date}
id="date-picker-inline"
label="Pick a Date"
onChange={(event, newValue) => {
handleDateChange();
setFieldValue("date", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change date",
}}
/>
<KeyboardTimePicker
margin="normal"
id="time-picker"
label="Select a time"
name="time"
minutesStep={5}
value={selectedDate}
color="secondary"
onChange={(event, newValue) => {
setFieldValue("time", newValue);
}}
KeyboardButtonProps={{
"aria-label": "change time",
}}
/>
</Grid>
</MuiPickersUtilsProvider> | <CardHeader title="Location" />
<Divider />
<CardContent>
<Autocomplete
fullWidth
name="location"
getOptionLabel={option =>
typeof option === "string" ? option : option.description
}
filterOptions={x => x}
options={options}
autoComplete
includeInputInList
filterSelectedOptions
onChange={(event, newValue) => {
setOptions(newValue ? [newValue, ...options] : options);
// setValue(newValue.description);
setFieldValue("location", newValue.description);
}}
onInputChange={(event, newInputValue) => {
setInputValue(newInputValue);
}}
renderInput={params => (
<TextField
{...params}
label="Add a location"
variant="outlined"
fullWidth
/>
)}
renderOption={option => {
const matches =
option.structured_formatting
.main_text_matched_substrings;
const parts = parse(
option.structured_formatting.main_text,
matches.map(match => [
match.offset,
match.offset + match.length,
])
);
return (
<Grid container alignItems="center">
<Grid item>
<LocationOnIcon className={classes.icon} />
</Grid>
<Grid item xs>
{parts.map((part, index) => (
<span
key={index}
style={{
fontWeight: part.highlight ? 700 : 400,
}}
>
{part.text}
</span>
))}
<Typography variant="body2" color="textSecondary">
{option.structured_formatting.secondary_text}
</Typography>
</Grid>
</Grid>
);
}}
/>
</CardContent>
</Card>
</Box>
</Grid>
<Grid item xs={12} lg={4}>
<Card>
<CardHeader title="Map" />
<Divider />
<CardContent>
<Box>
<Map />
</Box>
</CardContent>
</Card>
</Grid>
</Grid>
{errors.submit && (
<Box mt={3}>
<FormHelperText error>{errors.submit}</FormHelperText>
</Box>
)}
<Box mt={2}>
<Button
color="secondary"
variant="contained"
type="submit"
disabled={isSubmitting}
>
Create event
</Button>
</Box>
</form>
)}
</Formik>
);
}
export default EventForm; | </CardContent>
</Card>
</Box>
<Box mt={3}>
<Card> | random_line_split |
main.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Benchstat computes and compares statistics about benchmarks.
//
// This package has moved. Please use https://golang.org/x/perf/cmd/benchstat
package main
import (
"bytes"
"flag"
"fmt"
"html"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"unicode/utf8"
"rsc.io/benchstat/internal/go-moremath/stats"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: benchstat [options] old.txt [new.txt] [more.txt ...]\n")
fmt.Fprintf(os.Stderr, "options:\n")
flag.PrintDefaults()
os.Exit(2)
}
var (
flagDeltaTest = flag.String("delta-test", "utest", "significance `test` to apply to delta: utest, ttest, or none")
flagAlpha = flag.Float64("alpha", 0.05, "consider change significant if p < `α`")
flagGeomean = flag.Bool("geomean", false, "print the geometric mean of each file")
flagHTML = flag.Bool("html", false, "print results as an HTML table")
)
var deltaTestNames = map[string]func(old, new *Benchstat) (float64, error){
"none": notest,
"u": utest,
"u-test": utest,
"utest": utest,
"t": ttest,
"t-test": ttest,
"ttest": ttest,
}
type row struct {
cols []string
}
func newRow(cols ...string) *row {
return &row{cols: cols}
}
func (r *row) add(col string) {
r.cols = append(r.cols, col)
}
func (r *row) trim() {
for len(r.cols) > 0 && r.cols[len(r.cols)-1] == "" {
r.cols = r.cols[:len(r.cols)-1]
}
}
func main() {
log.SetPrefix("benchstat: ")
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
deltaTest := deltaTestNames[strings.ToLower(*flagDeltaTest)]
if flag.NArg() < 1 || deltaTest == nil {
flag.Usage()
}
// Read in benchmark data.
c := readFiles(flag.Args())
for _, stat := range c.Stats {
stat.ComputeStats()
}
var tables [][]*row
switch len(c.Configs) {
case 2:
before, after := c.Configs[0], c.Configs[1]
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
for _, key.Benchmark = range c.Benchmarks {
key.Config = before
old := c.Stats[key]
key.Config = after
new := c.Stats[key]
if old == nil || new == nil {
continue
}
if len(table) == 0 {
table = append(table, newRow("name", "old "+metric, "new "+metric, "delta"))
}
pval, testerr := deltaTest(old, new)
scaler := newScaler(old.Mean, old.Unit)
row := newRow(key.Benchmark, old.Format(scaler), new.Format(scaler), "~ ")
if testerr == stats.ErrZeroVariance {
row.add("(zero variance)")
} else if testerr == stats.ErrSampleSize {
row.add("(too few samples)")
} else if testerr == stats.ErrSamplesEqual {
row.add("(all equal)")
} else if testerr != nil {
row.add(fmt.Sprintf("(%s)", testerr))
} else if pval < *flagAlpha {
row.cols[3] = fmt.Sprintf("%+.2f%%", ((new.Mean/old.Mean)-1.0)*100.0)
}
if len(row.cols) == 4 && pval != -1 {
row.add(fmt.Sprintf("(p=%0.3f n=%d+%d)", pval, len(old.RValues), len(new.RValues)))
}
table = append(table, row)
}
if len(table) > 0 {
table = addGeomean(table, c, key.Unit, true)
tables = append(tables, table)
}
}
default:
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
if len(c.Configs) > 1 {
hdr := newRow("name \\ " + metric)
for _, config := range c.Configs {
hdr.add(config)
}
table = append(table, hdr)
} else {
table = append(table, newRow("name", metric))
}
for _, key.Benchmark = range c.Benchmarks {
row := newRow(key.Benchmark)
var scaler func(float64) string
for _, key.Config = range c.Configs {
stat := c.Stats[key]
if stat == nil {
row.add("")
continue
}
if scaler == nil {
scaler = newScaler(stat.Mean, stat.Unit)
}
row.add(stat.Format(scaler))
}
row.trim()
if len(row.cols) > 1 {
table = append(table, row)
}
}
table = addGeomean(table, c, key.Unit, false)
tables = append(tables, table)
}
}
numColumn := 0
for _, table := range tables {
for _, row := range table {
if numColumn < len(row.cols) {
numColumn = len(row.cols)
}
}
}
max := make([]int, numColumn)
for _, table := range tables {
for _, row := range table {
for i, s := range row.cols {
n := utf8.RuneCountInString(s)
if max[i] < n {
max[i] = n
}
}
}
}
var buf bytes.Buffer
for i, table := range tables {
if i > 0 {
fmt.Fprintf(&buf, "\n")
}
if *flagHTML {
fmt.Fprintf(&buf, "<style>.benchstat tbody td:nth-child(1n+2) { text-align: right; padding: 0em 1em; }</style>\n")
fmt.Fprintf(&buf, "<table class='benchstat'>\n")
printRow := func(row *row, tag string) {
fmt.Fprintf(&buf, "<tr>")
for _, cell := range row.cols {
fmt.Fprintf(&buf, "<%s>%s</%s>", tag, html.EscapeString(cell), tag)
}
fmt.Fprintf(&buf, "\n")
}
printRow(table[0], "th")
for _, row := range table[1:] {
printRow(row, "td")
}
fmt.Fprintf(&buf, "</table>\n")
continue
}
// headings
row := table[0]
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
fmt.Fprintf(&buf, " %-*s", max[i], s)
case len(row.cols) - 1:
fmt.Fprintf(&buf, " %s\n", s)
}
}
// data
for _, row := range table[1:] {
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
if i == len(row.cols)-1 && len(s) > 0 && s[0] == '(' {
// Left-align p value.
fmt.Fprintf(&buf, " %s", s)
break
}
fmt.Fprintf(&buf, " %*s", max[i], s)
}
}
fmt.Fprintf(&buf, "\n")
}
}
os.Stdout.Write(buf.Bytes())
}
func addGeomean(table []*row, c *Collection, unit string, delta bool) []*row {
if !*flagGeomean {
return table
}
row := newRow("[Geo mean]")
key := BenchKey{Unit: unit}
geomeans := []float64{}
for _, key.Config = range c.Configs {
var means []float64
for _, key.Benchmark = range c.Benchmarks {
stat := c.Stats[key]
if stat != nil {
means = append(means, stat.Mean)
}
}
if len(means) == 0 {
row.add("")
delta = false
} else {
geomean := stats.GeoMean(means)
geomeans = append(geomeans, geomean)
row.add(newScaler(geomean, unit)(geomean) + " ")
}
}
if delta {
row.add(fmt.Sprintf("%+.2f%%", ((geomeans[1]/geomeans[0])-1.0)*100.0))
}
return append(table, row)
}
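// Illustrative note (not part of the original source): stats.GeoMean is the n-th
// root of the product of the samples; e.g. the geometric mean of {2, 8} is
// sqrt(2*8) = 4, which damps the influence of a single large benchmark compared
// with the arithmetic mean of 5.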
func timeScaler(ns float64) func(float64) string {
var format string
var scale float64
switch x := ns / 1e9; {
case x >= 99.5:
format, scale = "%.0fs", 1
case x >= 9.95:
format, scale = "%.1fs", 1
case x >= 0.995:
format, scale = "%.2fs", 1
case x >= 0.0995:
format, scale = "%.0fms", 1000
case x >= 0.00995:
format, scale = "%.1fms", 1000
case x >= 0.000995:
format, scale = "%.2fms", 1000
case x >= 0.0000995:
format, scale = "%.0fµs", 1000*1000
case x >= 0.00000995:
format, scale = "%.1fµs", 1000*1000
case x >= 0.000000995:
format, scale = "%.2fµs", 1000*1000
case x >= 0.0000000995:
format, scale = "%.0fns", 1000*1000*1000
case x >= 0.00000000995:
format, scale = "%.1fns", 1000*1000*1000
default:
format, scale = "%.2fns", 1000*1000*1000
}
return func(ns float64) string {
return fmt.Sprintf(format, ns/1e9*scale)
}
}
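// Illustrative note (not part of the original source): the reference value picks
// the unit and precision once so every row in a column lines up. For example,
// timeScaler(2.5e6) falls into the "%.2fms" branch (2.5e6 ns = 0.0025 s), and the
// returned function renders 2.5e6 as "2.50ms".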
func newScaler(val float64, unit string) func(float64) string {
if unit == "ns/op" {
return timeScaler(val)
}
var format string
var scale float64
var suffix string
prescale := 1.0
if unit == "MB/s" {
prescale = 1e6
}
switch x := val * prescale; {
case x >= 99500000000000:
format, scale, suffix = "%.0f", 1e12, "T"
case x >= 9950000000000:
format, scale, suffix = "%.1f", 1e12, "T"
case x >= 995000000000:
format, scale, suffix = "%.2f", 1e12, "T"
case x >= 99500000000:
format, scale, suffix = "%.0f", 1e9, "G"
case x >= 9950000000:
format, scale, suffix = "%.1f", 1e9, "G"
case x >= 995000000:
format, scale, suffix = "%.2f", 1e9, "G"
case x >= 99500000:
format, scale, suffix = "%.0f", 1e6, "M"
case x >= 9950000:
format, scale, suffix = "%.1f", 1e6, "M"
case x >= 995000:
format, scale, suffix = "%.2f", 1e6, "M"
case x >= 99500:
format, scale, suffix = "%.0f", 1e3, "k"
case x >= 9950:
format, scale, suffix = "%.1f", 1e3, "k"
case x >= 995:
format, scale, suffix = "%.2f", 1e3, "k"
case x >= 99.5:
format, scale, suffix = "%.0f", 1, ""
case x >= 9.95:
format, scale, suffix = "%.1f", 1, ""
default:
format, scale, suffix = "%.2f", 1, ""
}
if unit == "B/op" {
suffix += "B"
}
if unit == "MB/s" {
suffix += "B/s"
}
scale /= prescale
return func(val float64) string {
return fmt.Sprintf(format+suffix, val/scale)
}
}
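// Illustrative note (not part of the original source): the same magnitude buckets
// apply to sizes and throughput, with an SI prefix plus unit suffix; e.g.
// newScaler(1500, "B/op") lands in the "k" bucket and formats 1500 as "1.50kB",
// while "MB/s" values are pre-scaled by 1e6 before a bucket is chosen.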
func (b *Benchstat) Format(scaler func(float64) string) string {
diff := 1 - b.Min/b.Mean
if d := b.Max/b.Mean - 1; d > diff {
diff = d
}
s := scaler(b.Mean)
if b.Mean == 0 {
s += " "
} else {
s = fmt.Sprintf("%s ±%3s", s, fmt.Sprintf("%.0f%%", diff*100.0))
}
return s
}
// ComputeStats updates the derived statistics in s from the raw
// samples in s.Values.
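// An extreme sample that lies more than 1.5×IQR below the first quartile or
// above the third quartile is treated as an outlier and excluded from RValues
// before Min, Mean, and Max are computed.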
func (stat *Benchstat) ComputeStats() {
// Discard outliers.
values := stats.Sample{Xs: stat.Values}
q1, q3 := values.Percentile(0.25), values.Percentile(0.75)
lo, hi := q1-1.5*(q3-q1), q3+1.5*(q3-q1)
for _, value := range stat.Values {
if lo <= value && value <= hi {
stat.RValues = append(stat.RValues, value)
}
}
// Compute statistics of remaining data.
stat.Min, stat.Max = stats.Bounds(stat.RValues)
stat.Mean = stats.Mean(stat.RValues)
}
// A Benchstat is the metrics along one axis (e.g., ns/op or MB/s)
// for all runs of a specific benchmark.
type Benchstat struct {
Unit string
Values []float64 // metrics
RValues []float64 // metrics with outliers removed
Min float64 // min of RValues
Mean float64 // mean of RValues
Max float64 // max of RValues
}
// A BenchKey identifies one metric (e.g., "ns/op", "B/op") from one
// benchmark (function name sans "Benchmark" prefix) in one
// configuration (input file name).
type BenchKey struct {
Config, Benchmark, Unit string
}
type Collection struct {
Stats map[BenchKey]*Benchstat
// Configs, Benchmarks, and Units give the set of configs,
// benchmarks, and units from the keys in Stats in an order
// meant to match the order the benchmarks were read in.
Configs, Benchmarks, Units []string
}
func (c *Collection) AddStat(key BenchKey) *Benchstat {
if stat, ok := c.Stats[key]; ok {
return stat
}
addString := func(strings *[]string, add string) {
for _, s := range *strings {
if s == add {
return
}
}
*strings = append(*strings, add)
}
addString(&c.Configs, key.Config)
addString(&c.Benchmarks, key.Benchmark)
addString(&c.Units, key.Unit)
stat := &Benchstat{Unit: key.Unit}
c.Stats[key] = stat
return stat
}
// readFiles reads a set of benchmark files.
func readFiles(files []string) *Collection {
c := Collection{Stats: make(map[BenchKey]*Benchstat)}
for _, file := range files {
readFile(file, &c)
}
return &c
}
// readFile reads a set of benchmarks from a file into a Collection.
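// For example, a hypothetical input line such as
//   BenchmarkDecode-8   2000   6214 ns/op   1280 B/op
// records 2000 iterations and adds the value 6214 under unit "ns/op" and
// 1280 under "B/op" for the benchmark named "Decode-8".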
func readFile(file string, c *Collection) {
c.Configs = append(c.Configs, file)
key := BenchKey{Config: file}
text, err := ioutil.ReadFile(file)
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(string(text), "\n") {
f := strings.Fields(line)
if len(f) < 4 {
continue
}
name := f[0]
if !strings.HasPrefix(name, "Benchmark") {
continue
}
name = strings.TrimPrefix(name, "Benchmark")
n, _ := strconv.Atoi(f[1])
if n == 0 {
continue
}
key.Benchmark = name
for i := 2; i+2 <= len(f); i += 2 {
val, err := strconv.ParseFloat(f[i], 64)
if err != nil {
continue
}
key.Unit = f[i+1]
stat := c.AddStat(key)
stat.Values = append(stat.Values, val)
}
}
}
func metricOf(unit string) string {
switch unit {
case "ns/op":
return "time/op"
case "B/op":
return "alloc/op"
case "MB/s":
return "speed"
default:
return unit
}
}
// Significance tests.
func notest(old, new *Benchstat) (pval float64, err error) {
return -1, nil
}
func ttest | new *Benchstat) (pval float64, err error) {
t, err := stats.TwoSampleWelchTTest(stats.Sample{Xs: old.RValues}, stats.Sample{Xs: new.RValues}, stats.LocationDiffers)
if err != nil {
return -1, err
}
return t.P, nil
}
func utest(old, new *Benchstat) (pval float64, err error) {
u, err := stats.MannWhitneyUTest(old.RValues, new.RValues, stats.LocationDiffers)
if err != nil {
return -1, err
}
return u.P, nil
}
| (old, | identifier_name |
main.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Benchstat computes and compares statistics about benchmarks.
//
// This package has moved. Please use https://golang.org/x/perf/cmd/benchstat
package main
import (
"bytes"
"flag"
"fmt"
"html"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"unicode/utf8"
"rsc.io/benchstat/internal/go-moremath/stats"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: benchstat [options] old.txt [new.txt] [more.txt ...]\n")
fmt.Fprintf(os.Stderr, "options:\n")
flag.PrintDefaults()
os.Exit(2)
}
var (
flagDeltaTest = flag.String("delta-test", "utest", "significance `test` to apply to delta: utest, ttest, or none")
flagAlpha = flag.Float64("alpha", 0.05, "consider change significant if p < `α`")
flagGeomean = flag.Bool("geomean", false, "print the geometric mean of each file")
flagHTML = flag.Bool("html", false, "print results as an HTML table")
)
var deltaTestNames = map[string]func(old, new *Benchstat) (float64, error){
"none": notest,
"u": utest,
"u-test": utest,
"utest": utest,
"t": ttest,
"t-test": ttest,
"ttest": ttest,
}
type row struct {
cols []string
}
func newRow(cols ...string) *row {
return &row{cols: cols}
}
func (r *row) add(col string) {
r.cols = append(r.cols, col)
}
func (r *row) trim() {
for len(r.cols) > 0 && r.cols[len(r.cols)-1] == "" {
r.cols = r.cols[:len(r.cols)-1]
}
}
func main() {
log.SetPrefix("benchstat: ")
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
deltaTest := deltaTestNames[strings.ToLower(*flagDeltaTest)]
if flag.NArg() < 1 || deltaTest == nil {
flag.Usage()
}
// Read in benchmark data.
c := readFiles(flag.Args())
for _, stat := range c.Stats {
stat.ComputeStats()
}
var tables [][]*row
switch len(c.Configs) {
case 2:
before, after := c.Configs[0], c.Configs[1]
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
for _, key.Benchmark = range c.Benchmarks {
key.Config = before
old := c.Stats[key]
key.Config = after
new := c.Stats[key]
if old == nil || new == nil {
continue
}
if len(table) == 0 {
table = append(table, newRow("name", "old "+metric, "new "+metric, "delta"))
}
pval, testerr := deltaTest(old, new)
scaler := newScaler(old.Mean, old.Unit)
row := newRow(key.Benchmark, old.Format(scaler), new.Format(scaler), "~ ")
if testerr == stats.ErrZeroVariance {
row.add("(zero variance)")
} else if testerr == stats.ErrSampleSize {
row.add("(too few samples)")
} else if testerr == stats.ErrSamplesEqual {
row.add("(all equal)")
} else if testerr != nil {
row.add(fmt.Sprintf("(%s)", testerr))
} else if pval < *flagAlpha {
row.cols[3] = fmt.Sprintf("%+.2f%%", ((new.Mean/old.Mean)-1.0)*100.0)
}
if len(row.cols) == 4 && pval != -1 {
row.add(fmt.Sprintf("(p=%0.3f n=%d+%d)", pval, len(old.RValues), len(new.RValues)))
}
table = append(table, row)
}
if len(table) > 0 {
table = addGeomean(table, c, key.Unit, true)
tables = append(tables, table)
}
}
default:
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
if len(c.Configs) > 1 {
hdr := newRow("name \\ " + metric)
for _, config := range c.Configs {
hdr.add(config)
}
table = append(table, hdr)
} else {
table = append(table, newRow("name", metric))
}
for _, key.Benchmark = range c.Benchmarks {
row := newRow(key.Benchmark)
var scaler func(float64) string
for _, key.Config = range c.Configs {
stat := c.Stats[key]
if stat == nil {
row.add("")
continue
}
if scaler == nil {
scaler = newScaler(stat.Mean, stat.Unit)
}
row.add(stat.Format(scaler))
}
row.trim()
if len(row.cols) > 1 {
table = append(table, row)
}
}
table = addGeomean(table, c, key.Unit, false)
tables = append(tables, table)
}
}
numColumn := 0
for _, table := range tables {
for _, row := range table {
if numColumn < len(row.cols) {
numColumn = len(row.cols)
}
}
}
max := make([]int, numColumn)
for _, table := range tables {
for _, row := range table {
for i, s := range row.cols {
n := utf8.RuneCountInString(s)
if max[i] < n {
max[i] = n
}
}
}
}
var buf bytes.Buffer
for i, table := range tables {
if i > 0 {
fmt.Fprintf(&buf, "\n")
}
if *flagHTML {
fmt.Fprintf(&buf, "<style>.benchstat tbody td:nth-child(1n+2) { text-align: right; padding: 0em 1em; }</style>\n")
fmt.Fprintf(&buf, "<table class='benchstat'>\n")
printRow := func(row *row, tag string) {
fmt.Fprintf(&buf, "<tr>")
for _, cell := range row.cols {
fmt.Fprintf(&buf, "<%s>%s</%s>", tag, html.EscapeString(cell), tag)
}
fmt.Fprintf(&buf, "\n")
}
printRow(table[0], "th")
for _, row := range table[1:] {
printRow(row, "td")
}
fmt.Fprintf(&buf, "</table>\n")
continue
}
// headings
row := table[0]
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
fmt.Fprintf(&buf, " %-*s", max[i], s)
case len(row.cols) - 1:
fmt.Fprintf(&buf, " %s\n", s)
}
}
// data
for _, row := range table[1:] {
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
if i == len(row.cols)-1 && len(s) > 0 && s[0] == '(' {
// Left-align p value.
fmt.Fprintf(&buf, " %s", s)
break
}
fmt.Fprintf(&buf, " %*s", max[i], s)
}
}
fmt.Fprintf(&buf, "\n")
}
}
os.Stdout.Write(buf.Bytes())
}
func addGeomean(table []*row, c *Collection, unit string, delta bool) []*row {
if !*flagGeomean {
return table
}
row := newRow("[Geo mean]")
key := BenchKey{Unit: unit}
geomeans := []float64{}
for _, key.Config = range c.Configs {
var means []float64
for _, key.Benchmark = range c.Benchmarks {
stat := c.Stats[key]
if stat != nil {
means = append(means, stat.Mean)
}
}
if len(means) == 0 {
row.add("")
delta = false
} else {
geomean := stats.GeoMean(means)
geomeans = append(geomeans, geomean)
row.add(newScaler(geomean, unit)(geomean) + " ")
}
}
if delta {
row.add(fmt.Sprintf("%+.2f%%", ((geomeans[1]/geomeans[0])-1.0)*100.0))
}
return append(table, row)
}
func timeScaler(ns float64) func(float64) string {
var format string
var scale float64
switch x := ns / 1e9; {
case x >= 99.5:
format, scale = "%.0fs", 1
case x >= 9.95:
format, scale = "%.1fs", 1
case x >= 0.995:
format, scale = "%.2fs", 1
case x >= 0.0995:
format, scale = "%.0fms", 1000
case x >= 0.00995:
format, scale = "%.1fms", 1000
case x >= 0.000995:
format, scale = "%.2fms", 1000
case x >= 0.0000995:
format, scale = "%.0fµs", 1000*1000
case x >= 0.00000995:
format, scale = "%.1fµs", 1000*1000
case x >= 0.000000995:
format, scale = "%.2fµs", 1000*1000
case x >= 0.0000000995:
format, scale = "%.0fns", 1000*1000*1000
case x >= 0.00000000995:
format, scale = "%.1fns", 1000*1000*1000
default:
format, scale = "%.2fns", 1000*1000*1000
}
return func(ns float64) string {
return fmt.Sprintf(format, ns/1e9*scale)
}
}
func newScaler(val float64, unit string) func(float64) string {
if unit == "ns/op" {
return timeScaler(val)
}
var format string
var scale float64
var suffix string
prescale := 1.0
if unit == "MB/s" {
prescale = 1e6
}
switch x := val * prescale; {
case x >= 99500000000000:
format, scale, suffix = "%.0f", 1e12, "T"
case x >= 9950000000000:
format, scale, suffix = "%.1f", 1e12, "T"
case x >= 995000000000:
format, scale, suffix = "%.2f", 1e12, "T"
case x >= 99500000000:
format, scale, suffix = "%.0f", 1e9, "G"
case x >= 9950000000:
format, scale, suffix = "%.1f", 1e9, "G"
case x >= 995000000:
format, scale, suffix = "%.2f", 1e9, "G"
case x >= 99500000:
format, scale, suffix = "%.0f", 1e6, "M"
case x >= 9950000:
format, scale, suffix = "%.1f", 1e6, "M"
case x >= 995000:
format, scale, suffix = "%.2f", 1e6, "M"
case x >= 99500:
format, scale, suffix = "%.0f", 1e3, "k"
case x >= 9950:
format, scale, suffix = "%.1f", 1e3, "k"
case x >= 995:
format, scale, suffix = "%.2f", 1e3, "k"
case x >= 99.5:
format, scale, suffix = "%.0f", 1, ""
case x >= 9.95:
format, scale, suffix = "%.1f", 1, ""
default:
format, scale, suffix = "%.2f", 1, ""
}
if unit == "B/op" {
suffix += "B"
}
if unit == "MB/s" {
suffix += "B/s"
}
scale /= prescale
return func(val float64) string {
return fmt.Sprintf(format+suffix, val/scale)
}
}
func (b *Benchstat) Format(scaler func(float64) string) string {
diff := 1 - b.Min/b.Mean
if d := b.Max/b.Mean - 1; d > diff {
diff = d
}
s := scaler(b.Mean)
if b.Mean == 0 {
s += " "
} else {
s = fmt.Sprintf("%s ±%3s", s, fmt.Sprintf("%.0f%%", diff*100.0))
}
return s
}
// ComputeStats updates the derived statistics in s from the raw
// samples in s.Values.
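// An extreme sample that lies more than 1.5×IQR below the first quartile or
// above the third quartile is treated as an outlier and excluded from RValues
// before Min, Mean, and Max are computed.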
func (stat *Benchstat) ComputeStats() {
// Discard outliers.
values := stats.Sample{Xs: stat.Values}
q1, q3 := values.Percentile(0.25), values.Percentile(0.75)
lo, hi := q1-1.5*(q3-q1), q3+1.5*(q3-q1)
for _, value := range stat.Values {
if lo <= value && value <= hi {
stat.RValues = append(stat.RValues, value)
}
}
// Compute statistics of remaining data.
stat.Min, stat.Max = stats.Bounds(stat.RValues)
stat.Mean = stats.Mean(stat.RValues)
}
// A Benchstat is the metrics along one axis (e.g., ns/op or MB/s)
// for all runs of a specific benchmark.
type Benchstat struct {
Unit string
Values []float64 // metrics
RValues []float64 // metrics with outliers removed
Min float64 // min of RValues
Mean float64 // mean of RValues
Max float64 // max of RValues
}
// A BenchKey identifies one metric (e.g., "ns/op", "B/op") from one
// benchmark (function name sans "Benchmark" prefix) in one
// configuration (input file name).
type BenchKey struct {
Config, Benchmark, Unit string
}
type Collection struct {
Stats map[BenchKey]*Benchstat
// Configs, Benchmarks, and Units give the set of configs,
// benchmarks, and units from the keys in Stats in an order
// meant to match the order the benchmarks were read in.
Configs, Benchmarks, Units []string
}
func (c *Collection) AddStat(key BenchKey) *Benchstat {
if stat, ok := c.Stats[key]; ok {
return stat
}
addString := func(strings *[]string, add string) {
for _, s := range *strings {
if s == add {
return
}
}
*strings = append(*strings, add)
}
addString(&c.Configs, key.Config)
addString(&c.Benchmarks, key.Benchmark)
addString(&c.Units, key.Unit)
stat := &Benchstat{Unit: key.Unit}
c.Stats[key] = stat
return stat
}
// readFiles reads a set of benchmark files.
func readFiles(files []string) *Collection {
c := Collection{Stats: make(map[BenchKey]*Benchstat)}
for _, file := range files {
readFile(file, &c)
}
return &c
}
// readFile reads a set of benchmarks from a file into a Collection.
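// For example, a hypothetical input line such as
//   BenchmarkDecode-8   2000   6214 ns/op   1280 B/op
// records 2000 iterations and adds the value 6214 under unit "ns/op" and
// 1280 under "B/op" for the benchmark named "Decode-8".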
func readFile(file string, c *Collection) {
c.Configs = append(c.Configs, file)
key := BenchKey{Config: file}
text, err := ioutil.ReadFile(file)
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(string(text), "\n") {
f := strings.Fields(line)
if len(f) < 4 {
continue
}
name := f[0]
if !strings.HasPrefix(name, "Benchmark") {
continue
}
name = strings.TrimPrefix(name, "Benchmark")
n, _ := strconv.Atoi(f[1])
if n == 0 {
continue
}
key.Benchmark = name
for i := 2; i+2 <= len(f); i += 2 {
val, err := strconv.ParseFloat(f[i], 64)
if err != nil {
continue
}
key.Unit = f[i+1]
stat := c.AddStat(key)
stat.Values = append(stat.Values, val)
}
}
}
func metricOf(unit string) string {
switch unit {
case "ns/op":
return "time/op"
case "B/op":
return "alloc/op"
case "MB/s":
return "speed"
default:
return unit
}
}
// Significance tests.
func notest(old, new *Benchstat) (pval float64, err error) {
return -1, nil
}
func ttest(old, new *Benchstat) (pval float64, err error) {
t, | c utest(old, new *Benchstat) (pval float64, err error) {
u, err := stats.MannWhitneyUTest(old.RValues, new.RValues, stats.LocationDiffers)
if err != nil {
return -1, err
}
return u.P, nil
}
| err := stats.TwoSampleWelchTTest(stats.Sample{Xs: old.RValues}, stats.Sample{Xs: new.RValues}, stats.LocationDiffers)
if err != nil {
return -1, err
}
return t.P, nil
}
fun | identifier_body |
main.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Benchstat computes and compares statistics about benchmarks.
//
// This package has moved. Please use https://golang.org/x/perf/cmd/benchstat
package main
import (
"bytes"
"flag"
"fmt"
"html"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"unicode/utf8"
"rsc.io/benchstat/internal/go-moremath/stats"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: benchstat [options] old.txt [new.txt] [more.txt ...]\n")
fmt.Fprintf(os.Stderr, "options:\n")
flag.PrintDefaults()
os.Exit(2)
}
var (
flagDeltaTest = flag.String("delta-test", "utest", "significance `test` to apply to delta: utest, ttest, or none")
flagAlpha = flag.Float64("alpha", 0.05, "consider change significant if p < `α`")
flagGeomean = flag.Bool("geomean", false, "print the geometric mean of each file")
flagHTML = flag.Bool("html", false, "print results as an HTML table")
)
var deltaTestNames = map[string]func(old, new *Benchstat) (float64, error){
"none": notest,
"u": utest,
"u-test": utest,
"utest": utest,
"t": ttest,
"t-test": ttest,
"ttest": ttest,
}
type row struct {
cols []string
}
func newRow(cols ...string) *row {
return &row{cols: cols}
}
func (r *row) add(col string) {
r.cols = append(r.cols, col)
}
func (r *row) trim() {
for len(r.cols) > 0 && r.cols[len(r.cols)-1] == "" {
r.cols = r.cols[:len(r.cols)-1]
}
}
func main() {
log.SetPrefix("benchstat: ")
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
deltaTest := deltaTestNames[strings.ToLower(*flagDeltaTest)]
if flag.NArg() < 1 || deltaTest == nil {
flag.Usage()
}
// Read in benchmark data.
c := readFiles(flag.Args())
for _, stat := range c.Stats {
stat.ComputeStats()
}
var tables [][]*row
switch len(c.Configs) {
case 2:
before, after := c.Configs[0], c.Configs[1]
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
for _, key.Benchmark = range c.Benchmarks {
key.Config = before
old := c.Stats[key]
key.Config = after
new := c.Stats[key]
if old == nil || new == nil {
continue
}
if len(table) == 0 {
table = append(table, newRow("name", "old "+metric, "new "+metric, "delta"))
}
pval, testerr := deltaTest(old, new)
scaler := newScaler(old.Mean, old.Unit)
row := newRow(key.Benchmark, old.Format(scaler), new.Format(scaler), "~ ")
if testerr == stats.ErrZeroVariance {
row.add("(zero variance)")
} else if testerr == stats.ErrSampleSize {
row.add("(too few samples)")
} else if testerr == stats.ErrSamplesEqual {
row.add("(all equal)")
} else if testerr != nil {
row.add(fmt.Sprintf("(%s)", testerr))
} else if pval < *flagAlpha {
row.cols[3] = fmt.Sprintf("%+.2f%%", ((new.Mean/old.Mean)-1.0)*100.0)
}
if len(row.cols) == 4 && pval != -1 {
row.add(fmt.Sprintf("(p=%0.3f n=%d+%d)", pval, len(old.RValues), len(new.RValues)))
}
table = append(table, row)
}
if len(table) > 0 {
table = addGeomean(table, c, key.Unit, true)
tables = append(tables, table)
}
}
default:
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
if len(c.Configs) > 1 {
hdr := newRow("name \\ " + metric)
for _, config := range c.Configs {
hdr.add(config)
}
table = append(table, hdr)
} else {
table = append(table, newRow("name", metric))
}
for _, key.Benchmark = range c.Benchmarks {
row := newRow(key.Benchmark)
var scaler func(float64) string
for _, key.Config = range c.Configs {
stat := c.Stats[key]
if stat == nil {
row.add("")
continue
}
if scaler == nil {
scaler = newScaler(stat.Mean, stat.Unit)
}
row.add(stat.Format(scaler))
}
row.trim()
if len(row.cols) > 1 {
table = append(table, row)
}
}
table = addGeomean(table, c, key.Unit, false)
tables = append(tables, table)
}
}
numColumn := 0
for _, table := range tables {
for _, row := range table {
if numColumn < len(row.cols) {
numColumn = len(row.cols)
}
}
}
max := make([]int, numColumn)
for _, table := range tables {
for _, row := range table {
for i, s := range row.cols {
n := utf8.RuneCountInString(s)
if max[i] < n {
max[i] = n
}
}
}
}
var buf bytes.Buffer
for i, table := range tables {
if i > 0 {
fmt.Fprintf(&buf, "\n")
}
if *flagHTML {
fmt.Fprintf(&buf, "<style>.benchstat tbody td:nth-child(1n+2) { text-align: right; padding: 0em 1em; }</style>\n")
fmt.Fprintf(&buf, "<table class='benchstat'>\n")
printRow := func(row *row, tag string) {
fmt.Fprintf(&buf, "<tr>")
for _, cell := range row.cols {
fmt.Fprintf(&buf, "<%s>%s</%s>", tag, html.EscapeString(cell), tag)
}
fmt.Fprintf(&buf, "\n")
}
printRow(table[0], "th")
for _, row := range table[1:] {
printRow(row, "td")
}
fmt.Fprintf(&buf, "</table>\n")
continue
}
// headings
row := table[0]
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
fmt.Fprintf(&buf, " %-*s", max[i], s)
case len(row.cols) - 1:
fmt.Fprintf(&buf, " %s\n", s)
}
}
// data
for _, row := range table[1:] {
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
if i == len(row.cols)-1 && len(s) > 0 && s[0] == '(' {
// Left-align p value.
fmt.Fprintf(&buf, " %s", s)
break
}
fmt.Fprintf(&buf, " %*s", max[i], s)
}
}
fmt.Fprintf(&buf, "\n")
}
}
os.Stdout.Write(buf.Bytes())
}
func addGeomean(table []*row, c *Collection, unit string, delta bool) []*row {
if !*flagGeomean {
return table
}
row := newRow("[Geo mean]")
key := BenchKey{Unit: unit}
geomeans := []float64{}
for _, key.Config = range c.Configs {
var means []float64
for _, key.Benchmark = range c.Benchmarks {
stat := c.Stats[key]
if stat != nil {
means = append(means, stat.Mean)
}
}
if len(means) == 0 {
row.add("")
delta = false
} else {
geomean := stats.GeoMean(means)
geomeans = append(geomeans, geomean)
row.add(newScaler(geomean, unit)(geomean) + " ")
}
}
if delta {
row.add(fmt.Sprintf("%+.2f%%", ((geomeans[1]/geomeans[0])-1.0)*100.0))
}
return append(table, row)
}
func timeScaler(ns float64) func(float64) string {
var format string
var scale float64
switch x := ns / 1e9; {
case x >= 99.5:
format, scale = "%.0fs", 1
case x >= 9.95:
format, scale = "%.1fs", 1
case x >= 0.995:
format, scale = "%.2fs", 1
case x >= 0.0995:
format, scale = "%.0fms", 1000
case x >= 0.00995:
format, scale = "%.1fms", 1000
case x >= 0.000995:
format, scale = "%.2fms", 1000
case x >= 0.0000995:
format, scale = "%.0fµs", 1000*1000
case x >= 0.00000995:
format, scale = "%.1fµs", 1000*1000
case x >= 0.000000995:
format, scale = "%.2fµs", 1000*1000
case x >= 0.0000000995:
format, scale = "%.0fns", 1000*1000*1000
case x >= 0.00000000995:
format, scale = "%.1fns", 1000*1000*1000
default:
format, scale = "%.2fns", 1000*1000*1000
}
return func(ns float64) string {
return fmt.Sprintf(format, ns/1e9*scale)
}
}
func newScaler(val float64, unit string) func(float64) string {
if unit == "ns/op" {
return timeScaler(val)
}
var format string
var scale float64
var suffix string
prescale := 1.0
if unit == "MB/s" {
prescale = 1e6
}
switch x := val * prescale; {
case x >= 99500000000000:
format, scale, suffix = "%.0f", 1e12, "T"
case x >= 9950000000000:
format, scale, suffix = "%.1f", 1e12, "T"
case x >= 995000000000:
format, scale, suffix = "%.2f", 1e12, "T"
case x >= 99500000000:
format, scale, suffix = "%.0f", 1e9, "G"
case x >= 9950000000:
format, scale, suffix = "%.1f", 1e9, "G"
case x >= 995000000:
format, scale, suffix = "%.2f", 1e9, "G"
case x >= 99500000:
format, scale, suffix = "%.0f", 1e6, "M"
case x >= 9950000:
format, scale, suffix = "%.1f", 1e6, "M"
case x >= 995000:
format, scale, suffix = "%.2f", 1e6, "M"
case x >= 99500:
format, scale, suffix = "%.0f", 1e3, "k"
case x >= 9950:
format, scale, suffix = "%.1f", 1e3, "k"
case x >= 995:
format, scale, suffix = "%.2f", 1e3, "k"
case x >= 99.5:
format, scale, suffix = "%.0f", 1, ""
case x >= 9.95:
format, scale, suffix = "%.1f", 1, ""
default:
format, scale, suffix = "%.2f", 1, ""
}
if unit == "B/op" {
suffix += "B"
}
if unit == "MB/s" {
suffix += "B/s"
}
scale /= prescale
return func(val float64) string {
return fmt.Sprintf(format+suffix, val/scale) | }
func (b *Benchstat) Format(scaler func(float64) string) string {
diff := 1 - b.Min/b.Mean
if d := b.Max/b.Mean - 1; d > diff {
diff = d
}
s := scaler(b.Mean)
if b.Mean == 0 {
s += " "
} else {
s = fmt.Sprintf("%s ±%3s", s, fmt.Sprintf("%.0f%%", diff*100.0))
}
return s
}
// ComputeStats updates the derived statistics in s from the raw
// samples in s.Values.
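// An extreme sample that lies more than 1.5×IQR below the first quartile or
// above the third quartile is treated as an outlier and excluded from RValues
// before Min, Mean, and Max are computed.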
func (stat *Benchstat) ComputeStats() {
// Discard outliers.
values := stats.Sample{Xs: stat.Values}
q1, q3 := values.Percentile(0.25), values.Percentile(0.75)
lo, hi := q1-1.5*(q3-q1), q3+1.5*(q3-q1)
for _, value := range stat.Values {
if lo <= value && value <= hi {
stat.RValues = append(stat.RValues, value)
}
}
// Compute statistics of remaining data.
stat.Min, stat.Max = stats.Bounds(stat.RValues)
stat.Mean = stats.Mean(stat.RValues)
}
// A Benchstat is the metrics along one axis (e.g., ns/op or MB/s)
// for all runs of a specific benchmark.
type Benchstat struct {
Unit string
Values []float64 // metrics
RValues []float64 // metrics with outliers removed
Min float64 // min of RValues
Mean float64 // mean of RValues
Max float64 // max of RValues
}
// A BenchKey identifies one metric (e.g., "ns/op", "B/op") from one
// benchmark (function name sans "Benchmark" prefix) in one
// configuration (input file name).
type BenchKey struct {
Config, Benchmark, Unit string
}
type Collection struct {
Stats map[BenchKey]*Benchstat
// Configs, Benchmarks, and Units give the set of configs,
// benchmarks, and units from the keys in Stats in an order
// meant to match the order the benchmarks were read in.
Configs, Benchmarks, Units []string
}
func (c *Collection) AddStat(key BenchKey) *Benchstat {
if stat, ok := c.Stats[key]; ok {
return stat
}
addString := func(strings *[]string, add string) {
for _, s := range *strings {
if s == add {
return
}
}
*strings = append(*strings, add)
}
addString(&c.Configs, key.Config)
addString(&c.Benchmarks, key.Benchmark)
addString(&c.Units, key.Unit)
stat := &Benchstat{Unit: key.Unit}
c.Stats[key] = stat
return stat
}
// readFiles reads a set of benchmark files.
func readFiles(files []string) *Collection {
c := Collection{Stats: make(map[BenchKey]*Benchstat)}
for _, file := range files {
readFile(file, &c)
}
return &c
}
// readFile reads a set of benchmarks from a file into a Collection.
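// For example, a hypothetical input line such as
//   BenchmarkDecode-8   2000   6214 ns/op   1280 B/op
// records 2000 iterations and adds the value 6214 under unit "ns/op" and
// 1280 under "B/op" for the benchmark named "Decode-8".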
func readFile(file string, c *Collection) {
c.Configs = append(c.Configs, file)
key := BenchKey{Config: file}
text, err := ioutil.ReadFile(file)
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(string(text), "\n") {
f := strings.Fields(line)
if len(f) < 4 {
continue
}
name := f[0]
if !strings.HasPrefix(name, "Benchmark") {
continue
}
name = strings.TrimPrefix(name, "Benchmark")
n, _ := strconv.Atoi(f[1])
if n == 0 {
continue
}
key.Benchmark = name
for i := 2; i+2 <= len(f); i += 2 {
val, err := strconv.ParseFloat(f[i], 64)
if err != nil {
continue
}
key.Unit = f[i+1]
stat := c.AddStat(key)
stat.Values = append(stat.Values, val)
}
}
}
func metricOf(unit string) string {
switch unit {
case "ns/op":
return "time/op"
case "B/op":
return "alloc/op"
case "MB/s":
return "speed"
default:
return unit
}
}
// Significance tests.
func notest(old, new *Benchstat) (pval float64, err error) {
return -1, nil
}
func ttest(old, new *Benchstat) (pval float64, err error) {
t, err := stats.TwoSampleWelchTTest(stats.Sample{Xs: old.RValues}, stats.Sample{Xs: new.RValues}, stats.LocationDiffers)
if err != nil {
return -1, err
}
return t.P, nil
}
func utest(old, new *Benchstat) (pval float64, err error) {
u, err := stats.MannWhitneyUTest(old.RValues, new.RValues, stats.LocationDiffers)
if err != nil {
return -1, err
}
return u.P, nil
} | } | random_line_split |
main.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Benchstat computes and compares statistics about benchmarks.
//
// This package has moved. Please use https://golang.org/x/perf/cmd/benchstat
package main
import (
"bytes"
"flag"
"fmt"
"html"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"unicode/utf8"
"rsc.io/benchstat/internal/go-moremath/stats"
)
func usage() {
fmt.Fprintf(os.Stderr, "usage: benchstat [options] old.txt [new.txt] [more.txt ...]\n")
fmt.Fprintf(os.Stderr, "options:\n")
flag.PrintDefaults()
os.Exit(2)
}
var (
flagDeltaTest = flag.String("delta-test", "utest", "significance `test` to apply to delta: utest, ttest, or none")
flagAlpha = flag.Float64("alpha", 0.05, "consider change significant if p < `α`")
flagGeomean = flag.Bool("geomean", false, "print the geometric mean of each file")
flagHTML = flag.Bool("html", false, "print results as an HTML table")
)
var deltaTestNames = map[string]func(old, new *Benchstat) (float64, error){
"none": notest,
"u": utest,
"u-test": utest,
"utest": utest,
"t": ttest,
"t-test": ttest,
"ttest": ttest,
}
type row struct {
cols []string
}
func newRow(cols ...string) *row {
return &row{cols: cols}
}
func (r *row) add(col string) {
r.cols = append(r.cols, col)
}
func (r *row) trim() {
for len(r.cols) > 0 && r.cols[len(r.cols)-1] == "" { | }
func main() {
log.SetPrefix("benchstat: ")
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
deltaTest := deltaTestNames[strings.ToLower(*flagDeltaTest)]
if flag.NArg() < 1 || deltaTest == nil {
flag.Usage()
}
// Read in benchmark data.
c := readFiles(flag.Args())
for _, stat := range c.Stats {
stat.ComputeStats()
}
var tables [][]*row
switch len(c.Configs) {
case 2:
before, after := c.Configs[0], c.Configs[1]
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
for _, key.Benchmark = range c.Benchmarks {
key.Config = before
old := c.Stats[key]
key.Config = after
new := c.Stats[key]
if old == nil || new == nil {
continue
}
if len(table) == 0 {
table = append(table, newRow("name", "old "+metric, "new "+metric, "delta"))
}
pval, testerr := deltaTest(old, new)
scaler := newScaler(old.Mean, old.Unit)
row := newRow(key.Benchmark, old.Format(scaler), new.Format(scaler), "~ ")
if testerr == stats.ErrZeroVariance {
row.add("(zero variance)")
} else if testerr == stats.ErrSampleSize {
row.add("(too few samples)")
} else if testerr == stats.ErrSamplesEqual {
row.add("(all equal)")
} else if testerr != nil {
row.add(fmt.Sprintf("(%s)", testerr))
} else if pval < *flagAlpha {
row.cols[3] = fmt.Sprintf("%+.2f%%", ((new.Mean/old.Mean)-1.0)*100.0)
}
if len(row.cols) == 4 && pval != -1 {
row.add(fmt.Sprintf("(p=%0.3f n=%d+%d)", pval, len(old.RValues), len(new.RValues)))
}
table = append(table, row)
}
if len(table) > 0 {
table = addGeomean(table, c, key.Unit, true)
tables = append(tables, table)
}
}
default:
key := BenchKey{}
for _, key.Unit = range c.Units {
var table []*row
metric := metricOf(key.Unit)
if len(c.Configs) > 1 {
hdr := newRow("name \\ " + metric)
for _, config := range c.Configs {
hdr.add(config)
}
table = append(table, hdr)
} else {
table = append(table, newRow("name", metric))
}
for _, key.Benchmark = range c.Benchmarks {
row := newRow(key.Benchmark)
var scaler func(float64) string
for _, key.Config = range c.Configs {
stat := c.Stats[key]
if stat == nil {
row.add("")
continue
}
if scaler == nil {
scaler = newScaler(stat.Mean, stat.Unit)
}
row.add(stat.Format(scaler))
}
row.trim()
if len(row.cols) > 1 {
table = append(table, row)
}
}
table = addGeomean(table, c, key.Unit, false)
tables = append(tables, table)
}
}
numColumn := 0
for _, table := range tables {
for _, row := range table {
if numColumn < len(row.cols) {
numColumn = len(row.cols)
}
}
}
max := make([]int, numColumn)
for _, table := range tables {
for _, row := range table {
for i, s := range row.cols {
n := utf8.RuneCountInString(s)
if max[i] < n {
max[i] = n
}
}
}
}
var buf bytes.Buffer
for i, table := range tables {
if i > 0 {
fmt.Fprintf(&buf, "\n")
}
if *flagHTML {
fmt.Fprintf(&buf, "<style>.benchstat tbody td:nth-child(1n+2) { text-align: right; padding: 0em 1em; }</style>\n")
fmt.Fprintf(&buf, "<table class='benchstat'>\n")
printRow := func(row *row, tag string) {
fmt.Fprintf(&buf, "<tr>")
for _, cell := range row.cols {
fmt.Fprintf(&buf, "<%s>%s</%s>", tag, html.EscapeString(cell), tag)
}
fmt.Fprintf(&buf, "\n")
}
printRow(table[0], "th")
for _, row := range table[1:] {
printRow(row, "td")
}
fmt.Fprintf(&buf, "</table>\n")
continue
}
// headings
row := table[0]
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
fmt.Fprintf(&buf, " %-*s", max[i], s)
case len(row.cols) - 1:
fmt.Fprintf(&buf, " %s\n", s)
}
}
// data
for _, row := range table[1:] {
for i, s := range row.cols {
switch i {
case 0:
fmt.Fprintf(&buf, "%-*s", max[i], s)
default:
if i == len(row.cols)-1 && len(s) > 0 && s[0] == '(' {
// Left-align p value.
fmt.Fprintf(&buf, " %s", s)
break
}
fmt.Fprintf(&buf, " %*s", max[i], s)
}
}
fmt.Fprintf(&buf, "\n")
}
}
os.Stdout.Write(buf.Bytes())
}
func addGeomean(table []*row, c *Collection, unit string, delta bool) []*row {
if !*flagGeomean {
return table
}
row := newRow("[Geo mean]")
key := BenchKey{Unit: unit}
geomeans := []float64{}
for _, key.Config = range c.Configs {
var means []float64
for _, key.Benchmark = range c.Benchmarks {
stat := c.Stats[key]
if stat != nil {
means = append(means, stat.Mean)
}
}
if len(means) == 0 {
row.add("")
delta = false
} else {
geomean := stats.GeoMean(means)
geomeans = append(geomeans, geomean)
row.add(newScaler(geomean, unit)(geomean) + " ")
}
}
if delta {
row.add(fmt.Sprintf("%+.2f%%", ((geomeans[1]/geomeans[0])-1.0)*100.0))
}
return append(table, row)
}
func timeScaler(ns float64) func(float64) string {
var format string
var scale float64
switch x := ns / 1e9; {
case x >= 99.5:
format, scale = "%.0fs", 1
case x >= 9.95:
format, scale = "%.1fs", 1
case x >= 0.995:
format, scale = "%.2fs", 1
case x >= 0.0995:
format, scale = "%.0fms", 1000
case x >= 0.00995:
format, scale = "%.1fms", 1000
case x >= 0.000995:
format, scale = "%.2fms", 1000
case x >= 0.0000995:
format, scale = "%.0fµs", 1000*1000
case x >= 0.00000995:
format, scale = "%.1fµs", 1000*1000
case x >= 0.000000995:
format, scale = "%.2fµs", 1000*1000
case x >= 0.0000000995:
format, scale = "%.0fns", 1000*1000*1000
case x >= 0.00000000995:
format, scale = "%.1fns", 1000*1000*1000
default:
format, scale = "%.2fns", 1000*1000*1000
}
return func(ns float64) string {
return fmt.Sprintf(format, ns/1e9*scale)
}
}
func newScaler(val float64, unit string) func(float64) string {
if unit == "ns/op" {
return timeScaler(val)
}
var format string
var scale float64
var suffix string
prescale := 1.0
if unit == "MB/s" {
prescale = 1e6
}
switch x := val * prescale; {
case x >= 99500000000000:
format, scale, suffix = "%.0f", 1e12, "T"
case x >= 9950000000000:
format, scale, suffix = "%.1f", 1e12, "T"
case x >= 995000000000:
format, scale, suffix = "%.2f", 1e12, "T"
case x >= 99500000000:
format, scale, suffix = "%.0f", 1e9, "G"
case x >= 9950000000:
format, scale, suffix = "%.1f", 1e9, "G"
case x >= 995000000:
format, scale, suffix = "%.2f", 1e9, "G"
case x >= 99500000:
format, scale, suffix = "%.0f", 1e6, "M"
case x >= 9950000:
format, scale, suffix = "%.1f", 1e6, "M"
case x >= 995000:
format, scale, suffix = "%.2f", 1e6, "M"
case x >= 99500:
format, scale, suffix = "%.0f", 1e3, "k"
case x >= 9950:
format, scale, suffix = "%.1f", 1e3, "k"
case x >= 995:
format, scale, suffix = "%.2f", 1e3, "k"
case x >= 99.5:
format, scale, suffix = "%.0f", 1, ""
case x >= 9.95:
format, scale, suffix = "%.1f", 1, ""
default:
format, scale, suffix = "%.2f", 1, ""
}
if unit == "B/op" {
suffix += "B"
}
if unit == "MB/s" {
suffix += "B/s"
}
scale /= prescale
return func(val float64) string {
return fmt.Sprintf(format+suffix, val/scale)
}
}
func (b *Benchstat) Format(scaler func(float64) string) string {
diff := 1 - b.Min/b.Mean
if d := b.Max/b.Mean - 1; d > diff {
diff = d
}
s := scaler(b.Mean)
if b.Mean == 0 {
s += " "
} else {
s = fmt.Sprintf("%s ±%3s", s, fmt.Sprintf("%.0f%%", diff*100.0))
}
return s
}
// ComputeStats updates the derived statistics in s from the raw
// samples in s.Values.
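// An extreme sample that lies more than 1.5×IQR below the first quartile or
// above the third quartile is treated as an outlier and excluded from RValues
// before Min, Mean, and Max are computed.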
func (stat *Benchstat) ComputeStats() {
// Discard outliers.
values := stats.Sample{Xs: stat.Values}
q1, q3 := values.Percentile(0.25), values.Percentile(0.75)
lo, hi := q1-1.5*(q3-q1), q3+1.5*(q3-q1)
for _, value := range stat.Values {
if lo <= value && value <= hi {
stat.RValues = append(stat.RValues, value)
}
}
// Compute statistics of remaining data.
stat.Min, stat.Max = stats.Bounds(stat.RValues)
stat.Mean = stats.Mean(stat.RValues)
}
// A Benchstat is the metrics along one axis (e.g., ns/op or MB/s)
// for all runs of a specific benchmark.
type Benchstat struct {
Unit string
Values []float64 // metrics
RValues []float64 // metrics with outliers removed
Min float64 // min of RValues
Mean float64 // mean of RValues
Max float64 // max of RValues
}
// A BenchKey identifies one metric (e.g., "ns/op", "B/op") from one
// benchmark (function name sans "Benchmark" prefix) in one
// configuration (input file name).
type BenchKey struct {
Config, Benchmark, Unit string
}
type Collection struct {
Stats map[BenchKey]*Benchstat
// Configs, Benchmarks, and Units give the set of configs,
// benchmarks, and units from the keys in Stats in an order
// meant to match the order the benchmarks were read in.
Configs, Benchmarks, Units []string
}
func (c *Collection) AddStat(key BenchKey) *Benchstat {
if stat, ok := c.Stats[key]; ok {
return stat
}
addString := func(strings *[]string, add string) {
for _, s := range *strings {
if s == add {
return
}
}
*strings = append(*strings, add)
}
addString(&c.Configs, key.Config)
addString(&c.Benchmarks, key.Benchmark)
addString(&c.Units, key.Unit)
stat := &Benchstat{Unit: key.Unit}
c.Stats[key] = stat
return stat
}
// readFiles reads a set of benchmark files.
func readFiles(files []string) *Collection {
c := Collection{Stats: make(map[BenchKey]*Benchstat)}
for _, file := range files {
readFile(file, &c)
}
return &c
}
// readFile reads a set of benchmarks from a file into a Collection.
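// For example, a hypothetical input line such as
//   BenchmarkDecode-8   2000   6214 ns/op   1280 B/op
// records 2000 iterations and adds the value 6214 under unit "ns/op" and
// 1280 under "B/op" for the benchmark named "Decode-8".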
func readFile(file string, c *Collection) {
c.Configs = append(c.Configs, file)
key := BenchKey{Config: file}
text, err := ioutil.ReadFile(file)
if err != nil {
log.Fatal(err)
}
for _, line := range strings.Split(string(text), "\n") {
f := strings.Fields(line)
if len(f) < 4 {
continue
}
name := f[0]
if !strings.HasPrefix(name, "Benchmark") {
continue
}
name = strings.TrimPrefix(name, "Benchmark")
n, _ := strconv.Atoi(f[1])
if n == 0 {
continue
}
key.Benchmark = name
for i := 2; i+2 <= len(f); i += 2 {
val, err := strconv.ParseFloat(f[i], 64)
if err != nil {
continue
}
key.Unit = f[i+1]
stat := c.AddStat(key)
stat.Values = append(stat.Values, val)
}
}
}
func metricOf(unit string) string {
switch unit {
case "ns/op":
return "time/op"
case "B/op":
return "alloc/op"
case "MB/s":
return "speed"
default:
return unit
}
}
// Significance tests.
func notest(old, new *Benchstat) (pval float64, err error) {
return -1, nil
}
func ttest(old, new *Benchstat) (pval float64, err error) {
t, err := stats.TwoSampleWelchTTest(stats.Sample{Xs: old.RValues}, stats.Sample{Xs: new.RValues}, stats.LocationDiffers)
if err != nil {
return -1, err
}
return t.P, nil
}
func utest(old, new *Benchstat) (pval float64, err error) {
u, err := stats.MannWhitneyUTest(old.RValues, new.RValues, stats.LocationDiffers)
if err != nil {
return -1, err
}
return u.P, nil
}
|
r.cols = r.cols[:len(r.cols)-1]
}
| conditional_block |
extractor.rs | use crate::{
client,
config::{Configuration, CONFIGURATION},
scanner::SCANNED_URLS,
statistics::{
StatCommand::{self, UpdateUsizeField},
StatField::{LinksExtracted, TotalExpected},
},
utils::{format_url, make_request},
FeroxResponse,
};
use lazy_static::lazy_static;
use regex::Regex;
use reqwest::Url;
use std::collections::HashSet;
use tokio::sync::mpsc::UnboundedSender;
/// Regular expression used in [LinkFinder](https://github.com/GerbenJavado/LinkFinder)
///
/// Incorporates change from this [Pull Request](https://github.com/GerbenJavado/LinkFinder/pull/66/files)
const LINKFINDER_REGEX: &str = r#"(?:"|')(((?:[a-zA-Z]{1,10}://|//)[^"'/]{1,}\.[a-zA-Z]{2,}[^"']{0,})|((?:/|\.\./|\./)[^"'><,;| *()(%%$^/\\\[\]][^"'><,;|()]{1,})|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{1,}\.(?:[a-zA-Z]{1,4}|action)(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{3,}(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-.]{1,}\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\?|#][^"|']{0,}|)))(?:"|')"#;
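// Illustrative captures (assuming they appear quoted in a response body):
// "https://example.com/app.js", "/api/v1/users" and "assets/img/logo.png?v=2"
// each match one of the alternations above.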
/// Regular expression to pull url paths from robots.txt
///
/// ref: https://developers.google.com/search/reference/robots_txt
const ROBOTS_TXT_REGEX: &str =
r#"(?m)^ *(Allow|Disallow): *(?P<url_path>[a-zA-Z0-9._/?#@!&'()+,;%=-]+?)$"#; // multi-line (?m)
lazy_static! {
/// `LINKFINDER_REGEX` as a regex::Regex type
static ref LINKS_REGEX: Regex = Regex::new(LINKFINDER_REGEX).unwrap();
/// `ROBOTS_TXT_REGEX` as a regex::Regex type
static ref ROBOTS_REGEX: Regex = Regex::new(ROBOTS_TXT_REGEX).unwrap();
}
/// Iterate over a given path, return a list of every sub-path found
///
/// example: `path` contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// the following fragments would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn get_sub_paths_from_path(path: &str) -> Vec<String> {
log::trace!("enter: get_sub_paths_from_path({})", path);
let mut paths = vec![];
// filter out any empty strings caused by .split
let mut parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
let length = parts.len();
for i in 0..length {
// iterate over all parts of the path
if parts.is_empty() {
// pop left us with an empty vector, we're done
break;
}
let mut possible_path = parts.join("/");
if possible_path.is_empty() {
// .join can result in an empty string, which we don't need, ignore
continue;
}
if i > 0 {
// this isn't the last index of the parts array
// ex: /buried/misc/stupidfile.php
// this block skips the file but sees all parent folders
possible_path = format!("{}/", possible_path);
}
paths.push(possible_path); // good sub-path found
parts.pop(); // use .pop() to remove the last part of the path and continue iteration
}
log::trace!("exit: get_sub_paths_from_path -> {:?}", paths);
paths
}
/// simple helper to stay DRY, tries to join a url + fragment and add it to the `links` HashSet
fn add_link_to_set_of_links(link: &str, url: &Url, links: &mut HashSet<String>) {
log::trace!(
"enter: add_link_to_set_of_links({}, {}, {:?})",
link,
url.to_string(),
links
);
match url.join(&link) {
Ok(new_url) => {
links.insert(new_url.to_string());
}
Err(e) => {
log::error!("Could not join given url to the base url: {}", e);
}
}
log::trace!("exit: add_link_to_set_of_links");
}
/// Given a `reqwest::Response`, perform the following actions
/// - parse the response's text for links using the linkfinder regex
/// - for every link found take its url path and parse each sub-path
/// - example: Response contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// with a base url of http://localhost, the following urls would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
pub async fn get_links(
response: &FeroxResponse,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: get_links({}, {:?})",
response.url().as_str(),
tx_stats
);
let mut links = HashSet::<String>::new();
let body = response.text();
for capture in LINKS_REGEX.captures_iter(&body) {
// remove single & double quotes from both ends of the capture
// capture[0] is the entire match, additional capture groups start at [1]
let link = capture[0].trim_matches(|c| c == '\'' || c == '"');
match Url::parse(link) {
Ok(absolute) => {
if absolute.domain() != response.url().domain()
|| absolute.host() != response.url().host()
{
// domains/ips are not the same, don't scan things that aren't part of the original
// target url
continue;
}
add_all_sub_paths(absolute.path(), &response, &mut links);
}
Err(e) => {
// this is the expected error that happens when we try to parse a url fragment
// ex: Url::parse("/login") -> Err("relative URL without a base")
// while this is technically an error, these are good results for us
if e.to_string().contains("relative URL without a base") {
add_all_sub_paths(link, &response, &mut links);
} else {
// unexpected error has occurred
log::error!("Could not parse given url: {}", e);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: get_links -> {:?}", links);
links
}
/// take a url fragment like homepage/assets/img/icons/handshake.svg and
/// incrementally add
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn add_all_sub_paths(url_path: &str, response: &FeroxResponse, mut links: &mut HashSet<String>) {
log::trace!(
"enter: add_all_sub_paths({}, {}, {:?})",
url_path,
response,
links
);
for sub_path in get_sub_paths_from_path(url_path) {
log::debug!("Adding {} to {:?}", sub_path, links);
add_link_to_set_of_links(&sub_path, &response.url(), &mut links);
}
log::trace!("exit: add_all_sub_paths");
}
/// Wrapper around link extraction logic
/// currently used in two places:
/// - links from response bodies
/// - links from robots.txt responses
///
/// general steps taken:
/// - create a new Url object based on cli options/args
/// - check if the new Url has already been seen/scanned -> None
/// - make a request to the new Url ? -> Some(response) : None
pub async fn request_feroxresponse_from_new_link(
url: &str,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: request_feroxresponse_from_new_link({}, {:?})",
url,
tx_stats
);
// create a url based on the given command line options, return None on error
let new_url = match format_url(
&url,
&"",
CONFIGURATION.add_slash,
&CONFIGURATION.queries,
None,
tx_stats.clone(),
) {
Ok(url) => url,
Err(_) => |
};
if SCANNED_URLS.get_scan_by_url(&new_url.to_string()).is_some() {
// we've seen the url before and don't need to scan again
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
// make the request and store the response
let new_response = match make_request(&CONFIGURATION.client, &new_url, tx_stats).await {
Ok(resp) => resp,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
let new_ferox_response = FeroxResponse::from(new_response, true).await;
log::trace!(
"exit: request_feroxresponse_from_new_link -> {:?}",
new_ferox_response
);
Some(new_ferox_response)
}
/// helper function that simply requests /robots.txt on the given url's base url
///
/// example:
/// http://localhost/api/users -> http://localhost/robots.txt
///
/// The length of the given path has no effect on what's requested; it's always
/// base url + /robots.txt
pub async fn request_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: get_robots_file({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
// more often than not, domain/robots.txt will redirect to www.domain/robots.txt or something
// similar; to account for that, create a client that will follow redirects, regardless of
// what the user specified for the scanning client. Other than redirects, it will respect
// all other user specified settings
let follow_redirects = true;
let proxy = if config.proxy.is_empty() {
None
} else {
Some(config.proxy.as_str())
};
let client = client::initialize(
config.timeout,
&config.user_agent,
follow_redirects,
config.insecure,
&config.headers,
proxy,
);
if let Ok(mut url) = Url::parse(base_url) {
url.set_path("/robots.txt"); // overwrite existing path with /robots.txt
if let Ok(response) = make_request(&client, &url, tx_stats).await {
let ferox_response = FeroxResponse::from(response, true).await;
log::trace!("exit: get_robots_file -> {}", ferox_response);
return Some(ferox_response);
}
}
None
}
/// Entry point to perform link extraction from robots.txt
///
/// `base_url` can have paths and subpaths, however robots.txt will be requested from the
/// root of the url
/// given the url:
/// http://localhost/stuff/things
/// this function requests:
/// http://localhost/robots.txt
pub async fn extract_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: extract_robots_txt({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
let mut links = HashSet::new();
if let Some(response) = request_robots_txt(&base_url, &config, tx_stats.clone()).await {
for capture in ROBOTS_REGEX.captures_iter(response.text.as_str()) {
if let Some(new_path) = capture.name("url_path") {
if let Ok(mut new_url) = Url::parse(base_url) {
new_url.set_path(new_path.as_str());
add_all_sub_paths(new_url.path(), &response, &mut links);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: extract_robots_txt -> {:?}", links);
links
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::make_request;
use crate::FeroxChannel;
use httpmock::Method::GET;
use httpmock::MockServer;
use reqwest::Client;
use tokio::sync::mpsc;
#[test]
/// extract sub paths from the given url fragment; expect 4 sub paths and that all are
/// in the expected array
fn extractor_get_sub_paths_from_path_with_multiple_paths() {
let path = "homepage/assets/img/icons/handshake.svg";
let paths = get_sub_paths_from_path(&path);
let expected = vec![
"homepage/",
"homepage/assets/",
"homepage/assets/img/",
"homepage/assets/img/icons/",
"homepage/assets/img/icons/handshake.svg",
];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 2 sub paths and that all are
/// in the expected array. the fragment is wrapped in slashes to ensure no empty strings are
/// returned
fn extractor_get_sub_paths_from_path_with_enclosing_slashes() {
let path = "/homepage/assets/";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage/", "homepage/assets"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, no forward slashes are
/// included
fn extractor_get_sub_paths_from_path_with_only_a_word() {
let path = "homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, forward slash removed
fn extractor_get_sub_paths_from_path_with_an_absolute_word() {
let path = "/homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// test that a full url and fragment are joined correctly, then added to the given list
/// i.e. the happy path
fn extractor_add_link_to_set_of_links_happy_path() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "admin";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 1);
assert!(links.contains("https://localhost/admin"));
}
#[test]
/// test that an invalid path fragment doesn't add anything to the set of links
fn extractor_add_link_to_set_of_links_with_non_base_url() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "\\\\\\\\";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 0);
assert!(links.is_empty());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// use make_request to generate a Response, and use the Response to test get_links;
/// the response will contain an absolute path to a domain that is not part of the scanned
/// domain; expect an empty set returned
async fn extractor_get_links_with_absolute_url_that_differs_from_target_domain(
) -> Result<(), Box<dyn std::error::Error>> {
let srv = MockServer::start();
let mock = srv.mock(|when, then|{
when.method(GET)
.path("/some-path");
then.status(200)
.body("\"http://defintely.not.a.thing.probably.com/homepage/assets/img/icons/handshake.svg\"");
});
let client = Client::new();
let url = Url::parse(&srv.url("/some-path")).unwrap();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
let response = make_request(&client, &url, tx.clone()).await.unwrap();
let ferox_response = FeroxResponse::from(response, true).await;
let links = get_links(&ferox_response, tx).await;
assert!(links.is_empty());
assert_eq!(mock.hits(), 1);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// test that /robots.txt is correctly requested given a base url (happy path)
async fn request_robots_txt_with_and_without_proxy() {
let srv = MockServer::start();
let mock = srv.mock(|when, then| {
when.method(GET).path("/robots.txt");
then.status(200).body("this is a test");
});
let mut config = Configuration::default();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
request_robots_txt(&srv.url("/api/users/stuff/things"), &config, tx.clone()).await;
// note: the proxy doesn't actually do anything other than hit a different code branch
// in this unit test; it would however have an effect on an integration test
config.proxy = srv.url("/ima-proxy");
request_robots_txt(&srv.url("/api/different/path"), &config, tx).await;
assert_eq!(mock.hits(), 2);
}
}
| {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
} | conditional_block |
extractor.rs | use crate::{
client,
config::{Configuration, CONFIGURATION},
scanner::SCANNED_URLS,
statistics::{
StatCommand::{self, UpdateUsizeField},
StatField::{LinksExtracted, TotalExpected},
},
utils::{format_url, make_request},
FeroxResponse,
};
use lazy_static::lazy_static;
use regex::Regex;
use reqwest::Url;
use std::collections::HashSet;
use tokio::sync::mpsc::UnboundedSender;
/// Regular expression used in [LinkFinder](https://github.com/GerbenJavado/LinkFinder)
///
/// Incorporates change from this [Pull Request](https://github.com/GerbenJavado/LinkFinder/pull/66/files)
const LINKFINDER_REGEX: &str = r#"(?:"|')(((?:[a-zA-Z]{1,10}://|//)[^"'/]{1,}\.[a-zA-Z]{2,}[^"']{0,})|((?:/|\.\./|\./)[^"'><,;| *()(%%$^/\\\[\]][^"'><,;|()]{1,})|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{1,}\.(?:[a-zA-Z]{1,4}|action)(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{3,}(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-.]{1,}\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\?|#][^"|']{0,}|)))(?:"|')"#;
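// Illustrative captures (assuming they appear quoted in a response body):
// "https://example.com/app.js", "/api/v1/users" and "assets/img/logo.png?v=2"
// each match one of the alternations above.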
/// Regular expression to pull url paths from robots.txt
///
/// ref: https://developers.google.com/search/reference/robots_txt
const ROBOTS_TXT_REGEX: &str =
r#"(?m)^ *(Allow|Disallow): *(?P<url_path>[a-zA-Z0-9._/?#@!&'()+,;%=-]+?)$"#; // multi-line (?m)
lazy_static! {
/// `LINKFINDER_REGEX` as a regex::Regex type
static ref LINKS_REGEX: Regex = Regex::new(LINKFINDER_REGEX).unwrap();
/// `ROBOTS_TXT_REGEX` as a regex::Regex type
static ref ROBOTS_REGEX: Regex = Regex::new(ROBOTS_TXT_REGEX).unwrap();
}
/// Iterate over a given path, return a list of every sub-path found
///
/// example: `path` contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// the following fragments would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn get_sub_paths_from_path(path: &str) -> Vec<String> {
log::trace!("enter: get_sub_paths_from_path({})", path);
let mut paths = vec![];
// filter out any empty strings caused by .split
let mut parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
let length = parts.len();
for i in 0..length {
// iterate over all parts of the path
if parts.is_empty() {
// pop left us with an empty vector, we're done
break;
}
let mut possible_path = parts.join("/");
if possible_path.is_empty() {
// .join can result in an empty string, which we don't need, ignore
continue;
}
if i > 0 {
// this isn't the last index of the parts array
// ex: /buried/misc/stupidfile.php
// this block skips the file but sees all parent folders
possible_path = format!("{}/", possible_path);
}
paths.push(possible_path); // good sub-path found
parts.pop(); // use .pop() to remove the last part of the path and continue iteration
}
log::trace!("exit: get_sub_paths_from_path -> {:?}", paths);
paths
}
/// simple helper to stay DRY, tries to join a url + fragment and add it to the `links` HashSet
fn add_link_to_set_of_links(link: &str, url: &Url, links: &mut HashSet<String>) {
log::trace!(
"enter: add_link_to_set_of_links({}, {}, {:?})",
link,
url.to_string(),
links
);
match url.join(&link) {
Ok(new_url) => {
links.insert(new_url.to_string());
}
Err(e) => {
log::error!("Could not join given url to the base url: {}", e);
}
}
log::trace!("exit: add_link_to_set_of_links");
}
/// Given a `reqwest::Response`, perform the following actions
/// - parse the response's text for links using the linkfinder regex
/// - for every link found take its url path and parse each sub-path
/// - example: Response contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// with a base url of http://localhost, the following urls would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
pub async fn | (
response: &FeroxResponse,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: get_links({}, {:?})",
response.url().as_str(),
tx_stats
);
let mut links = HashSet::<String>::new();
let body = response.text();
for capture in LINKS_REGEX.captures_iter(&body) {
// remove single & double quotes from both ends of the capture
// capture[0] is the entire match, additional capture groups start at [1]
let link = capture[0].trim_matches(|c| c == '\'' || c == '"');
match Url::parse(link) {
Ok(absolute) => {
if absolute.domain() != response.url().domain()
|| absolute.host() != response.url().host()
{
// domains/ips are not the same, don't scan things that aren't part of the original
// target url
continue;
}
add_all_sub_paths(absolute.path(), &response, &mut links);
}
Err(e) => {
// this is the expected error that happens when we try to parse a url fragment
// ex: Url::parse("/login") -> Err("relative URL without a base")
// while this is technically an error, these are good results for us
if e.to_string().contains("relative URL without a base") {
add_all_sub_paths(link, &response, &mut links);
} else {
// unexpected error has occurred
log::error!("Could not parse given url: {}", e);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: get_links -> {:?}", links);
links
}
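// Note (added): TotalExpected grows by links * max(#extensions, 1), i.e. each extracted
// link is budgeted as one expected request per configured extension, or a single request
// when no extensions are configured (hence `.max(1)`).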
/// take a url fragment like homepage/assets/img/icons/handshake.svg and
/// incrementally add
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn add_all_sub_paths(url_path: &str, response: &FeroxResponse, mut links: &mut HashSet<String>) {
log::trace!(
"enter: add_all_sub_paths({}, {}, {:?})",
url_path,
response,
links
);
for sub_path in get_sub_paths_from_path(url_path) {
log::debug!("Adding {} to {:?}", sub_path, links);
add_link_to_set_of_links(&sub_path, &response.url(), &mut links);
}
log::trace!("exit: add_all_sub_paths");
}
/// Wrapper around link extraction logic
/// currently used in two places:
/// - links from response bodies
/// - links from robots.txt responses
///
/// general steps taken:
/// - create a new Url object based on cli options/args
/// - check if the new Url has already been seen/scanned -> None
/// - make a request to the new Url ? -> Some(response) : None
pub async fn request_feroxresponse_from_new_link(
url: &str,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: request_feroxresponse_from_new_link({}, {:?})",
url,
tx_stats
);
// create a url based on the given command line options, return None on error
let new_url = match format_url(
&url,
&"",
CONFIGURATION.add_slash,
&CONFIGURATION.queries,
None,
tx_stats.clone(),
) {
Ok(url) => url,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
if SCANNED_URLS.get_scan_by_url(&new_url.to_string()).is_some() {
//we've seen the url before and don't need to scan again
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
// make the request and store the response
let new_response = match make_request(&CONFIGURATION.client, &new_url, tx_stats).await {
Ok(resp) => resp,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
let new_ferox_response = FeroxResponse::from(new_response, true).await;
log::trace!(
"exit: request_feroxresponse_from_new_link -> {:?}",
new_ferox_response
);
Some(new_ferox_response)
}
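// Usage sketch (added; hypothetical call site, not from the upstream source):
//
// if let Some(resp) = request_feroxresponse_from_new_link(&link, tx_stats.clone()).await {
//     // `resp` is only produced for well-formed urls that haven't been scanned yet
// }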
/// helper function that simply requests /robots.txt on the given url's base url
///
/// example:
/// http://localhost/api/users -> http://localhost/robots.txt
///
/// The length of the given path has no effect on what's requested; it's always
/// base url + /robots.txt
pub async fn request_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: get_robots_file({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
// more often than not, domain/robots.txt will redirect to www.domain/robots.txt or something
// similar; to account for that, create a client that will follow redirects, regardless of
// what the user specified for the scanning client. Other than redirects, it will respect
// all other user specified settings
let follow_redirects = true;
let proxy = if config.proxy.is_empty() {
None
} else {
Some(config.proxy.as_str())
};
let client = client::initialize(
config.timeout,
&config.user_agent,
follow_redirects,
config.insecure,
&config.headers,
proxy,
);
if let Ok(mut url) = Url::parse(base_url) {
url.set_path("/robots.txt"); // overwrite existing path with /robots.txt
if let Ok(response) = make_request(&client, &url, tx_stats).await {
let ferox_response = FeroxResponse::from(response, true).await;
log::trace!("exit: get_robots_file -> {}", ferox_response);
return Some(ferox_response);
}
}
None
}
/// Entry point to perform link extraction from robots.txt
///
/// `base_url` can have paths and subpaths, however robots.txt will be requested from the
/// root of the url
/// given the url:
/// http://localhost/stuff/things
/// this function requests:
/// http://localhost/robots.txt
pub async fn extract_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: extract_robots_txt({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
let mut links = HashSet::new();
if let Some(response) = request_robots_txt(&base_url, &config, tx_stats.clone()).await {
for capture in ROBOTS_REGEX.captures_iter(response.text.as_str()) {
if let Some(new_path) = capture.name("url_path") {
if let Ok(mut new_url) = Url::parse(base_url) {
new_url.set_path(new_path.as_str());
add_all_sub_paths(new_url.path(), &response, &mut links);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: extract_robots_txt -> {:?}", links);
links
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::make_request;
use crate::FeroxChannel;
use httpmock::Method::GET;
use httpmock::MockServer;
use reqwest::Client;
use tokio::sync::mpsc;
#[test]
/// extract sub paths from the given url fragment; expect 4 sub paths and that all are
/// in the expected array
fn extractor_get_sub_paths_from_path_with_multiple_paths() {
let path = "homepage/assets/img/icons/handshake.svg";
let paths = get_sub_paths_from_path(&path);
let expected = vec![
"homepage/",
"homepage/assets/",
"homepage/assets/img/",
"homepage/assets/img/icons/",
"homepage/assets/img/icons/handshake.svg",
];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 2 sub paths and that all are
/// in the expected array. the fragment is wrapped in slashes to ensure no empty strings are
/// returned
fn extractor_get_sub_paths_from_path_with_enclosing_slashes() {
let path = "/homepage/assets/";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage/", "homepage/assets"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, no forward slashes are
/// included
fn extractor_get_sub_paths_from_path_with_only_a_word() {
let path = "homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, forward slash removed
fn extractor_get_sub_paths_from_path_with_an_absolute_word() {
let path = "/homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
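#[test]
/// added illustrative case (not part of the upstream test suite): a file nested two
/// directories deep yields the file itself plus one trailing-slash entry per parent
fn extractor_get_sub_paths_from_path_nested_file_sketch() {
let path = "/api/v1/users.json";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["api/", "api/v1/", "api/v1/users.json"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert!(paths.contains(&expected_path.to_string()));
}
}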
#[test]
/// test that a full url and fragment are joined correctly, then added to the given list
/// i.e. the happy path
fn extractor_add_link_to_set_of_links_happy_path() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "admin";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 1);
assert!(links.contains("https://localhost/admin"));
}
#[test]
/// test that an invalid path fragment doesn't add anything to the set of links
fn extractor_add_link_to_set_of_links_with_non_base_url() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "\\\\\\\\";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 0);
assert!(links.is_empty());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// use make_request to generate a Response, and use the Response to test get_links;
/// the response will contain an absolute path to a domain that is not part of the scanned
/// domain; expect an empty set returned
async fn extractor_get_links_with_absolute_url_that_differs_from_target_domain(
) -> Result<(), Box<dyn std::error::Error>> {
let srv = MockServer::start();
let mock = srv.mock(|when, then|{
when.method(GET)
.path("/some-path");
then.status(200)
.body("\"http://defintely.not.a.thing.probably.com/homepage/assets/img/icons/handshake.svg\"");
});
let client = Client::new();
let url = Url::parse(&srv.url("/some-path")).unwrap();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
let response = make_request(&client, &url, tx.clone()).await.unwrap();
let ferox_response = FeroxResponse::from(response, true).await;
let links = get_links(&ferox_response, tx).await;
assert!(links.is_empty());
assert_eq!(mock.hits(), 1);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// test that /robots.txt is correctly requested given a base url (happy path)
async fn request_robots_txt_with_and_without_proxy() {
let srv = MockServer::start();
let mock = srv.mock(|when, then| {
when.method(GET).path("/robots.txt");
then.status(200).body("this is a test");
});
let mut config = Configuration::default();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
request_robots_txt(&srv.url("/api/users/stuff/things"), &config, tx.clone()).await;
// note: the proxy doesn't actually do anything other than hit a different code branch
// in this unit test; it would however have an effect on an integration test
config.proxy = srv.url("/ima-proxy");
request_robots_txt(&srv.url("/api/different/path"), &config, tx).await;
assert_eq!(mock.hits(), 2);
}
}
| get_links | identifier_name |
extractor.rs | use crate::{
client,
config::{Configuration, CONFIGURATION},
scanner::SCANNED_URLS,
statistics::{
StatCommand::{self, UpdateUsizeField},
StatField::{LinksExtracted, TotalExpected},
},
utils::{format_url, make_request},
FeroxResponse,
};
use lazy_static::lazy_static;
use regex::Regex;
use reqwest::Url;
use std::collections::HashSet;
use tokio::sync::mpsc::UnboundedSender;
/// Regular expression used in [LinkFinder](https://github.com/GerbenJavado/LinkFinder)
///
/// Incorporates change from this [Pull Request](https://github.com/GerbenJavado/LinkFinder/pull/66/files)
const LINKFINDER_REGEX: &str = r#"(?:"|')(((?:[a-zA-Z]{1,10}://|//)[^"'/]{1,}\.[a-zA-Z]{2,}[^"']{0,})|((?:/|\.\./|\./)[^"'><,;| *()(%%$^/\\\[\]][^"'><,;|()]{1,})|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{1,}\.(?:[a-zA-Z]{1,4}|action)(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{3,}(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-.]{1,}\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\?|#][^"|']{0,}|)))(?:"|')"#;
/// Regular expression to pull url paths from robots.txt
///
/// ref: https://developers.google.com/search/reference/robots_txt
const ROBOTS_TXT_REGEX: &str =
r#"(?m)^ *(Allow|Disallow): *(?P<url_path>[a-zA-Z0-9._/?#@!&'()+,;%=-]+?)$"#; // multi-line (?m)
lazy_static! {
/// `LINKFINDER_REGEX` as a regex::Regex type
static ref LINKS_REGEX: Regex = Regex::new(LINKFINDER_REGEX).unwrap();
/// `ROBOTS_TXT_REGEX` as a regex::Regex type
static ref ROBOTS_REGEX: Regex = Regex::new(ROBOTS_TXT_REGEX).unwrap();
}
/// Iterate over a given path, return a list of every sub-path found
///
/// example: `path` contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// the following fragments would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn get_sub_paths_from_path(path: &str) -> Vec<String> {
log::trace!("enter: get_sub_paths_from_path({})", path);
let mut paths = vec![];
// filter out any empty strings caused by .split
let mut parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
let length = parts.len();
for i in 0..length {
// iterate over all parts of the path
if parts.is_empty() {
// pop left us with an empty vector, we're done
break;
}
let mut possible_path = parts.join("/");
if possible_path.is_empty() {
// .join can result in an empty string, which we don't need, ignore
continue;
}
if i > 0 {
// not the first iteration, so the original last component (e.g. a file) has already been popped
// ex: /buried/misc/stupidfile.php
// this block skips the file but sees all parent folders
possible_path = format!("{}/", possible_path);
}
paths.push(possible_path); // good sub-path found
parts.pop(); // use .pop() to remove the last part of the path and continue iteration
}
log::trace!("exit: get_sub_paths_from_path -> {:?}", paths);
paths
}
/// simple helper to stay DRY, tries to join a url + fragment and add it to the `links` HashSet
fn add_link_to_set_of_links(link: &str, url: &Url, links: &mut HashSet<String>) {
log::trace!(
"enter: add_link_to_set_of_links({}, {}, {:?})",
link,
url.to_string(),
links
);
match url.join(&link) {
Ok(new_url) => {
links.insert(new_url.to_string());
}
Err(e) => {
log::error!("Could not join given url to the base url: {}", e);
}
}
log::trace!("exit: add_link_to_set_of_links");
}
/// Given a `reqwest::Response`, perform the following actions
/// - parse the response's text for links using the linkfinder regex
/// - for every link found take its url path and parse each sub-path
/// - example: Response contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// with a base url of http://localhost, the following urls would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
pub async fn get_links(
response: &FeroxResponse,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: get_links({}, {:?})",
response.url().as_str(),
tx_stats
);
let mut links = HashSet::<String>::new();
let body = response.text();
for capture in LINKS_REGEX.captures_iter(&body) {
// remove single & double quotes from both ends of the capture
// capture[0] is the entire match, additional capture groups start at [1]
let link = capture[0].trim_matches(|c| c == '\'' || c == '"');
match Url::parse(link) {
Ok(absolute) => {
if absolute.domain() != response.url().domain()
|| absolute.host() != response.url().host()
{
// domains/ips are not the same, don't scan things that aren't part of the original
// target url
continue;
}
add_all_sub_paths(absolute.path(), &response, &mut links);
}
Err(e) => {
// this is the expected error that happens when we try to parse a url fragment
// ex: Url::parse("/login") -> Err("relative URL without a base")
// while this is technically an error, these are good results for us
if e.to_string().contains("relative URL without a base") {
add_all_sub_paths(link, &response, &mut links);
} else {
// unexpected error has occurred
log::error!("Could not parse given url: {}", e);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: get_links -> {:?}", links);
links
}
/// take a url fragment like homepage/assets/img/icons/handshake.svg and
/// incrementally add
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn add_all_sub_paths(url_path: &str, response: &FeroxResponse, mut links: &mut HashSet<String>) {
log::trace!(
"enter: add_all_sub_paths({}, {}, {:?})",
url_path,
response,
links
);
for sub_path in get_sub_paths_from_path(url_path) {
log::debug!("Adding {} to {:?}", sub_path, links);
add_link_to_set_of_links(&sub_path, &response.url(), &mut links);
}
log::trace!("exit: add_all_sub_paths");
}
/// Wrapper around link extraction logic
/// currently used in two places:
/// - links from response bodies
/// - links from robots.txt responses
///
/// general steps taken:
/// - create a new Url object based on cli options/args
/// - check if the new Url has already been seen/scanned -> None
/// - make a request to the new Url ? -> Some(response) : None
pub async fn request_feroxresponse_from_new_link(
url: &str,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: request_feroxresponse_from_new_link({}, {:?})",
url,
tx_stats
);
// create a url based on the given command line options, return None on error
let new_url = match format_url(
&url,
&"",
CONFIGURATION.add_slash,
&CONFIGURATION.queries,
None,
tx_stats.clone(),
) {
Ok(url) => url,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
if SCANNED_URLS.get_scan_by_url(&new_url.to_string()).is_some() {
//we've seen the url before and don't need to scan again
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
// make the request and store the response
let new_response = match make_request(&CONFIGURATION.client, &new_url, tx_stats).await {
Ok(resp) => resp,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
let new_ferox_response = FeroxResponse::from(new_response, true).await;
log::trace!(
"exit: request_feroxresponse_from_new_link -> {:?}",
new_ferox_response
);
Some(new_ferox_response)
}
/// helper function that simply requests /robots.txt on the given url's base url
///
/// example:
/// http://localhost/api/users -> http://localhost/robots.txt
///
/// The length of the given path has no effect on what's requested; it's always
/// base url + /robots.txt
pub async fn request_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: get_robots_file({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
// more often than not, domain/robots.txt will redirect to www.domain/robots.txt or something
// similar; to account for that, create a client that will follow redirects, regardless of
// what the user specified for the scanning client. Other than redirects, it will respect
// all other user specified settings
let follow_redirects = true;
let proxy = if config.proxy.is_empty() {
None
} else {
Some(config.proxy.as_str())
};
let client = client::initialize(
config.timeout,
&config.user_agent,
follow_redirects,
config.insecure,
&config.headers,
proxy,
);
if let Ok(mut url) = Url::parse(base_url) {
url.set_path("/robots.txt"); // overwrite existing path with /robots.txt
if let Ok(response) = make_request(&client, &url, tx_stats).await {
let ferox_response = FeroxResponse::from(response, true).await;
log::trace!("exit: get_robots_file -> {}", ferox_response);
return Some(ferox_response);
}
}
None
}
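// Usage sketch (added; hypothetical call site, not from the upstream source):
//
// if let Some(resp) = request_robots_txt("http://localhost/api/users", &config, tx.clone()).await {
//     // resp now wraps the body served at http://localhost/robots.txt
// }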
/// Entry point to perform link extraction from robots.txt
///
/// `base_url` can have paths and subpaths, however robots.txt will be requested from the
/// root of the url
/// given the url:
/// http://localhost/stuff/things
/// this function requests:
/// http://localhost/robots.txt
pub async fn extract_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: extract_robots_txt({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
let mut links = HashSet::new();
if let Some(response) = request_robots_txt(&base_url, &config, tx_stats.clone()).await {
for capture in ROBOTS_REGEX.captures_iter(response.text.as_str()) {
if let Some(new_path) = capture.name("url_path") {
if let Ok(mut new_url) = Url::parse(base_url) {
new_url.set_path(new_path.as_str());
add_all_sub_paths(new_url.path(), &response, &mut links);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: extract_robots_txt -> {:?}", links);
links
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::make_request;
use crate::FeroxChannel;
use httpmock::Method::GET;
use httpmock::MockServer;
use reqwest::Client;
use tokio::sync::mpsc;
#[test]
/// extract sub paths from the given url fragment; expect 4 sub paths and that all are
/// in the expected array
fn extractor_get_sub_paths_from_path_with_multiple_paths() {
let path = "homepage/assets/img/icons/handshake.svg";
let paths = get_sub_paths_from_path(&path);
let expected = vec![
"homepage/",
"homepage/assets/",
"homepage/assets/img/",
"homepage/assets/img/icons/",
"homepage/assets/img/icons/handshake.svg",
];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 2 sub paths and that all are
/// in the expected array. the fragment is wrapped in slashes to ensure no empty strings are
/// returned
fn extractor_get_sub_paths_from_path_with_enclosing_slashes() {
let path = "/homepage/assets/";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage/", "homepage/assets"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, no forward slashes are
/// included
fn extractor_get_sub_paths_from_path_with_only_a_word() {
let path = "homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, forward slash removed
fn extractor_get_sub_paths_from_path_with_an_absolute_word() {
let path = "/homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// test that a full url and fragment are joined correctly, then added to the given list
/// i.e. the happy path
fn extractor_add_link_to_set_of_links_happy_path() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "admin";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 1);
assert!(links.contains("https://localhost/admin"));
}
#[test]
/// test that an invalid path fragment doesn't add anything to the set of links
fn extractor_add_link_to_set_of_links_with_non_base_url() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "\\\\\\\\";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 0);
assert!(links.is_empty());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// use make_request to generate a Response, and use the Response to test get_links;
/// the response will contain an absolute path to a domain that is not part of the scanned
/// domain; expect an empty set returned
async fn extractor_get_links_with_absolute_url_that_differs_from_target_domain(
) -> Result<(), Box<dyn std::error::Error>> {
let srv = MockServer::start();
let mock = srv.mock(|when, then|{
when.method(GET)
.path("/some-path");
then.status(200)
.body("\"http://defintely.not.a.thing.probably.com/homepage/assets/img/icons/handshake.svg\"");
});
let client = Client::new();
let url = Url::parse(&srv.url("/some-path")).unwrap(); |
let links = get_links(&ferox_response, tx).await;
assert!(links.is_empty());
assert_eq!(mock.hits(), 1);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// test that /robots.txt is correctly requested given a base url (happy path)
async fn request_robots_txt_with_and_without_proxy() {
let srv = MockServer::start();
let mock = srv.mock(|when, then| {
when.method(GET).path("/robots.txt");
then.status(200).body("this is a test");
});
let mut config = Configuration::default();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
request_robots_txt(&srv.url("/api/users/stuff/things"), &config, tx.clone()).await;
// note: the proxy doesn't actually do anything other than hit a different code branch
// in this unit test; it would however have an effect on an integration test
config.proxy = srv.url("/ima-proxy");
request_robots_txt(&srv.url("/api/different/path"), &config, tx).await;
assert_eq!(mock.hits(), 2);
}
} | let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
let response = make_request(&client, &url, tx.clone()).await.unwrap();
let ferox_response = FeroxResponse::from(response, true).await; | random_line_split |
extractor.rs | use crate::{
client,
config::{Configuration, CONFIGURATION},
scanner::SCANNED_URLS,
statistics::{
StatCommand::{self, UpdateUsizeField},
StatField::{LinksExtracted, TotalExpected},
},
utils::{format_url, make_request},
FeroxResponse,
};
use lazy_static::lazy_static;
use regex::Regex;
use reqwest::Url;
use std::collections::HashSet;
use tokio::sync::mpsc::UnboundedSender;
/// Regular expression used in [LinkFinder](https://github.com/GerbenJavado/LinkFinder)
///
/// Incorporates change from this [Pull Request](https://github.com/GerbenJavado/LinkFinder/pull/66/files)
const LINKFINDER_REGEX: &str = r#"(?:"|')(((?:[a-zA-Z]{1,10}://|//)[^"'/]{1,}\.[a-zA-Z]{2,}[^"']{0,})|((?:/|\.\./|\./)[^"'><,;| *()(%%$^/\\\[\]][^"'><,;|()]{1,})|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{1,}\.(?:[a-zA-Z]{1,4}|action)(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-/]{1,}/[a-zA-Z0-9_\-/]{3,}(?:[\?|#][^"|']{0,}|))|([a-zA-Z0-9_\-.]{1,}\.(?:php|asp|aspx|jsp|json|action|html|js|txt|xml)(?:[\?|#][^"|']{0,}|)))(?:"|')"#;
/// Regular expression to pull url paths from robots.txt
///
/// ref: https://developers.google.com/search/reference/robots_txt
const ROBOTS_TXT_REGEX: &str =
r#"(?m)^ *(Allow|Disallow): *(?P<url_path>[a-zA-Z0-9._/?#@!&'()+,;%=-]+?)$"#; // multi-line (?m)
lazy_static! {
/// `LINKFINDER_REGEX` as a regex::Regex type
static ref LINKS_REGEX: Regex = Regex::new(LINKFINDER_REGEX).unwrap();
/// `ROBOTS_TXT_REGEX` as a regex::Regex type
static ref ROBOTS_REGEX: Regex = Regex::new(ROBOTS_TXT_REGEX).unwrap();
}
/// Iterate over a given path, return a list of every sub-path found
///
/// example: `path` contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// the following fragments would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn get_sub_paths_from_path(path: &str) -> Vec<String> {
log::trace!("enter: get_sub_paths_from_path({})", path);
let mut paths = vec![];
// filter out any empty strings caused by .split
let mut parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
let length = parts.len();
for i in 0..length {
// iterate over all parts of the path
if parts.is_empty() {
// pop left us with an empty vector, we're done
break;
}
let mut possible_path = parts.join("/");
if possible_path.is_empty() {
// .join can result in an empty string, which we don't need, ignore
continue;
}
if i > 0 {
// not the first iteration, so the original last component (e.g. a file) has already been popped
// ex: /buried/misc/stupidfile.php
// this block skips the file but sees all parent folders
possible_path = format!("{}/", possible_path);
}
paths.push(possible_path); // good sub-path found
parts.pop(); // use .pop() to remove the last part of the path and continue iteration
}
log::trace!("exit: get_sub_paths_from_path -> {:?}", paths);
paths
}
/// simple helper to stay DRY, tries to join a url + fragment and add it to the `links` HashSet
fn add_link_to_set_of_links(link: &str, url: &Url, links: &mut HashSet<String>) {
log::trace!(
"enter: add_link_to_set_of_links({}, {}, {:?})",
link,
url.to_string(),
links
);
match url.join(&link) {
Ok(new_url) => {
links.insert(new_url.to_string());
}
Err(e) => {
log::error!("Could not join given url to the base url: {}", e);
}
}
log::trace!("exit: add_link_to_set_of_links");
}
/// Given a `reqwest::Response`, perform the following actions
/// - parse the response's text for links using the linkfinder regex
/// - for every link found take its url path and parse each sub-path
/// - example: Response contains a link fragment `homepage/assets/img/icons/handshake.svg`
/// with a base url of http://localhost, the following urls would be returned:
/// - homepage/assets/img/icons/handshake.svg
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
pub async fn get_links(
response: &FeroxResponse,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: get_links({}, {:?})",
response.url().as_str(),
tx_stats
);
let mut links = HashSet::<String>::new();
let body = response.text();
for capture in LINKS_REGEX.captures_iter(&body) {
// remove single & double quotes from both ends of the capture
// capture[0] is the entire match, additional capture groups start at [1]
let link = capture[0].trim_matches(|c| c == '\'' || c == '"');
match Url::parse(link) {
Ok(absolute) => {
if absolute.domain() != response.url().domain()
|| absolute.host() != response.url().host()
{
// domains/ips are not the same, don't scan things that aren't part of the original
// target url
continue;
}
add_all_sub_paths(absolute.path(), &response, &mut links);
}
Err(e) => {
// this is the expected error that happens when we try to parse a url fragment
// ex: Url::parse("/login") -> Err("relative URL without a base")
// while this is technically an error, these are good results for us
if e.to_string().contains("relative URL without a base") {
add_all_sub_paths(link, &response, &mut links);
} else {
// unexpected error has occurred
log::error!("Could not parse given url: {}", e);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: get_links -> {:?}", links);
links
}
/// take a url fragment like homepage/assets/img/icons/handshake.svg and
/// incrementally add
/// - homepage/assets/img/icons/
/// - homepage/assets/img/
/// - homepage/assets/
/// - homepage/
fn add_all_sub_paths(url_path: &str, response: &FeroxResponse, mut links: &mut HashSet<String>) {
log::trace!(
"enter: add_all_sub_paths({}, {}, {:?})",
url_path,
response,
links
);
for sub_path in get_sub_paths_from_path(url_path) {
log::debug!("Adding {} to {:?}", sub_path, links);
add_link_to_set_of_links(&sub_path, &response.url(), &mut links);
}
log::trace!("exit: add_all_sub_paths");
}
/// Wrapper around link extraction logic
/// currently used in two places:
/// - links from response bodies
/// - links from robots.txt responses
///
/// general steps taken:
/// - create a new Url object based on cli options/args
/// - check if the new Url has already been seen/scanned -> None
/// - make a request to the new Url ? -> Some(response) : None
pub async fn request_feroxresponse_from_new_link(
url: &str,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: request_feroxresponse_from_new_link({}, {:?})",
url,
tx_stats
);
// create a url based on the given command line options, return None on error
let new_url = match format_url(
&url,
&"",
CONFIGURATION.add_slash,
&CONFIGURATION.queries,
None,
tx_stats.clone(),
) {
Ok(url) => url,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
if SCANNED_URLS.get_scan_by_url(&new_url.to_string()).is_some() {
//we've seen the url before and don't need to scan again
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
// make the request and store the response
let new_response = match make_request(&CONFIGURATION.client, &new_url, tx_stats).await {
Ok(resp) => resp,
Err(_) => {
log::trace!("exit: request_feroxresponse_from_new_link -> None");
return None;
}
};
let new_ferox_response = FeroxResponse::from(new_response, true).await;
log::trace!(
"exit: request_feroxresponse_from_new_link -> {:?}",
new_ferox_response
);
Some(new_ferox_response)
}
/// helper function that simply requests /robots.txt on the given url's base url
///
/// example:
/// http://localhost/api/users -> http://localhost/robots.txt
///
/// The length of the given path has no effect on what's requested; it's always
/// base url + /robots.txt
pub async fn request_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> Option<FeroxResponse> {
log::trace!(
"enter: get_robots_file({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
// more often than not, domain/robots.txt will redirect to www.domain/robots.txt or something
// similar; to account for that, create a client that will follow redirects, regardless of
// what the user specified for the scanning client. Other than redirects, it will respect
// all other user specified settings
let follow_redirects = true;
let proxy = if config.proxy.is_empty() {
None
} else {
Some(config.proxy.as_str())
};
let client = client::initialize(
config.timeout,
&config.user_agent,
follow_redirects,
config.insecure,
&config.headers,
proxy,
);
if let Ok(mut url) = Url::parse(base_url) {
url.set_path("/robots.txt"); // overwrite existing path with /robots.txt
if let Ok(response) = make_request(&client, &url, tx_stats).await {
let ferox_response = FeroxResponse::from(response, true).await;
log::trace!("exit: get_robots_file -> {}", ferox_response);
return Some(ferox_response);
}
}
None
}
/// Entry point to perform link extraction from robots.txt
///
/// `base_url` can have paths and subpaths, however robots.txt will be requested from the
/// root of the url
/// given the url:
/// http://localhost/stuff/things
/// this function requests:
/// http://localhost/robots.txt
pub async fn extract_robots_txt(
base_url: &str,
config: &Configuration,
tx_stats: UnboundedSender<StatCommand>,
) -> HashSet<String> {
log::trace!(
"enter: extract_robots_txt({}, CONFIGURATION, {:?})",
base_url,
tx_stats
);
let mut links = HashSet::new();
if let Some(response) = request_robots_txt(&base_url, &config, tx_stats.clone()).await {
for capture in ROBOTS_REGEX.captures_iter(response.text.as_str()) {
if let Some(new_path) = capture.name("url_path") {
if let Ok(mut new_url) = Url::parse(base_url) {
new_url.set_path(new_path.as_str());
add_all_sub_paths(new_url.path(), &response, &mut links);
}
}
}
}
let multiplier = CONFIGURATION.extensions.len().max(1);
update_stat!(tx_stats, UpdateUsizeField(LinksExtracted, links.len()));
update_stat!(
tx_stats,
UpdateUsizeField(TotalExpected, links.len() * multiplier)
);
log::trace!("exit: extract_robots_txt -> {:?}", links);
links
}
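// Worked example (added; hypothetical values): with base_url = "http://localhost/stuff/things"
// and a robots.txt body of `Disallow: /admin/panel`, the captured path is "/admin/panel";
// assuming robots.txt was served from http://localhost/robots.txt without redirecting,
// add_all_sub_paths() contributes "http://localhost/admin/" and
// "http://localhost/admin/panel" to the returned set.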
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::make_request;
use crate::FeroxChannel;
use httpmock::Method::GET;
use httpmock::MockServer;
use reqwest::Client;
use tokio::sync::mpsc;
#[test]
/// extract sub paths from the given url fragment; expect 4 sub paths and that all are
/// in the expected array
fn extractor_get_sub_paths_from_path_with_multiple_paths() {
let path = "homepage/assets/img/icons/handshake.svg";
let paths = get_sub_paths_from_path(&path);
let expected = vec![
"homepage/",
"homepage/assets/",
"homepage/assets/img/",
"homepage/assets/img/icons/",
"homepage/assets/img/icons/handshake.svg",
];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 2 sub paths and that all are
/// in the expected array. the fragment is wrapped in slashes to ensure no empty strings are
/// returned
fn extractor_get_sub_paths_from_path_with_enclosing_slashes() {
let path = "/homepage/assets/";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage/", "homepage/assets"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, no forward slashes are
/// included
fn extractor_get_sub_paths_from_path_with_only_a_word() {
let path = "homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// extract sub paths from the given url fragment; expect 1 sub path, forward slash removed
fn extractor_get_sub_paths_from_path_with_an_absolute_word() {
let path = "/homepage";
let paths = get_sub_paths_from_path(&path);
let expected = vec!["homepage"];
assert_eq!(paths.len(), expected.len());
for expected_path in expected {
assert_eq!(paths.contains(&expected_path.to_string()), true);
}
}
#[test]
/// test that a full url and fragment are joined correctly, then added to the given list
/// i.e. the happy path
fn extractor_add_link_to_set_of_links_happy_path() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "admin";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 1);
assert!(links.contains("https://localhost/admin"));
}
#[test]
/// test that an invalid path fragment doesn't add anything to the set of links
fn extractor_add_link_to_set_of_links_with_non_base_url() {
let url = Url::parse("https://localhost").unwrap();
let mut links = HashSet::<String>::new();
let link = "\\\\\\\\";
assert_eq!(links.len(), 0);
add_link_to_set_of_links(link, &url, &mut links);
assert_eq!(links.len(), 0);
assert!(links.is_empty());
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// use make_request to generate a Response, and use the Response to test get_links;
/// the response will contain an absolute path to a domain that is not part of the scanned
/// domain; expect an empty set returned
async fn extractor_get_links_with_absolute_url_that_differs_from_target_domain(
) -> Result<(), Box<dyn std::error::Error>> {
let srv = MockServer::start();
let mock = srv.mock(|when, then|{
when.method(GET)
.path("/some-path");
then.status(200)
.body("\"http://defintely.not.a.thing.probably.com/homepage/assets/img/icons/handshake.svg\"");
});
let client = Client::new();
let url = Url::parse(&srv.url("/some-path")).unwrap();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
let response = make_request(&client, &url, tx.clone()).await.unwrap();
let ferox_response = FeroxResponse::from(response, true).await;
let links = get_links(&ferox_response, tx).await;
assert!(links.is_empty());
assert_eq!(mock.hits(), 1);
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
/// test that /robots.txt is correctly requested given a base url (happy path)
async fn request_robots_txt_with_and_without_proxy() |
}
| {
let srv = MockServer::start();
let mock = srv.mock(|when, then| {
when.method(GET).path("/robots.txt");
then.status(200).body("this is a test");
});
let mut config = Configuration::default();
let (tx, _): FeroxChannel<StatCommand> = mpsc::unbounded_channel();
request_robots_txt(&srv.url("/api/users/stuff/things"), &config, tx.clone()).await;
// note: the proxy doesn't actually do anything other than hit a different code branch
// in this unit test; it would however have an effect on an integration test
config.proxy = srv.url("/ima-proxy");
request_robots_txt(&srv.url("/api/different/path"), &config, tx).await;
assert_eq!(mock.hits(), 2);
} | identifier_body |
keyboard.rs | use crate::input::device::{Device, DeviceType};
use crate::input::event_filter::{EventFilter, EventFilterManager};
use crate::input::events::{InputEvent, KeyboardEvent};
use crate::{config::ConfigManager, input::seat::SeatManager};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::ops::Deref;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
use xkbcommon::xkb;
#[cfg(not(test))]
use xkbcommon::xkb::ffi::xkb_state_ref;
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct RepeatRate(u32);
impl Default for RepeatRate {
fn default() -> Self {
RepeatRate(33)
}
}
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct RepeatDelay(u32);
impl Default for RepeatDelay {
fn default() -> Self {
RepeatDelay(500)
}
}
#[derive(Default, Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct KeyboardConfig {
pub xkb_rules: String,
pub xkb_model: String,
pub xkb_layout: String,
pub xkb_variant: String,
pub xkb_options: Option<String>,
pub repeat_rate: RepeatRate,
pub repeat_delay: RepeatDelay,
}
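// Illustrative sketch (added, not part of the original source): building a config value
// directly; in the compositor these fields normally arrive deserialized from the user's
// configuration via ConfigManager.
//
// let cfg = KeyboardConfig {
//     xkb_layout: "us".to_string(),
//     xkb_options: Some("ctrl:nocaps".to_string()),
//     ..KeyboardConfig::default()
// };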
pub struct Keyboard {
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
device: Rc<Device>,
keyboard: *mut wlr_keyboard,
xkb_state: RefCell<xkb::State>,
event_manager: RefCell<Option<Pin<Box<KeyboardEventManager>>>>,
}
impl Keyboard {
fn init(
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
device: Rc<Device>,
) -> Rc<Keyboard> {
debug!("Keyboard::init: {}", device.name());
let keyboard_ptr = match device.device_type() {
DeviceType::Keyboard(keyboard_ptr) => keyboard_ptr,
_ => panic!("Keyboard::init expects a keyboard device"),
};
let config = &config_manager.config().keyboard;
set_keymap_from_config(keyboard_ptr, config);
let keyboard = Rc::new(Keyboard {
seat_manager,
event_filter_manager,
device: device.clone(),
keyboard: keyboard_ptr,
xkb_state: RefCell::new(unsafe {
xkb::State::from_raw_ptr(xkb_state_ref((*keyboard_ptr).xkb_state))
}),
event_manager: RefCell::new(None),
});
let subscription =
config_manager
.on_config_changed()
.subscribe(listener!(keyboard => move |config| {
set_keymap_from_config(keyboard.raw_ptr(), &config.keyboard);
*keyboard.xkb_state.borrow_mut() = unsafe {
xkb::State::from_raw_ptr(xkb_state_ref((*keyboard_ptr).xkb_state))
};
}));
device.on_destroy.then(listener!(config_manager => move || {
config_manager.on_config_changed().unsubscribe(subscription);
}));
let mut event_manager = KeyboardEventManager::new(Rc::downgrade(&keyboard));
unsafe {
event_manager.modifiers(&mut (*keyboard_ptr).events.modifiers);
event_manager.key(&mut (*keyboard_ptr).events.key);
}
*keyboard.event_manager.borrow_mut() = Some(event_manager);
keyboard
}
pub fn raw_ptr(&self) -> *mut wlr_keyboard {
self.keyboard
}
pub fn device(&self) -> Rc<Device> {
self.device.clone()
}
pub fn xkb_state(&self) -> xkb::State {
self.xkb_state.borrow().clone()
}
}
fn set_keymap_from_config(keyboard_ptr: *mut wlr_keyboard, config: &KeyboardConfig) {
// We need to prepare an XKB keymap and assign it to the keyboard.
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
let keymap = xkb::Keymap::new_from_names(
&context,
&config.xkb_rules,
&config.xkb_model,
&config.xkb_layout,
&config.xkb_variant,
config.xkb_options.clone(),
xkb::KEYMAP_COMPILE_NO_FLAGS,
)
.expect("xkb::Keymap could not be created");
unsafe {
wlr_keyboard_set_keymap(keyboard_ptr, keymap.get_raw_ptr());
wlr_keyboard_set_repeat_info(
keyboard_ptr,
config.repeat_rate.0 as i32,
config.repeat_delay.0 as i32,
);
}
}
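// Note (added): xkb::Keymap::new_from_names treats empty rules/model/layout/variant
// strings as "use the defaults", so a default KeyboardConfig compiles the system default
// keymap; wlr_keyboard_set_repeat_info takes the rate in key repeats per second and the
// delay in milliseconds.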
pub(crate) trait KeyboardEventHandler {
fn modifiers(&self);
fn key(&self, event: *const wlr_event_keyboard_key);
}
impl KeyboardEventHandler for Keyboard {
fn modifiers(&self) {
unsafe {
// A seat can only have one keyboard, but this is a limitation of the
// Wayland protocol - not wlroots. We assign all connected keyboards to the
// same seat. You can swap out the underlying wlr_keyboard like this and
// wlr_seat handles this transparently.
wlr_seat_set_keyboard(self.seat_manager.raw_seat(), self.device.raw_ptr());
// Send modifiers to the client.
wlr_seat_keyboard_notify_modifiers(
self.seat_manager.raw_seat(),
&mut (*self.keyboard).modifiers,
);
}
}
fn key(&self, event: *const wlr_event_keyboard_key) {
let event = unsafe { KeyboardEvent::from_ptr(self, event) };
let handled = self.event_filter_manager.handle_keyboard_event(&event);
if !handled |
}
}
wayland_listener!(
KeyboardEventManager,
Weak<Keyboard>,
[
modifiers => modifiers_func: |this: &mut KeyboardEventManager, _data: *mut libc::c_void,| unsafe {
if let Some(handler) = this.data.upgrade() {
handler.modifiers();
}
};
key => key_func: |this: &mut KeyboardEventManager, data: *mut libc::c_void,| unsafe {
if let Some(handler) = this.data.upgrade() {
handler.key(data as _);
}
};
]
);
pub struct KeyboardManager {
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
keyboards: RefCell<Vec<Rc<Keyboard>>>,
}
impl KeyboardManager {
pub(crate) fn init(
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
) -> Rc<KeyboardManager> {
let keyboard_manager = Rc::new(KeyboardManager {
config_manager,
seat_manager: seat_manager.clone(),
event_filter_manager,
keyboards: RefCell::new(vec![]),
});
seat_manager
.on_new_device
.subscribe(listener!(keyboard_manager => move |device| {
if let DeviceType::Keyboard(_) = device.device_type() {
device.on_destroy.then(listener!(device, keyboard_manager => move || {
keyboard_manager
.keyboards
.borrow_mut()
.retain(|keyboard| keyboard.device.deref() != device.deref());
keyboard_manager
.seat_manager
.set_has_any_keyboard(keyboard_manager.has_keyboard());
}));
unsafe {
wlr_seat_set_keyboard(keyboard_manager.seat_manager.raw_seat(), device.raw_ptr());
}
let keyboard = Keyboard::init(
keyboard_manager.config_manager.clone(),
keyboard_manager.seat_manager.clone(),
keyboard_manager.event_filter_manager.clone(),
device.clone(),
);
keyboard_manager.keyboards.borrow_mut().push(keyboard);
keyboard_manager.seat_manager.set_has_any_keyboard(true);
}
}));
keyboard_manager
}
pub fn has_keyboard(&self) -> bool {
!self.keyboards.borrow().is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_util::*;
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let event_filter_manager = Rc::new(EventFilterManager::new());
let keyboard_manager = Rc::new(KeyboardManager::init(
config_manager,
seat_manager.clone(),
event_filter_manager,
));
let mut raw_keyboard = wlr_keyboard {
impl_: ptr::null(),
group: ptr::null_mut(),
keymap_string: ptr::null_mut(),
keymap_size: 0,
keymap: ptr::null_mut(),
xkb_state: ptr::null_mut(),
led_indexes: [0; 3],
mod_indexes: [0; 8],
keycodes: [0; 32],
num_keycodes: 0,
modifiers: wlr_keyboard_modifiers {
depressed: 0,
latched: 0,
locked: 0,
group: 0,
},
repeat_info: wlr_keyboard__bindgen_ty_1 { rate: 0, delay: 0 },
events: wlr_keyboard__bindgen_ty_2 {
key: new_wl_signal(),
modifiers: new_wl_signal(),
keymap: new_wl_signal(),
repeat_info: new_wl_signal(),
destroy: new_wl_signal(),
},
data: ptr::null_mut(),
};
let mut device = wlr_input_device {
impl_: ptr::null(),
type_: wlr_input_device_type_WLR_INPUT_DEVICE_KEYBOARD,
vendor: 0,
product: 0,
name: ptr::null_mut(),
width_mm: 0.0,
height_mm: 0.0,
output_name: ptr::null_mut(),
__bindgen_anon_1: wlr_input_device__bindgen_ty_1 {
keyboard: &mut raw_keyboard,
},
events: wlr_input_device__bindgen_ty_2 {
destroy: new_wl_signal(),
},
data: ptr::null_mut(),
link: new_wl_list(),
};
let key_signal = WlSignal::from_ptr(&mut raw_keyboard.events.key);
let modifiers_signal = WlSignal::from_ptr(&mut raw_keyboard.events.modifiers);
let keymap_signal = WlSignal::from_ptr(&mut raw_keyboard.events.keymap);
let repeat_info_signal = WlSignal::from_ptr(&mut raw_keyboard.events.repeat_info);
let destroy_signal = WlSignal::from_ptr(&mut device.events.destroy);
let device = Device::init(&mut device);
let weak_device = Rc::downgrade(&device);
seat_manager.on_new_device.fire(device);
let keyboard = keyboard_manager.keyboards.borrow().first().unwrap().clone();
let weak_keyboard = Rc::downgrade(&keyboard);
drop(keyboard);
assert!(weak_device.upgrade().is_some());
assert!(weak_keyboard.upgrade().is_some());
assert!(key_signal.listener_count() == 1);
assert!(modifiers_signal.listener_count() == 1);
assert!(destroy_signal.listener_count() == 1);
assert!(keyboard_manager.has_keyboard());
destroy_signal.emit();
assert!(key_signal.listener_count() == 0);
assert!(modifiers_signal.listener_count() == 0);
assert!(keymap_signal.listener_count() == 0);
assert!(repeat_info_signal.listener_count() == 0);
assert!(destroy_signal.listener_count() == 0);
assert!(!keyboard_manager.has_keyboard());
assert!(weak_keyboard.upgrade().is_none());
assert!(weak_device.upgrade().is_none());
}
}
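// Note (added): the #[cfg(test)] shims below stand in for the wlroots FFI calls and for
// xkb_state_ref so the destroy/cleanup test above can run without a live compositor.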
#[cfg(test)]
use xkbcommon::xkb::ffi::{xkb_keymap, xkb_state};
#[cfg(test)]
unsafe fn wlr_seat_set_keyboard(_: *mut wlr_seat, _: *mut wlr_input_device) {}
#[cfg(test)]
unsafe fn wlr_keyboard_set_keymap(_: *mut wlr_keyboard, _: *mut xkb_keymap) {}
#[cfg(test)]
unsafe fn wlr_keyboard_set_repeat_info(_: *mut wlr_keyboard, _: i32, _: i32) {}
#[cfg(test)]
unsafe fn xkb_state_ref(ptr: *mut xkb_state) -> *mut xkb_state {
ptr
}
| {
unsafe {
// Otherwise, we pass it along to the client.
wlr_seat_set_keyboard(self.seat_manager.raw_seat(), self.device.raw_ptr());
wlr_seat_keyboard_notify_key(
self.seat_manager.raw_seat(),
event.time_msec(),
event.libinput_keycode(),
event.raw_state(),
);
}
} | conditional_block |
keyboard.rs | use crate::input::device::{Device, DeviceType};
use crate::input::event_filter::{EventFilter, EventFilterManager};
use crate::input::events::{InputEvent, KeyboardEvent};
use crate::{config::ConfigManager, input::seat::SeatManager};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::ops::Deref;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
use xkbcommon::xkb;
#[cfg(not(test))]
use xkbcommon::xkb::ffi::xkb_state_ref;
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct RepeatRate(u32);
impl Default for RepeatRate {
fn default() -> Self {
RepeatRate(33)
}
}
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct RepeatDelay(u32);
impl Default for RepeatDelay {
fn default() -> Self {
RepeatDelay(500)
}
}
#[derive(Default, Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct KeyboardConfig {
pub xkb_rules: String,
pub xkb_model: String,
pub xkb_layout: String,
pub xkb_variant: String,
pub xkb_options: Option<String>,
pub repeat_rate: RepeatRate,
pub repeat_delay: RepeatDelay,
}
pub struct | {
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
device: Rc<Device>,
keyboard: *mut wlr_keyboard,
xkb_state: RefCell<xkb::State>,
event_manager: RefCell<Option<Pin<Box<KeyboardEventManager>>>>,
}
impl Keyboard {
fn init(
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
device: Rc<Device>,
) -> Rc<Keyboard> {
debug!("Keyboard::init: {}", device.name());
let keyboard_ptr = match device.device_type() {
DeviceType::Keyboard(keyboard_ptr) => keyboard_ptr,
_ => panic!("Keyboard::init expects a keyboard device"),
};
let config = &config_manager.config().keyboard;
set_keymap_from_config(keyboard_ptr, config);
let keyboard = Rc::new(Keyboard {
seat_manager,
event_filter_manager,
device: device.clone(),
keyboard: keyboard_ptr,
xkb_state: RefCell::new(unsafe {
xkb::State::from_raw_ptr(xkb_state_ref((*keyboard_ptr).xkb_state))
}),
event_manager: RefCell::new(None),
});
let subscription =
config_manager
.on_config_changed()
.subscribe(listener!(keyboard => move |config| {
set_keymap_from_config(keyboard.raw_ptr(), &config.keyboard);
*keyboard.xkb_state.borrow_mut() = unsafe {
xkb::State::from_raw_ptr(xkb_state_ref((*keyboard_ptr).xkb_state))
};
}));
device.on_destroy.then(listener!(config_manager => move || {
config_manager.on_config_changed().unsubscribe(subscription);
}));
let mut event_manager = KeyboardEventManager::new(Rc::downgrade(&keyboard));
unsafe {
event_manager.modifiers(&mut (*keyboard_ptr).events.modifiers);
event_manager.key(&mut (*keyboard_ptr).events.key);
}
*keyboard.event_manager.borrow_mut() = Some(event_manager);
keyboard
}
pub fn raw_ptr(&self) -> *mut wlr_keyboard {
self.keyboard
}
pub fn device(&self) -> Rc<Device> {
self.device.clone()
}
pub fn xkb_state(&self) -> xkb::State {
self.xkb_state.borrow().clone()
}
}
fn set_keymap_from_config(keyboard_ptr: *mut wlr_keyboard, config: &KeyboardConfig) {
// We need to prepare an XKB keymap and assign it to the keyboard.
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
let keymap = xkb::Keymap::new_from_names(
&context,
&config.xkb_rules,
&config.xkb_model,
&config.xkb_layout,
&config.xkb_variant,
config.xkb_options.clone(),
xkb::KEYMAP_COMPILE_NO_FLAGS,
)
.expect("xkb::Keymap could not be created");
unsafe {
wlr_keyboard_set_keymap(keyboard_ptr, keymap.get_raw_ptr());
wlr_keyboard_set_repeat_info(
keyboard_ptr,
config.repeat_rate.0 as i32,
config.repeat_delay.0 as i32,
);
}
}
pub(crate) trait KeyboardEventHandler {
fn modifiers(&self);
fn key(&self, event: *const wlr_event_keyboard_key);
}
impl KeyboardEventHandler for Keyboard {
fn modifiers(&self) {
unsafe {
// A seat can only have one keyboard, but this is a limitation of the
// Wayland protocol - not wlroots. We assign all connected keyboards to the
// same seat. You can swap out the underlying wlr_keyboard like this and
// wlr_seat handles this transparently.
wlr_seat_set_keyboard(self.seat_manager.raw_seat(), self.device.raw_ptr());
// Send modifiers to the client.
wlr_seat_keyboard_notify_modifiers(
self.seat_manager.raw_seat(),
&mut (*self.keyboard).modifiers,
);
}
}
fn key(&self, event: *const wlr_event_keyboard_key) {
let event = unsafe { KeyboardEvent::from_ptr(self, event) };
let handled = self.event_filter_manager.handle_keyboard_event(&event);
if !handled {
unsafe {
// Otherwise, we pass it along to the client.
wlr_seat_set_keyboard(self.seat_manager.raw_seat(), self.device.raw_ptr());
wlr_seat_keyboard_notify_key(
self.seat_manager.raw_seat(),
event.time_msec(),
event.libinput_keycode(),
event.raw_state(),
);
}
}
}
}
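// Flow summary (added): wlroots emits `key` for every physical key event; the event is
// first offered to the EventFilterManager (e.g. compositor keybindings), and only events
// no filter consumes are forwarded to the focused client through the wlr_seat.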
wayland_listener!(
KeyboardEventManager,
Weak<Keyboard>,
[
modifiers => modifiers_func: |this: &mut KeyboardEventManager, _data: *mut libc::c_void,| unsafe {
if let Some(handler) = this.data.upgrade() {
handler.modifiers();
}
};
key => key_func: |this: &mut KeyboardEventManager, data: *mut libc::c_void,| unsafe {
if let Some(handler) = this.data.upgrade() {
handler.key(data as _);
}
};
]
);
pub struct KeyboardManager {
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
keyboards: RefCell<Vec<Rc<Keyboard>>>,
}
impl KeyboardManager {
pub(crate) fn init(
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
) -> Rc<KeyboardManager> {
let keyboard_manager = Rc::new(KeyboardManager {
config_manager,
seat_manager: seat_manager.clone(),
event_filter_manager,
keyboards: RefCell::new(vec![]),
});
seat_manager
.on_new_device
.subscribe(listener!(keyboard_manager => move |device| {
if let DeviceType::Keyboard(_) = device.device_type() {
device.on_destroy.then(listener!(device, keyboard_manager => move || {
keyboard_manager
.keyboards
.borrow_mut()
.retain(|keyboard| keyboard.device.deref() != device.deref());
keyboard_manager
.seat_manager
.set_has_any_keyboard(keyboard_manager.has_keyboard());
}));
unsafe {
wlr_seat_set_keyboard(keyboard_manager.seat_manager.raw_seat(), device.raw_ptr());
}
let keyboard = Keyboard::init(
keyboard_manager.config_manager.clone(),
keyboard_manager.seat_manager.clone(),
keyboard_manager.event_filter_manager.clone(),
device.clone(),
);
keyboard_manager.keyboards.borrow_mut().push(keyboard);
keyboard_manager.seat_manager.set_has_any_keyboard(true);
}
}));
keyboard_manager
}
pub fn has_keyboard(&self) -> bool {
!self.keyboards.borrow().is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_util::*;
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let event_filter_manager = Rc::new(EventFilterManager::new());
let keyboard_manager = Rc::new(KeyboardManager::init(
config_manager,
seat_manager.clone(),
event_filter_manager,
));
let mut raw_keyboard = wlr_keyboard {
impl_: ptr::null(),
group: ptr::null_mut(),
keymap_string: ptr::null_mut(),
keymap_size: 0,
keymap: ptr::null_mut(),
xkb_state: ptr::null_mut(),
led_indexes: [0; 3],
mod_indexes: [0; 8],
keycodes: [0; 32],
num_keycodes: 0,
modifiers: wlr_keyboard_modifiers {
depressed: 0,
latched: 0,
locked: 0,
group: 0,
},
repeat_info: wlr_keyboard__bindgen_ty_1 { rate: 0, delay: 0 },
events: wlr_keyboard__bindgen_ty_2 {
key: new_wl_signal(),
modifiers: new_wl_signal(),
keymap: new_wl_signal(),
repeat_info: new_wl_signal(),
destroy: new_wl_signal(),
},
data: ptr::null_mut(),
};
let mut device = wlr_input_device {
impl_: ptr::null(),
type_: wlr_input_device_type_WLR_INPUT_DEVICE_KEYBOARD,
vendor: 0,
product: 0,
name: ptr::null_mut(),
width_mm: 0.0,
height_mm: 0.0,
output_name: ptr::null_mut(),
__bindgen_anon_1: wlr_input_device__bindgen_ty_1 {
keyboard: &mut raw_keyboard,
},
events: wlr_input_device__bindgen_ty_2 {
destroy: new_wl_signal(),
},
data: ptr::null_mut(),
link: new_wl_list(),
};
let key_signal = WlSignal::from_ptr(&mut raw_keyboard.events.key);
let modifiers_signal = WlSignal::from_ptr(&mut raw_keyboard.events.modifiers);
let keymap_signal = WlSignal::from_ptr(&mut raw_keyboard.events.keymap);
let repeat_info_signal = WlSignal::from_ptr(&mut raw_keyboard.events.repeat_info);
let destroy_signal = WlSignal::from_ptr(&mut device.events.destroy);
let device = Device::init(&mut device);
let weak_device = Rc::downgrade(&device);
seat_manager.on_new_device.fire(device);
let keyboard = keyboard_manager.keyboards.borrow().first().unwrap().clone();
let weak_keyboard = Rc::downgrade(&keyboard);
drop(keyboard);
assert!(weak_device.upgrade().is_some());
assert!(weak_keyboard.upgrade().is_some());
assert!(key_signal.listener_count() == 1);
assert!(modifiers_signal.listener_count() == 1);
assert!(destroy_signal.listener_count() == 1);
assert!(keyboard_manager.has_keyboard());
destroy_signal.emit();
assert!(key_signal.listener_count() == 0);
assert!(modifiers_signal.listener_count() == 0);
assert!(keymap_signal.listener_count() == 0);
assert!(repeat_info_signal.listener_count() == 0);
assert!(destroy_signal.listener_count() == 0);
assert!(!keyboard_manager.has_keyboard());
assert!(weak_keyboard.upgrade().is_none());
assert!(weak_device.upgrade().is_none());
}
}
#[cfg(test)]
use xkbcommon::xkb::ffi::{xkb_keymap, xkb_state};
#[cfg(test)]
unsafe fn wlr_seat_set_keyboard(_: *mut wlr_seat, _: *mut wlr_input_device) {}
#[cfg(test)]
unsafe fn wlr_keyboard_set_keymap(_: *mut wlr_keyboard, _: *mut xkb_keymap) {}
#[cfg(test)]
unsafe fn wlr_keyboard_set_repeat_info(_: *mut wlr_keyboard, _: i32, _: i32) {}
#[cfg(test)]
unsafe fn xkb_state_ref(ptr: *mut xkb_state) -> *mut xkb_state {
ptr
}
| Keyboard | identifier_name |
keyboard.rs | use crate::input::device::{Device, DeviceType};
use crate::input::event_filter::{EventFilter, EventFilterManager};
use crate::input::events::{InputEvent, KeyboardEvent};
use crate::{config::ConfigManager, input::seat::SeatManager};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cell::RefCell;
use std::ops::Deref;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
use xkbcommon::xkb;
#[cfg(not(test))]
use xkbcommon::xkb::ffi::xkb_state_ref;
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct RepeatRate(u32);
impl Default for RepeatRate {
fn default() -> Self {
RepeatRate(33)
}
}
#[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
pub struct RepeatDelay(u32);
impl Default for RepeatDelay {
fn default() -> Self {
RepeatDelay(500)
}
}
#[derive(Default, Debug, Eq, PartialEq, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct KeyboardConfig {
pub xkb_rules: String,
pub xkb_model: String,
pub xkb_layout: String,
pub xkb_variant: String,
pub xkb_options: Option<String>,
pub repeat_rate: RepeatRate,
pub repeat_delay: RepeatDelay,
}
pub struct Keyboard {
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
device: Rc<Device>,
keyboard: *mut wlr_keyboard,
xkb_state: RefCell<xkb::State>,
event_manager: RefCell<Option<Pin<Box<KeyboardEventManager>>>>,
}
impl Keyboard {
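  /// Wires up a wlroots keyboard: applies the keymap from the current
  /// config, re-applies it whenever the config changes, and registers
  /// listeners for the keyboard's `key` and `modifiers` signals.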
fn init(
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
device: Rc<Device>,
) -> Rc<Keyboard> {
debug!("Keyboard::init: {}", device.name());
let keyboard_ptr = match device.device_type() {
DeviceType::Keyboard(keyboard_ptr) => keyboard_ptr,
_ => panic!("Keyboard::init expects a keyboard device"),
};
let config = &config_manager.config().keyboard;
set_keymap_from_config(keyboard_ptr, config);
let keyboard = Rc::new(Keyboard {
seat_manager,
event_filter_manager,
device: device.clone(),
keyboard: keyboard_ptr,
xkb_state: RefCell::new(unsafe {
xkb::State::from_raw_ptr(xkb_state_ref((*keyboard_ptr).xkb_state))
}),
event_manager: RefCell::new(None),
});
let subscription =
config_manager
.on_config_changed()
.subscribe(listener!(keyboard => move |config| {
set_keymap_from_config(keyboard.raw_ptr(), &config.keyboard);
*keyboard.xkb_state.borrow_mut() = unsafe {
xkb::State::from_raw_ptr(xkb_state_ref((*keyboard_ptr).xkb_state))
};
}));
device.on_destroy.then(listener!(config_manager => move || {
config_manager.on_config_changed().unsubscribe(subscription);
}));
let mut event_manager = KeyboardEventManager::new(Rc::downgrade(&keyboard));
unsafe {
event_manager.modifiers(&mut (*keyboard_ptr).events.modifiers);
event_manager.key(&mut (*keyboard_ptr).events.key);
}
*keyboard.event_manager.borrow_mut() = Some(event_manager);
keyboard
}
pub fn raw_ptr(&self) -> *mut wlr_keyboard {
self.keyboard
}
pub fn device(&self) -> Rc<Device> {
self.device.clone()
}
pub fn xkb_state(&self) -> xkb::State {
self.xkb_state.borrow().clone()
}
}
fn set_keymap_from_config(keyboard_ptr: *mut wlr_keyboard, config: &KeyboardConfig) {
// We need to prepare an XKB keymap and assign it to the keyboard.
let context = xkb::Context::new(xkb::CONTEXT_NO_FLAGS);
let keymap = xkb::Keymap::new_from_names(
&context,
&config.xkb_rules,
&config.xkb_model,
&config.xkb_layout,
&config.xkb_variant,
config.xkb_options.clone(),
xkb::KEYMAP_COMPILE_NO_FLAGS,
)
.expect("xkb::Keymap could not be created");
unsafe {
wlr_keyboard_set_keymap(keyboard_ptr, keymap.get_raw_ptr());
wlr_keyboard_set_repeat_info(
keyboard_ptr,
config.repeat_rate.0 as i32, | }
}
pub(crate) trait KeyboardEventHandler {
fn modifiers(&self);
fn key(&self, event: *const wlr_event_keyboard_key);
}
impl KeyboardEventHandler for Keyboard {
fn modifiers(&self) {
unsafe {
// A seat can only have one keyboard, but this is a limitation of the
// Wayland protocol - not wlroots. We assign all connected keyboards to the
// same seat. You can swap out the underlying wlr_keyboard like this and
// wlr_seat handles this transparently.
wlr_seat_set_keyboard(self.seat_manager.raw_seat(), self.device.raw_ptr());
// Send modifiers to the client.
wlr_seat_keyboard_notify_modifiers(
self.seat_manager.raw_seat(),
&mut (*self.keyboard).modifiers,
);
}
}
fn key(&self, event: *const wlr_event_keyboard_key) {
let event = unsafe { KeyboardEvent::from_ptr(self, event) };
let handled = self.event_filter_manager.handle_keyboard_event(&event);
if !handled {
unsafe {
// Otherwise, we pass it along to the client.
wlr_seat_set_keyboard(self.seat_manager.raw_seat(), self.device.raw_ptr());
wlr_seat_keyboard_notify_key(
self.seat_manager.raw_seat(),
event.time_msec(),
event.libinput_keycode(),
event.raw_state(),
);
}
}
}
}
wayland_listener!(
KeyboardEventManager,
Weak<Keyboard>,
[
modifiers => modifiers_func: |this: &mut KeyboardEventManager, _data: *mut libc::c_void,| unsafe {
if let Some(handler) = this.data.upgrade() {
handler.modifiers();
}
};
key => key_func: |this: &mut KeyboardEventManager, data: *mut libc::c_void,| unsafe {
if let Some(handler) = this.data.upgrade() {
handler.key(data as _);
}
};
]
);
pub struct KeyboardManager {
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
keyboards: RefCell<Vec<Rc<Keyboard>>>,
}
impl KeyboardManager {
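  /// Watches the seat for new input devices, wraps every keyboard device in
  /// a `Keyboard`, and drops it again when the device is destroyed.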
pub(crate) fn init(
config_manager: Rc<ConfigManager>,
seat_manager: Rc<SeatManager>,
event_filter_manager: Rc<EventFilterManager>,
) -> Rc<KeyboardManager> {
let keyboard_manager = Rc::new(KeyboardManager {
config_manager,
seat_manager: seat_manager.clone(),
event_filter_manager,
keyboards: RefCell::new(vec![]),
});
seat_manager
.on_new_device
.subscribe(listener!(keyboard_manager => move |device| {
if let DeviceType::Keyboard(_) = device.device_type() {
device.on_destroy.then(listener!(device, keyboard_manager => move || {
keyboard_manager
.keyboards
.borrow_mut()
.retain(|keyboard| keyboard.device.deref() != device.deref());
keyboard_manager
.seat_manager
.set_has_any_keyboard(keyboard_manager.has_keyboard());
}));
unsafe {
wlr_seat_set_keyboard(keyboard_manager.seat_manager.raw_seat(), device.raw_ptr());
}
let keyboard = Keyboard::init(
keyboard_manager.config_manager.clone(),
keyboard_manager.seat_manager.clone(),
keyboard_manager.event_filter_manager.clone(),
device.clone(),
);
keyboard_manager.keyboards.borrow_mut().push(keyboard);
keyboard_manager.seat_manager.set_has_any_keyboard(true);
}
}));
keyboard_manager
}
pub fn has_keyboard(&self) -> bool {
!self.keyboards.borrow().is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_util::*;
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let event_filter_manager = Rc::new(EventFilterManager::new());
let keyboard_manager = Rc::new(KeyboardManager::init(
config_manager,
seat_manager.clone(),
event_filter_manager,
));
let mut raw_keyboard = wlr_keyboard {
impl_: ptr::null(),
group: ptr::null_mut(),
keymap_string: ptr::null_mut(),
keymap_size: 0,
keymap: ptr::null_mut(),
xkb_state: ptr::null_mut(),
led_indexes: [0; 3],
mod_indexes: [0; 8],
keycodes: [0; 32],
num_keycodes: 0,
modifiers: wlr_keyboard_modifiers {
depressed: 0,
latched: 0,
locked: 0,
group: 0,
},
repeat_info: wlr_keyboard__bindgen_ty_1 { rate: 0, delay: 0 },
events: wlr_keyboard__bindgen_ty_2 {
key: new_wl_signal(),
modifiers: new_wl_signal(),
keymap: new_wl_signal(),
repeat_info: new_wl_signal(),
destroy: new_wl_signal(),
},
data: ptr::null_mut(),
};
let mut device = wlr_input_device {
impl_: ptr::null(),
type_: wlr_input_device_type_WLR_INPUT_DEVICE_KEYBOARD,
vendor: 0,
product: 0,
name: ptr::null_mut(),
width_mm: 0.0,
height_mm: 0.0,
output_name: ptr::null_mut(),
__bindgen_anon_1: wlr_input_device__bindgen_ty_1 {
keyboard: &mut raw_keyboard,
},
events: wlr_input_device__bindgen_ty_2 {
destroy: new_wl_signal(),
},
data: ptr::null_mut(),
link: new_wl_list(),
};
let key_signal = WlSignal::from_ptr(&mut raw_keyboard.events.key);
let modifiers_signal = WlSignal::from_ptr(&mut raw_keyboard.events.modifiers);
let keymap_signal = WlSignal::from_ptr(&mut raw_keyboard.events.keymap);
let repeat_info_signal = WlSignal::from_ptr(&mut raw_keyboard.events.repeat_info);
let destroy_signal = WlSignal::from_ptr(&mut device.events.destroy);
let device = Device::init(&mut device);
let weak_device = Rc::downgrade(&device);
seat_manager.on_new_device.fire(device);
let keyboard = keyboard_manager.keyboards.borrow().first().unwrap().clone();
let weak_keyboard = Rc::downgrade(&keyboard);
drop(keyboard);
assert!(weak_device.upgrade().is_some());
assert!(weak_keyboard.upgrade().is_some());
assert!(key_signal.listener_count() == 1);
assert!(modifiers_signal.listener_count() == 1);
assert!(destroy_signal.listener_count() == 1);
assert!(keyboard_manager.has_keyboard());
destroy_signal.emit();
assert!(key_signal.listener_count() == 0);
assert!(modifiers_signal.listener_count() == 0);
assert!(keymap_signal.listener_count() == 0);
assert!(repeat_info_signal.listener_count() == 0);
assert!(destroy_signal.listener_count() == 0);
assert!(!keyboard_manager.has_keyboard());
assert!(weak_keyboard.upgrade().is_none());
assert!(weak_device.upgrade().is_none());
}
}
#[cfg(test)]
use xkbcommon::xkb::ffi::{xkb_keymap, xkb_state};
#[cfg(test)]
unsafe fn wlr_seat_set_keyboard(_: *mut wlr_seat, _: *mut wlr_input_device) {}
#[cfg(test)]
unsafe fn wlr_keyboard_set_keymap(_: *mut wlr_keyboard, _: *mut xkb_keymap) {}
#[cfg(test)]
unsafe fn wlr_keyboard_set_repeat_info(_: *mut wlr_keyboard, _: i32, _: i32) {}
#[cfg(test)]
unsafe fn xkb_state_ref(ptr: *mut xkb_state) -> *mut xkb_state {
ptr
} | config.repeat_delay.0 as i32,
); | random_line_split |
quic.rs | use {
crossbeam_channel::Sender,
futures_util::stream::StreamExt,
pem::Pem,
pkcs8::{der::Document, AlgorithmIdentifier, ObjectIdentifier},
quinn::{Endpoint, EndpointConfig, ServerConfig},
rcgen::{CertificateParams, DistinguishedName, DnType, SanType},
solana_perf::packet::PacketBatch,
solana_sdk::{
packet::{Packet, PACKET_DATA_SIZE},
signature::Keypair,
},
std::{
error::Error,
net::{IpAddr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread,
time::Duration,
},
tokio::{
runtime::{Builder, Runtime},
time::timeout,
},
};
/// Returns default server configuration along with its PEM certificate chain.
#[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527
fn configure_server(
identity_keypair: &Keypair,
gossip_host: IpAddr,
) -> Result<(ServerConfig, String), QuicServerError> {
let (cert_chain, priv_key) =
new_cert(identity_keypair, gossip_host).map_err(|_e| QuicServerError::ConfigureFailed)?;
let cert_chain_pem_parts: Vec<Pem> = cert_chain
.iter()
.map(|cert| Pem {
tag: "CERTIFICATE".to_string(),
contents: cert.0.clone(),
})
.collect();
let cert_chain_pem = pem::encode_many(&cert_chain_pem_parts);
let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key)
.map_err(|_e| QuicServerError::ConfigureFailed)?;
let config = Arc::get_mut(&mut server_config.transport).unwrap();
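    // Allow a single unidirectional stream per connection and size the
    // flow-control windows so a peer can buffer at most one packet at a time.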
const MAX_CONCURRENT_UNI_STREAMS: u32 = 1;
config.max_concurrent_uni_streams(MAX_CONCURRENT_UNI_STREAMS.into());
config.stream_receive_window((PACKET_DATA_SIZE as u32).into());
config.receive_window((PACKET_DATA_SIZE as u32 * MAX_CONCURRENT_UNI_STREAMS).into());
// disable bidi & datagrams
const MAX_CONCURRENT_BIDI_STREAMS: u32 = 0;
config.max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS.into());
config.datagram_receive_buffer_size(None);
Ok((server_config, cert_chain_pem))
}
fn new_cert(
identity_keypair: &Keypair,
san: IpAddr,
) -> Result<(Vec<rustls::Certificate>, rustls::PrivateKey), Box<dyn Error>> {
// Generate a self-signed cert from validator identity key
let cert_params = new_cert_params(identity_keypair, san);
let cert = rcgen::Certificate::from_params(cert_params)?;
let cert_der = cert.serialize_der().unwrap();
let priv_key = cert.serialize_private_key_der();
let priv_key = rustls::PrivateKey(priv_key);
let cert_chain = vec![rustls::Certificate(cert_der)];
Ok((cert_chain, priv_key))
}
fn convert_to_rcgen_keypair(identity_keypair: &Keypair) -> rcgen::KeyPair {
// from https://datatracker.ietf.org/doc/html/rfc8410#section-3
const ED25519_IDENTIFIER: [u32; 4] = [1, 3, 101, 112];
let mut private_key = Vec::<u8>::with_capacity(34);
private_key.extend_from_slice(&[0x04, 0x20]); // ASN.1 OCTET STRING
private_key.extend_from_slice(identity_keypair.secret().as_bytes());
let key_pkcs8 = pkcs8::PrivateKeyInfo {
algorithm: AlgorithmIdentifier {
oid: ObjectIdentifier::from_arcs(&ED25519_IDENTIFIER).unwrap(),
parameters: None,
},
private_key: &private_key,
public_key: None,
};
let key_pkcs8_der = key_pkcs8
.to_der()
.expect("Failed to convert keypair to DER")
.to_der();
// Parse private key into rcgen::KeyPair struct.
rcgen::KeyPair::from_der(&key_pkcs8_der).expect("Failed to parse keypair from DER")
}
fn new_cert_params(identity_keypair: &Keypair, san: IpAddr) -> CertificateParams {
// TODO(terorie): Is it safe to sign the TLS cert with the identity private key?
// Unfortunately, rcgen does not accept a "raw" Ed25519 key.
// We have to convert it to DER and pass it to the library.
// Convert private key into PKCS#8 v1 object.
// RFC 8410, Section 7: Private Key Format
// https://datatracker.ietf.org/doc/html/rfc8410#section-
let keypair = convert_to_rcgen_keypair(identity_keypair);
let mut cert_params = CertificateParams::default();
cert_params.subject_alt_names = vec![SanType::IpAddress(san)];
cert_params.alg = &rcgen::PKCS_ED25519;
cert_params.key_pair = Some(keypair);
cert_params.distinguished_name = DistinguishedName::new();
cert_params
.distinguished_name
.push(DnType::CommonName, "Solana node");
cert_params
}
pub fn rt() -> Runtime {
Builder::new_current_thread().enable_all().build().unwrap()
}
#[derive(thiserror::Error, Debug)]
pub enum | {
#[error("Server configure failed")]
ConfigureFailed,
#[error("Endpoint creation failed")]
EndpointFailed,
}
// Return true if the server should drop the stream
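// Chunks are copied into a single packet at their stream offset; the finished
// batch is forwarded to `packet_sender` once the stream reports no more data.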
fn handle_chunk(
chunk: &Result<Option<quinn::Chunk>, quinn::ReadError>,
maybe_batch: &mut Option<PacketBatch>,
remote_addr: &SocketAddr,
packet_sender: &Sender<PacketBatch>,
) -> bool {
match chunk {
Ok(maybe_chunk) => {
if let Some(chunk) = maybe_chunk {
trace!("got chunk: {:?}", chunk);
let chunk_len = chunk.bytes.len() as u64;
// shouldn't happen, but sanity check the size and offsets
if chunk.offset > PACKET_DATA_SIZE as u64 || chunk_len > PACKET_DATA_SIZE as u64 {
return true;
}
if chunk.offset + chunk_len > PACKET_DATA_SIZE as u64 {
return true;
}
// chunk looks valid
if maybe_batch.is_none() {
let mut batch = PacketBatch::with_capacity(1);
let mut packet = Packet::default();
packet.meta.set_addr(remote_addr);
batch.packets.push(packet);
*maybe_batch = Some(batch);
}
if let Some(batch) = maybe_batch.as_mut() {
let end = chunk.offset as usize + chunk.bytes.len();
batch.packets[0].data[chunk.offset as usize..end].copy_from_slice(&chunk.bytes);
batch.packets[0].meta.size = std::cmp::max(batch.packets[0].meta.size, end);
}
} else {
trace!("chunk is none");
// done receiving chunks
if let Some(batch) = maybe_batch.take() {
let len = batch.packets[0].meta.size;
if let Err(e) = packet_sender.send(batch) {
info!("send error: {}", e);
} else {
trace!("sent {} byte packet", len);
}
}
return true;
}
}
Err(e) => {
debug!("Received stream error: {:?}", e);
return true;
}
}
false
}
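/// Binds a QUIC endpoint on `sock` and spawns a thread that accepts
/// connections until `exit` is set, forwarding each received stream to
/// `packet_sender` as a single-packet `PacketBatch`.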
pub fn spawn_server(
sock: UdpSocket,
keypair: &Keypair,
gossip_host: IpAddr,
packet_sender: Sender<PacketBatch>,
exit: Arc<AtomicBool>,
) -> Result<thread::JoinHandle<()>, QuicServerError> {
let (config, _cert) = configure_server(keypair, gossip_host)?;
let runtime = rt();
let (_, mut incoming) = {
let _guard = runtime.enter();
Endpoint::new(EndpointConfig::default(), Some(config), sock)
.map_err(|_e| QuicServerError::EndpointFailed)?
};
let handle = thread::spawn(move || {
let handle = runtime.spawn(async move {
while !exit.load(Ordering::Relaxed) {
const WAIT_FOR_CONNECTION_TIMEOUT_MS: u64 = 1000;
let timeout_connection = timeout(
Duration::from_millis(WAIT_FOR_CONNECTION_TIMEOUT_MS),
incoming.next(),
)
.await;
if let Ok(Some(connection)) = timeout_connection {
if let Ok(new_connection) = connection.await {
let exit = exit.clone();
let quinn::NewConnection {
connection,
mut uni_streams,
..
} = new_connection;
let remote_addr = connection.remote_address();
let packet_sender = packet_sender.clone();
tokio::spawn(async move {
debug!("new connection {}", remote_addr);
while let Some(Ok(mut stream)) = uni_streams.next().await {
let mut maybe_batch = None;
while !exit.load(Ordering::Relaxed) {
if handle_chunk(
&stream.read_chunk(PACKET_DATA_SIZE, false).await,
&mut maybe_batch,
&remote_addr,
&packet_sender,
) {
break;
}
}
}
});
}
}
}
});
if let Err(e) = runtime.block_on(handle) {
warn!("error from runtime.block_on: {:?}", e);
}
});
Ok(handle)
}
#[cfg(test)]
mod test {
use {
super::*,
crossbeam_channel::unbounded,
quinn::{ClientConfig, NewConnection},
std::{net::SocketAddr, time::Instant},
};
struct SkipServerVerification;
impl SkipServerVerification {
fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
pub fn get_client_config() -> quinn::ClientConfig {
let crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_no_client_auth();
ClientConfig::new(Arc::new(crypto))
}
#[test]
fn test_quic_server_exit() {
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, _receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
fn make_client_endpoint(runtime: &Runtime, addr: &SocketAddr) -> NewConnection {
let client_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
let mut endpoint = quinn::Endpoint::new(EndpointConfig::default(), None, client_socket)
.unwrap()
.0;
endpoint.set_default_client_config(get_client_config());
runtime
.block_on(endpoint.connect(*addr, "localhost").unwrap())
.unwrap()
}
#[test]
fn test_quic_server_multiple_streams() {
solana_logger::setup();
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let server_address = s.local_addr().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
let runtime = rt();
let _rt_guard = runtime.enter();
let conn1 = Arc::new(make_client_endpoint(&runtime, &server_address));
let conn2 = Arc::new(make_client_endpoint(&runtime, &server_address));
let mut num_expected_packets = 0;
for i in 0..10 {
info!("sending: {}", i);
let c1 = conn1.clone();
let c2 = conn2.clone();
let handle = runtime.spawn(async move {
let mut s1 = c1.connection.open_uni().await.unwrap();
let mut s2 = c2.connection.open_uni().await.unwrap();
s1.write_all(&[0u8]).await.unwrap();
s1.finish().await.unwrap();
s2.write_all(&[0u8]).await.unwrap();
s2.finish().await.unwrap();
});
runtime.block_on(handle).unwrap();
num_expected_packets += 2;
thread::sleep(Duration::from_millis(200));
}
let mut all_packets = vec![];
let now = Instant::now();
let mut total_packets = 0;
while now.elapsed().as_secs() < 10 {
if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
total_packets += packets.packets.len();
all_packets.push(packets)
}
if total_packets == num_expected_packets {
break;
}
}
for batch in all_packets {
for p in &batch.packets {
assert_eq!(p.meta.size, 1);
}
}
assert_eq!(total_packets, num_expected_packets);
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
#[test]
fn test_quic_server_multiple_writes() {
solana_logger::setup();
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let server_address = s.local_addr().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
let runtime = rt();
let _rt_guard = runtime.enter();
let conn1 = Arc::new(make_client_endpoint(&runtime, &server_address));
// Send a full size packet with single byte writes.
let num_bytes = PACKET_DATA_SIZE;
let num_expected_packets = 1;
let handle = runtime.spawn(async move {
let mut s1 = conn1.connection.open_uni().await.unwrap();
for _ in 0..num_bytes {
s1.write_all(&[0u8]).await.unwrap();
}
s1.finish().await.unwrap();
});
runtime.block_on(handle).unwrap();
let mut all_packets = vec![];
let now = Instant::now();
let mut total_packets = 0;
while now.elapsed().as_secs() < 5 {
if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
total_packets += packets.packets.len();
all_packets.push(packets)
}
if total_packets > num_expected_packets {
break;
}
}
for batch in all_packets {
for p in &batch.packets {
assert_eq!(p.meta.size, num_bytes);
}
}
assert_eq!(total_packets, num_expected_packets);
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
}
| QuicServerError | identifier_name |
quic.rs | use {
crossbeam_channel::Sender,
futures_util::stream::StreamExt,
pem::Pem,
pkcs8::{der::Document, AlgorithmIdentifier, ObjectIdentifier},
quinn::{Endpoint, EndpointConfig, ServerConfig},
rcgen::{CertificateParams, DistinguishedName, DnType, SanType},
solana_perf::packet::PacketBatch,
solana_sdk::{
packet::{Packet, PACKET_DATA_SIZE},
signature::Keypair,
},
std::{
error::Error,
net::{IpAddr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread,
time::Duration,
},
tokio::{
runtime::{Builder, Runtime},
time::timeout,
},
};
/// Returns default server configuration along with its PEM certificate chain.
#[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527
fn configure_server(
identity_keypair: &Keypair,
gossip_host: IpAddr,
) -> Result<(ServerConfig, String), QuicServerError> {
let (cert_chain, priv_key) = | contents: cert.0.clone(),
})
.collect();
let cert_chain_pem = pem::encode_many(&cert_chain_pem_parts);
let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key)
.map_err(|_e| QuicServerError::ConfigureFailed)?;
let config = Arc::get_mut(&mut server_config.transport).unwrap();
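    // Allow a single unidirectional stream per connection and size the
    // flow-control windows so a peer can buffer at most one packet at a time.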
const MAX_CONCURRENT_UNI_STREAMS: u32 = 1;
config.max_concurrent_uni_streams(MAX_CONCURRENT_UNI_STREAMS.into());
config.stream_receive_window((PACKET_DATA_SIZE as u32).into());
config.receive_window((PACKET_DATA_SIZE as u32 * MAX_CONCURRENT_UNI_STREAMS).into());
// disable bidi & datagrams
const MAX_CONCURRENT_BIDI_STREAMS: u32 = 0;
config.max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS.into());
config.datagram_receive_buffer_size(None);
Ok((server_config, cert_chain_pem))
}
fn new_cert(
identity_keypair: &Keypair,
san: IpAddr,
) -> Result<(Vec<rustls::Certificate>, rustls::PrivateKey), Box<dyn Error>> {
// Generate a self-signed cert from validator identity key
let cert_params = new_cert_params(identity_keypair, san);
let cert = rcgen::Certificate::from_params(cert_params)?;
let cert_der = cert.serialize_der().unwrap();
let priv_key = cert.serialize_private_key_der();
let priv_key = rustls::PrivateKey(priv_key);
let cert_chain = vec![rustls::Certificate(cert_der)];
Ok((cert_chain, priv_key))
}
fn convert_to_rcgen_keypair(identity_keypair: &Keypair) -> rcgen::KeyPair {
// from https://datatracker.ietf.org/doc/html/rfc8410#section-3
const ED25519_IDENTIFIER: [u32; 4] = [1, 3, 101, 112];
let mut private_key = Vec::<u8>::with_capacity(34);
private_key.extend_from_slice(&[0x04, 0x20]); // ASN.1 OCTET STRING
private_key.extend_from_slice(identity_keypair.secret().as_bytes());
let key_pkcs8 = pkcs8::PrivateKeyInfo {
algorithm: AlgorithmIdentifier {
oid: ObjectIdentifier::from_arcs(&ED25519_IDENTIFIER).unwrap(),
parameters: None,
},
private_key: &private_key,
public_key: None,
};
let key_pkcs8_der = key_pkcs8
.to_der()
.expect("Failed to convert keypair to DER")
.to_der();
// Parse private key into rcgen::KeyPair struct.
rcgen::KeyPair::from_der(&key_pkcs8_der).expect("Failed to parse keypair from DER")
}
fn new_cert_params(identity_keypair: &Keypair, san: IpAddr) -> CertificateParams {
// TODO(terorie): Is it safe to sign the TLS cert with the identity private key?
// Unfortunately, rcgen does not accept a "raw" Ed25519 key.
// We have to convert it to DER and pass it to the library.
// Convert private key into PKCS#8 v1 object.
// RFC 8410, Section 7: Private Key Format
// https://datatracker.ietf.org/doc/html/rfc8410#section-
let keypair = convert_to_rcgen_keypair(identity_keypair);
let mut cert_params = CertificateParams::default();
cert_params.subject_alt_names = vec![SanType::IpAddress(san)];
cert_params.alg = &rcgen::PKCS_ED25519;
cert_params.key_pair = Some(keypair);
cert_params.distinguished_name = DistinguishedName::new();
cert_params
.distinguished_name
.push(DnType::CommonName, "Solana node");
cert_params
}
pub fn rt() -> Runtime {
Builder::new_current_thread().enable_all().build().unwrap()
}
#[derive(thiserror::Error, Debug)]
pub enum QuicServerError {
#[error("Server configure failed")]
ConfigureFailed,
#[error("Endpoint creation failed")]
EndpointFailed,
}
// Return true if the server should drop the stream
fn handle_chunk(
chunk: &Result<Option<quinn::Chunk>, quinn::ReadError>,
maybe_batch: &mut Option<PacketBatch>,
remote_addr: &SocketAddr,
packet_sender: &Sender<PacketBatch>,
) -> bool {
match chunk {
Ok(maybe_chunk) => {
if let Some(chunk) = maybe_chunk {
trace!("got chunk: {:?}", chunk);
let chunk_len = chunk.bytes.len() as u64;
// shouldn't happen, but sanity check the size and offsets
if chunk.offset > PACKET_DATA_SIZE as u64 || chunk_len > PACKET_DATA_SIZE as u64 {
return true;
}
if chunk.offset + chunk_len > PACKET_DATA_SIZE as u64 {
return true;
}
// chunk looks valid
if maybe_batch.is_none() {
let mut batch = PacketBatch::with_capacity(1);
let mut packet = Packet::default();
packet.meta.set_addr(remote_addr);
batch.packets.push(packet);
*maybe_batch = Some(batch);
}
if let Some(batch) = maybe_batch.as_mut() {
let end = chunk.offset as usize + chunk.bytes.len();
batch.packets[0].data[chunk.offset as usize..end].copy_from_slice(&chunk.bytes);
batch.packets[0].meta.size = std::cmp::max(batch.packets[0].meta.size, end);
}
} else {
trace!("chunk is none");
// done receiving chunks
if let Some(batch) = maybe_batch.take() {
let len = batch.packets[0].meta.size;
if let Err(e) = packet_sender.send(batch) {
info!("send error: {}", e);
} else {
trace!("sent {} byte packet", len);
}
}
return true;
}
}
Err(e) => {
debug!("Received stream error: {:?}", e);
return true;
}
}
false
}
pub fn spawn_server(
sock: UdpSocket,
keypair: &Keypair,
gossip_host: IpAddr,
packet_sender: Sender<PacketBatch>,
exit: Arc<AtomicBool>,
) -> Result<thread::JoinHandle<()>, QuicServerError> {
let (config, _cert) = configure_server(keypair, gossip_host)?;
let runtime = rt();
let (_, mut incoming) = {
let _guard = runtime.enter();
Endpoint::new(EndpointConfig::default(), Some(config), sock)
.map_err(|_e| QuicServerError::EndpointFailed)?
};
let handle = thread::spawn(move || {
let handle = runtime.spawn(async move {
while !exit.load(Ordering::Relaxed) {
const WAIT_FOR_CONNECTION_TIMEOUT_MS: u64 = 1000;
let timeout_connection = timeout(
Duration::from_millis(WAIT_FOR_CONNECTION_TIMEOUT_MS),
incoming.next(),
)
.await;
if let Ok(Some(connection)) = timeout_connection {
if let Ok(new_connection) = connection.await {
let exit = exit.clone();
let quinn::NewConnection {
connection,
mut uni_streams,
..
} = new_connection;
let remote_addr = connection.remote_address();
let packet_sender = packet_sender.clone();
tokio::spawn(async move {
debug!("new connection {}", remote_addr);
while let Some(Ok(mut stream)) = uni_streams.next().await {
let mut maybe_batch = None;
while !exit.load(Ordering::Relaxed) {
if handle_chunk(
&stream.read_chunk(PACKET_DATA_SIZE, false).await,
&mut maybe_batch,
&remote_addr,
&packet_sender,
) {
break;
}
}
}
});
}
}
}
});
if let Err(e) = runtime.block_on(handle) {
warn!("error from runtime.block_on: {:?}", e);
}
});
Ok(handle)
}
#[cfg(test)]
mod test {
use {
super::*,
crossbeam_channel::unbounded,
quinn::{ClientConfig, NewConnection},
std::{net::SocketAddr, time::Instant},
};
struct SkipServerVerification;
impl SkipServerVerification {
fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
pub fn get_client_config() -> quinn::ClientConfig {
let crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_no_client_auth();
ClientConfig::new(Arc::new(crypto))
}
#[test]
fn test_quic_server_exit() {
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, _receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
fn make_client_endpoint(runtime: &Runtime, addr: &SocketAddr) -> NewConnection {
let client_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
let mut endpoint = quinn::Endpoint::new(EndpointConfig::default(), None, client_socket)
.unwrap()
.0;
endpoint.set_default_client_config(get_client_config());
runtime
.block_on(endpoint.connect(*addr, "localhost").unwrap())
.unwrap()
}
#[test]
fn test_quic_server_multiple_streams() {
solana_logger::setup();
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let server_address = s.local_addr().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
let runtime = rt();
let _rt_guard = runtime.enter();
let conn1 = Arc::new(make_client_endpoint(&runtime, &server_address));
let conn2 = Arc::new(make_client_endpoint(&runtime, &server_address));
let mut num_expected_packets = 0;
for i in 0..10 {
info!("sending: {}", i);
let c1 = conn1.clone();
let c2 = conn2.clone();
let handle = runtime.spawn(async move {
let mut s1 = c1.connection.open_uni().await.unwrap();
let mut s2 = c2.connection.open_uni().await.unwrap();
s1.write_all(&[0u8]).await.unwrap();
s1.finish().await.unwrap();
s2.write_all(&[0u8]).await.unwrap();
s2.finish().await.unwrap();
});
runtime.block_on(handle).unwrap();
num_expected_packets += 2;
thread::sleep(Duration::from_millis(200));
}
let mut all_packets = vec![];
let now = Instant::now();
let mut total_packets = 0;
while now.elapsed().as_secs() < 10 {
if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
total_packets += packets.packets.len();
all_packets.push(packets)
}
if total_packets == num_expected_packets {
break;
}
}
for batch in all_packets {
for p in &batch.packets {
assert_eq!(p.meta.size, 1);
}
}
assert_eq!(total_packets, num_expected_packets);
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
#[test]
fn test_quic_server_multiple_writes() {
solana_logger::setup();
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let server_address = s.local_addr().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
let runtime = rt();
let _rt_guard = runtime.enter();
let conn1 = Arc::new(make_client_endpoint(&runtime, &server_address));
// Send a full size packet with single byte writes.
let num_bytes = PACKET_DATA_SIZE;
let num_expected_packets = 1;
let handle = runtime.spawn(async move {
let mut s1 = conn1.connection.open_uni().await.unwrap();
for _ in 0..num_bytes {
s1.write_all(&[0u8]).await.unwrap();
}
s1.finish().await.unwrap();
});
runtime.block_on(handle).unwrap();
let mut all_packets = vec![];
let now = Instant::now();
let mut total_packets = 0;
while now.elapsed().as_secs() < 5 {
if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
total_packets += packets.packets.len();
all_packets.push(packets)
}
if total_packets > num_expected_packets {
break;
}
}
for batch in all_packets {
for p in &batch.packets {
assert_eq!(p.meta.size, num_bytes);
}
}
assert_eq!(total_packets, num_expected_packets);
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
} | new_cert(identity_keypair, gossip_host).map_err(|_e| QuicServerError::ConfigureFailed)?;
let cert_chain_pem_parts: Vec<Pem> = cert_chain
.iter()
.map(|cert| Pem {
tag: "CERTIFICATE".to_string(), | random_line_split |
quic.rs | use {
crossbeam_channel::Sender,
futures_util::stream::StreamExt,
pem::Pem,
pkcs8::{der::Document, AlgorithmIdentifier, ObjectIdentifier},
quinn::{Endpoint, EndpointConfig, ServerConfig},
rcgen::{CertificateParams, DistinguishedName, DnType, SanType},
solana_perf::packet::PacketBatch,
solana_sdk::{
packet::{Packet, PACKET_DATA_SIZE},
signature::Keypair,
},
std::{
error::Error,
net::{IpAddr, SocketAddr, UdpSocket},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread,
time::Duration,
},
tokio::{
runtime::{Builder, Runtime},
time::timeout,
},
};
/// Returns default server configuration along with its PEM certificate chain.
#[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527
fn configure_server(
identity_keypair: &Keypair,
gossip_host: IpAddr,
) -> Result<(ServerConfig, String), QuicServerError> {
let (cert_chain, priv_key) =
new_cert(identity_keypair, gossip_host).map_err(|_e| QuicServerError::ConfigureFailed)?;
let cert_chain_pem_parts: Vec<Pem> = cert_chain
.iter()
.map(|cert| Pem {
tag: "CERTIFICATE".to_string(),
contents: cert.0.clone(),
})
.collect();
let cert_chain_pem = pem::encode_many(&cert_chain_pem_parts);
let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key)
.map_err(|_e| QuicServerError::ConfigureFailed)?;
let config = Arc::get_mut(&mut server_config.transport).unwrap();
const MAX_CONCURRENT_UNI_STREAMS: u32 = 1;
config.max_concurrent_uni_streams(MAX_CONCURRENT_UNI_STREAMS.into());
config.stream_receive_window((PACKET_DATA_SIZE as u32).into());
config.receive_window((PACKET_DATA_SIZE as u32 * MAX_CONCURRENT_UNI_STREAMS).into());
// disable bidi & datagrams
const MAX_CONCURRENT_BIDI_STREAMS: u32 = 0;
config.max_concurrent_bidi_streams(MAX_CONCURRENT_BIDI_STREAMS.into());
config.datagram_receive_buffer_size(None);
Ok((server_config, cert_chain_pem))
}
fn new_cert(
identity_keypair: &Keypair,
san: IpAddr,
) -> Result<(Vec<rustls::Certificate>, rustls::PrivateKey), Box<dyn Error>> {
// Generate a self-signed cert from validator identity key
let cert_params = new_cert_params(identity_keypair, san);
let cert = rcgen::Certificate::from_params(cert_params)?;
let cert_der = cert.serialize_der().unwrap();
let priv_key = cert.serialize_private_key_der();
let priv_key = rustls::PrivateKey(priv_key);
let cert_chain = vec![rustls::Certificate(cert_der)];
Ok((cert_chain, priv_key))
}
fn convert_to_rcgen_keypair(identity_keypair: &Keypair) -> rcgen::KeyPair {
// from https://datatracker.ietf.org/doc/html/rfc8410#section-3
const ED25519_IDENTIFIER: [u32; 4] = [1, 3, 101, 112];
let mut private_key = Vec::<u8>::with_capacity(34);
private_key.extend_from_slice(&[0x04, 0x20]); // ASN.1 OCTET STRING
private_key.extend_from_slice(identity_keypair.secret().as_bytes());
let key_pkcs8 = pkcs8::PrivateKeyInfo {
algorithm: AlgorithmIdentifier {
oid: ObjectIdentifier::from_arcs(&ED25519_IDENTIFIER).unwrap(),
parameters: None,
},
private_key: &private_key,
public_key: None,
};
let key_pkcs8_der = key_pkcs8
.to_der()
.expect("Failed to convert keypair to DER")
.to_der();
// Parse private key into rcgen::KeyPair struct.
rcgen::KeyPair::from_der(&key_pkcs8_der).expect("Failed to parse keypair from DER")
}
fn new_cert_params(identity_keypair: &Keypair, san: IpAddr) -> CertificateParams {
// TODO(terorie): Is it safe to sign the TLS cert with the identity private key?
// Unfortunately, rcgen does not accept a "raw" Ed25519 key.
// We have to convert it to DER and pass it to the library.
// Convert private key into PKCS#8 v1 object.
// RFC 8410, Section 7: Private Key Format
// https://datatracker.ietf.org/doc/html/rfc8410#section-
let keypair = convert_to_rcgen_keypair(identity_keypair);
let mut cert_params = CertificateParams::default();
cert_params.subject_alt_names = vec![SanType::IpAddress(san)];
cert_params.alg = &rcgen::PKCS_ED25519;
cert_params.key_pair = Some(keypair);
cert_params.distinguished_name = DistinguishedName::new();
cert_params
.distinguished_name
.push(DnType::CommonName, "Solana node");
cert_params
}
pub fn rt() -> Runtime {
Builder::new_current_thread().enable_all().build().unwrap()
}
#[derive(thiserror::Error, Debug)]
pub enum QuicServerError {
#[error("Server configure failed")]
ConfigureFailed,
#[error("Endpoint creation failed")]
EndpointFailed,
}
// Return true if the server should drop the stream
fn handle_chunk(
chunk: &Result<Option<quinn::Chunk>, quinn::ReadError>,
maybe_batch: &mut Option<PacketBatch>,
remote_addr: &SocketAddr,
packet_sender: &Sender<PacketBatch>,
) -> bool {
match chunk {
Ok(maybe_chunk) => {
if let Some(chunk) = maybe_chunk {
trace!("got chunk: {:?}", chunk);
let chunk_len = chunk.bytes.len() as u64;
// shouldn't happen, but sanity check the size and offsets
if chunk.offset > PACKET_DATA_SIZE as u64 || chunk_len > PACKET_DATA_SIZE as u64 {
return true;
}
if chunk.offset + chunk_len > PACKET_DATA_SIZE as u64 {
return true;
}
// chunk looks valid
if maybe_batch.is_none() {
let mut batch = PacketBatch::with_capacity(1);
let mut packet = Packet::default();
packet.meta.set_addr(remote_addr);
batch.packets.push(packet);
*maybe_batch = Some(batch);
}
if let Some(batch) = maybe_batch.as_mut() {
let end = chunk.offset as usize + chunk.bytes.len();
batch.packets[0].data[chunk.offset as usize..end].copy_from_slice(&chunk.bytes);
batch.packets[0].meta.size = std::cmp::max(batch.packets[0].meta.size, end);
}
} else {
trace!("chunk is none");
// done receiving chunks
if let Some(batch) = maybe_batch.take() {
let len = batch.packets[0].meta.size;
if let Err(e) = packet_sender.send(batch) {
info!("send error: {}", e);
} else {
trace!("sent {} byte packet", len);
}
}
return true;
}
}
Err(e) => {
debug!("Received stream error: {:?}", e);
return true;
}
}
false
}
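/// Binds a QUIC endpoint on `sock` and spawns a thread that accepts
/// connections until `exit` is set, forwarding each received stream to
/// `packet_sender` as a single-packet `PacketBatch`.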
pub fn spawn_server(
sock: UdpSocket,
keypair: &Keypair,
gossip_host: IpAddr,
packet_sender: Sender<PacketBatch>,
exit: Arc<AtomicBool>,
) -> Result<thread::JoinHandle<()>, QuicServerError> {
let (config, _cert) = configure_server(keypair, gossip_host)?;
let runtime = rt();
let (_, mut incoming) = {
let _guard = runtime.enter();
Endpoint::new(EndpointConfig::default(), Some(config), sock)
.map_err(|_e| QuicServerError::EndpointFailed)?
};
let handle = thread::spawn(move || {
let handle = runtime.spawn(async move {
while !exit.load(Ordering::Relaxed) {
const WAIT_FOR_CONNECTION_TIMEOUT_MS: u64 = 1000;
let timeout_connection = timeout(
Duration::from_millis(WAIT_FOR_CONNECTION_TIMEOUT_MS),
incoming.next(),
)
.await;
if let Ok(Some(connection)) = timeout_connection {
if let Ok(new_connection) = connection.await {
let exit = exit.clone();
let quinn::NewConnection {
connection,
mut uni_streams,
..
} = new_connection;
let remote_addr = connection.remote_address();
let packet_sender = packet_sender.clone();
tokio::spawn(async move {
debug!("new connection {}", remote_addr);
while let Some(Ok(mut stream)) = uni_streams.next().await {
let mut maybe_batch = None;
while !exit.load(Ordering::Relaxed) {
if handle_chunk(
&stream.read_chunk(PACKET_DATA_SIZE, false).await,
&mut maybe_batch,
&remote_addr,
&packet_sender,
) {
break;
}
}
}
});
}
}
}
});
if let Err(e) = runtime.block_on(handle) {
warn!("error from runtime.block_on: {:?}", e);
}
});
Ok(handle)
}
#[cfg(test)]
mod test {
use {
super::*,
crossbeam_channel::unbounded,
quinn::{ClientConfig, NewConnection},
std::{net::SocketAddr, time::Instant},
};
struct SkipServerVerification;
impl SkipServerVerification {
fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
pub fn get_client_config() -> quinn::ClientConfig {
let crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_no_client_auth();
ClientConfig::new(Arc::new(crypto))
}
#[test]
fn test_quic_server_exit() {
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, _receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
fn make_client_endpoint(runtime: &Runtime, addr: &SocketAddr) -> NewConnection {
let client_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
let mut endpoint = quinn::Endpoint::new(EndpointConfig::default(), None, client_socket)
.unwrap()
.0;
endpoint.set_default_client_config(get_client_config());
runtime
.block_on(endpoint.connect(*addr, "localhost").unwrap())
.unwrap()
}
#[test]
fn test_quic_server_multiple_streams() |
#[test]
fn test_quic_server_multiple_writes() {
solana_logger::setup();
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let server_address = s.local_addr().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
let runtime = rt();
let _rt_guard = runtime.enter();
let conn1 = Arc::new(make_client_endpoint(&runtime, &server_address));
// Send a full size packet with single byte writes.
let num_bytes = PACKET_DATA_SIZE;
let num_expected_packets = 1;
let handle = runtime.spawn(async move {
let mut s1 = conn1.connection.open_uni().await.unwrap();
for _ in 0..num_bytes {
s1.write_all(&[0u8]).await.unwrap();
}
s1.finish().await.unwrap();
});
runtime.block_on(handle).unwrap();
let mut all_packets = vec![];
let now = Instant::now();
let mut total_packets = 0;
while now.elapsed().as_secs() < 5 {
if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
total_packets += packets.packets.len();
all_packets.push(packets)
}
if total_packets > num_expected_packets {
break;
}
}
for batch in all_packets {
for p in &batch.packets {
assert_eq!(p.meta.size, num_bytes);
}
}
assert_eq!(total_packets, num_expected_packets);
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
}
}
| {
solana_logger::setup();
let s = UdpSocket::bind("127.0.0.1:0").unwrap();
let exit = Arc::new(AtomicBool::new(false));
let (sender, receiver) = unbounded();
let keypair = Keypair::new();
let ip = "127.0.0.1".parse().unwrap();
let server_address = s.local_addr().unwrap();
let t = spawn_server(s, &keypair, ip, sender, exit.clone()).unwrap();
let runtime = rt();
let _rt_guard = runtime.enter();
let conn1 = Arc::new(make_client_endpoint(&runtime, &server_address));
let conn2 = Arc::new(make_client_endpoint(&runtime, &server_address));
let mut num_expected_packets = 0;
for i in 0..10 {
info!("sending: {}", i);
let c1 = conn1.clone();
let c2 = conn2.clone();
let handle = runtime.spawn(async move {
let mut s1 = c1.connection.open_uni().await.unwrap();
let mut s2 = c2.connection.open_uni().await.unwrap();
s1.write_all(&[0u8]).await.unwrap();
s1.finish().await.unwrap();
s2.write_all(&[0u8]).await.unwrap();
s2.finish().await.unwrap();
});
runtime.block_on(handle).unwrap();
num_expected_packets += 2;
thread::sleep(Duration::from_millis(200));
}
let mut all_packets = vec![];
let now = Instant::now();
let mut total_packets = 0;
while now.elapsed().as_secs() < 10 {
if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) {
total_packets += packets.packets.len();
all_packets.push(packets)
}
if total_packets == num_expected_packets {
break;
}
}
for batch in all_packets {
for p in &batch.packets {
assert_eq!(p.meta.size, 1);
}
}
assert_eq!(total_packets, num_expected_packets);
exit.store(true, Ordering::Relaxed);
t.join().unwrap();
} | identifier_body |
bob.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import datetime
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape as esc
from django.utils.timesince import timesince
from bob.forms.dependency import DependencyForm
register = template.Library()
@register.simple_tag
def bob_icon(name, is_white=False):
"""
Display a bootstrap icon.
:param name: The name of the icon to display.
:param is_white: Whether the icon should be white (for dark background).
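    Example, assuming the tag library is loaded with ``{% load bob %}``::

        {% bob_icon "ok" %}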
"""
white = ' icon-white' if is_white else ''
    return mark_safe('<i class="icon-%s%s"></i>' % (esc(name), white))
@register.inclusion_tag('bob/main_menu.html')
def main_menu(items, selected, title=None, search=None, white=False,
position='', title_url="/"):
"""
Show main menu bar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param title: The title to show in the menu bar.
:param search: The URL for the search form.
:param white: If True, the menu bar will be white.
:param position: Empty, or one of ``'fixed'``, ``'static'``, ``'bottom'``.
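    Example, assuming ``menu_items`` is a list of :class:`bob.menu.MenuItem`
    built in the view::

        {% main_menu menu_items "home" "My app" %}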
"""
positions = {
'static': 'navbar-static-top',
'fixed': 'navbar-fixed-top',
'bottom': 'navbar-fixed-bottom',
}
klass = ['navbar', positions.get(position, '')]
if not white:
klass.append('navbar-inverse')
return {
'items': items,
'selected': selected,
'title': title,
'search': search,
'position': position,
'white': bool(white),
'title_url': title_url,
'class': ' '.join(klass),
}
@register.inclusion_tag('bob/dropdown_items.html')
def dropdown_items(items, white=False):
"""
Render dropdown items.
    :param items: The :class:`bob.menu.MenuItem` whose ``subitems`` are shown.
    :param white: If True, the dropdown will be white.
"""
return {
'items': items.subitems,
'white': bool(white),
}
@register.simple_tag
def render_cell(column, row):
"""Render the cell for a given column and row."""
return column.render_cell(row)
@register.inclusion_tag('bob/tab_menu.html')
def tab_menu(items, selected, side=None):
"""
Show a menu in form of tabs.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
    :param side: The direction of tabs; may be one of ``"left"``, ``"right"``,
        ``"top"`` or ``"bottom"``. Defaults to ``"top"``.
"""
return {
'items': items,
'selected': selected,
'side': side,
}
@register.inclusion_tag('bob/sidebar_menu.html')
def sidebar_menu(items, selected):
"""
Show menu in a sidebar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
"""
return {
'items': items,
'selected': selected,
}
@register.inclusion_tag('bob/sidebar_menu_subitems.html')
def sidebar_menu_subitems(item, selected):
"""
Show subitems of a menu in a sidebar.
"""
return {
'item': item,
'selected': selected,
}
@register.inclusion_tag('bob/pagination.html')
def pagination(page, show_all=False, show_csv=False,
fugue_icons=False, url_query=None, neighbors=1,
query_variable_name='page', export_variable_name='export'):
"""
Display pagination for a list of items.
:param page: Django's paginator page to display.
:param show_all: Whether to show a link for disabling pagination.
:param show_csv: Whether to show a link to CSV download.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param url_query: The query parameters to add to all page links.
:param neighbors: How many neighboring pages to show in paginator.
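    Example, assuming the view passes a paginator page as ``page_obj``::

        {% pagination page_obj %}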
"""
if not page:
return {
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'export_variable_name': export_variable_name,
}
paginator = page.paginator
page_no = page.number
pages = paginator.page_range[
max(0, page_no - 1 - neighbors):
min(paginator.num_pages, page_no + neighbors)
]
if 1 not in pages:
pages.insert(0, 1)
pages.insert(1, '...')
if paginator.num_pages not in pages:
pages.append('...')
pages.append(paginator.num_pages)
urls = []
for item in pages:
if item == '...':
urls.append(changed_url(url_query, query_variable_name, page_no))
else:
urls.append(changed_url(url_query, query_variable_name, item))
url_pages = zip(pages, urls)
return {
'paginator': paginator,
'page_no': page_no,
'page': page,
'pages': pages,
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'url_previous_page': changed_url(
url_query,
query_variable_name,
page_no - 1
),
'url_next_page': changed_url(
url_query,
query_variable_name,
page_no + 1
),
'url_pages': url_pages,
'url_all': changed_url(url_query, query_variable_name, 0),
'export_variable_name': export_variable_name,
}
def changed_url(query, name, value):
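    """Return ``query`` re-encoded with ``name`` set to ``value``; ``None`` or
    ``1`` (the first page) drop the parameter instead."""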
if not query:
return '%s=%s' % (name, value)
query = query.copy()
if value is not None and value not in ('1', 1):
query[name] = value
else:
try:
del query[name]
except KeyError:
pass
return query.urlencode()
@register.filter
def bob_export(query, export):
"""Modify the query string of an URL to change the ``export`` argument."""
if not query:
return 'export=%s' % export
query = query.copy()
if export:
query['export'] = export
else:
try:
del query['export']
except KeyError:
pass
return query.urlencode()
@register.filter
def timesince_limited(d):
"""
Display time between given date and now in a human-readable form if the
time span is less than a day, otherwise display the date normally.
:param d: The date to display.
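    Example, where ``obj.modified`` is any datetime in the template context::

        {{ obj.modified|timesince_limited }}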
"""
today = datetime.datetime.now()
delta = datetime.timedelta
interval = today - d
if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):
if interval < delta(days=0, hours=1):
return timesince(d) + ' ago '
else:
|
else:
return d
@register.inclusion_tag('bob/form.html')
def form(form, action="", method="POST", fugue_icons=False,
css_class="form-horizontal", title="", submit_label='Save'):
"""
Render a form.
:param form: The form to render.
:param action: The submit URL.
:param method: The submit method, either ``"GET"`` or ``"POST"``.
    :param fugue_icons: Whether to use Fugue or Bootstrap icons.
:param css_class: The CSS class to use for the ``<form>`` tag.
:param title: Form title.
:param submit_label: Submit button label.
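    Example, assuming ``search_form`` is a Django form in the context::

        {% form search_form "/search/" "GET" %}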
"""
return {
'form': form,
'action': action,
'title': title,
'method': method,
'fugue_icons': fugue_icons,
'css_class': css_class,
'submit_label': submit_label,
}
@register.inclusion_tag('bob/form_as_fieldsets.html')
def form_as_fieldsets(form_instance, *args, **kwargs):
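    """Render a form grouped into fieldsets as defined by the form's
    ``Meta.fieldset``; raises if that attribute is undefined or empty."""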
if not getattr(form_instance.Meta, 'fieldset', None):
raise Exception(
"{}.Meta.fieldset attribute is UNDEFINED or EMPTY".format(
repr(form_instance)
)
)
return form(form_instance, *args, **kwargs)
@register.inclusion_tag('bob/form.html')
def form_horizontal(*args, **kwargs):
return form(*args, **kwargs)
@register.inclusion_tag('bob/table_header.html')
def table_header(columns=None, url_query=None, sort=None, fugue_icons=False,
sort_variable_name='sort'):
"""
Render a table header with sorted column options
    :param columns: a list of :py:class:`bob.data_table.DataTableColumn`
        objects describing the columns.
    :param url_query: The query parameters to add to all page links.
    :param sort: the field the table is currently sorted by (a ``-`` prefix
        means descending).
    :param fugue_icons: Whether to use Fugue icons or Bootstrap icons.

    A column's ``show_conditions`` attribute, a ``(func, arg)`` tuple, decides
    whether that column is rendered.
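    For example, ``show_conditions = (lambda user: user.is_superuser, request.user)``
    (a hypothetical check) hides the column from non-superusers.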
"""
new_columns = []
for column in columns:
if isinstance(column.show_conditions, tuple):
func, arg = column.show_conditions
if func(arg):
new_columns.append(column)
else:
new_columns.append(column)
return {
'columns': new_columns,
'sort': sort,
'url_query': url_query,
'fugue_icons': fugue_icons,
'sort_variable_name': sort_variable_name,
}
@register.simple_tag
def bob_sort_url(query, field, sort_variable_name, type):
"""Modify the query string of an URL to change the ``sort_variable_name``
argument.
"""
query = query.copy()
if type == 'desc':
query[sort_variable_name] = '-' + field
elif type == 'asc':
query[sort_variable_name] = field
return query.urlencode()
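# Illustrative example (assumes a QueryDict input): with query = QueryDict('page=2'),
# bob_sort_url(query, 'name', 'sort', 'desc') yields roughly 'page=2&sort=-name',
# which the table header template can use as the column's link target.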
@register.simple_tag
def bob_export_url(query, value, export_variable_name='export'):
"""Modify the query string of an URL to change the ``export_variable_name``
argument.
"""
if not query:
return '%s=%s' % (export_variable_name, value)
query = query.copy()
if value:
query[export_variable_name] = value
else:
try:
del query[export_variable_name]
except KeyError:
pass
return query.urlencode()
@register.simple_tag
def dependency_data(form):
"""Render the data-bob-dependencies tag if this is a DependencyForm"""
if not isinstance(form, DependencyForm):
return ''
return 'data-bob-dependencies="{0}"'.format(
esc(json.dumps(form.get_dependencies_for_js())))
@register.inclusion_tag('bob/field_wrapper.html')
def field_wrapper(field):
"""Render the full control-group tag of a field."""
return {'field': field}
@register.filter
def get_item(obj, key):
return obj[key]
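# Usage note (illustrative): this filter lets a template index a dict with a variable
# key, e.g. {{ row|get_item:column_key }}, which plain dotted lookup cannot do; the
# variable names here are assumptions for the example.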
| return d.strftime('%H:%M') | conditional_block |
bob.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import datetime
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape as esc
from django.utils.timesince import timesince
from bob.forms.dependency import DependencyForm
register = template.Library()
@register.simple_tag
def bob_icon(name, is_white=False):
"""
Display a bootstrap icon.
:param name: The name of the icon to display.
:param is_white: Whether the icon should be white (for dark background).
"""
white = ' icon-white' if is_white else ''
return mark_safe('<i class="icon-%s%s"></i>' % (esc(name), white))
@register.inclusion_tag('bob/main_menu.html')
def main_menu(items, selected, title=None, search=None, white=False,
position='', title_url="/"):
"""
Show main menu bar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param title: The title to show in the menu bar.
:param search: The URL for the search form.
:param white: If True, the menu bar will be white.
:param position: Empty, or one of ``'fixed'``, ``'static'``, ``'bottom'``.
"""
positions = {
'static': 'navbar-static-top',
'fixed': 'navbar-fixed-top',
'bottom': 'navbar-fixed-bottom',
}
klass = ['navbar', positions.get(position, '')]
if not white:
klass.append('navbar-inverse')
return {
'items': items,
'selected': selected,
'title': title,
'search': search,
'position': position,
'white': bool(white),
'title_url': title_url,
'class': ' '.join(klass),
}
@register.inclusion_tag('bob/dropdown_items.html')
def dropdown_items(items, white=False):
"""
Render dropdown items.
:param items: The parent :class:`bob.menu.MenuItem` whose subitems are shown.
:param white: If True, the menu bar will be white.
"""
return {
'items': items.subitems,
'white': bool(white),
}
@register.simple_tag
def render_cell(column, row):
"""Render the cell for a given column and row."""
return column.render_cell(row)
@register.inclusion_tag('bob/tab_menu.html')
def tab_menu(items, selected, side=None):
"""
Show a menu in the form of tabs.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param side: The direction of tabs, may be one of ``"left"``, ``"right"``,
``"top"`` or ``"bottom"``. Defaults to ``"top"``.
"""
return {
'items': items,
'selected': selected,
'side': side,
}
@register.inclusion_tag('bob/sidebar_menu.html')
def sidebar_menu(items, selected):
"""
Show menu in a sidebar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
"""
return {
'items': items,
'selected': selected,
}
@register.inclusion_tag('bob/sidebar_menu_subitems.html')
def | (item, selected):
"""
Show subitems of a menu in a sidebar.
"""
return {
'item': item,
'selected': selected,
}
@register.inclusion_tag('bob/pagination.html')
def pagination(page, show_all=False, show_csv=False,
fugue_icons=False, url_query=None, neighbors=1,
query_variable_name='page', export_variable_name='export'):
"""
Display pagination for a list of items.
:param page: Django's paginator page to display.
:param show_all: Whether to show a link for disabling pagination.
:param show_csv: Whether to show a link to CSV download.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param url_query: The query parameters to add to all page links.
:param neighbors: How many neighboring pages to show in paginator.
"""
if not page:
return {
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'export_variable_name': export_variable_name,
}
paginator = page.paginator
page_no = page.number
pages = paginator.page_range[
max(0, page_no - 1 - neighbors):
min(paginator.num_pages, page_no + neighbors)
]
if 1 not in pages:
pages.insert(0, 1)
pages.insert(1, '...')
if paginator.num_pages not in pages:
pages.append('...')
pages.append(paginator.num_pages)
urls = []
for item in pages:
if item == '...':
urls.append(changed_url(url_query, query_variable_name, page_no))
else:
urls.append(changed_url(url_query, query_variable_name, item))
url_pages = zip(pages, urls)
return {
'paginator': paginator,
'page_no': page_no,
'page': page,
'pages': pages,
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'url_previous_page': changed_url(
url_query,
query_variable_name,
page_no - 1
),
'url_next_page': changed_url(
url_query,
query_variable_name,
page_no + 1
),
'url_pages': url_pages,
'url_all': changed_url(url_query, query_variable_name, 0),
'export_variable_name': export_variable_name,
}
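# Template usage sketch (an assumption, not taken from the original docs): if this
# module is registered as the `bob` template library, a list view's template could
# render the paginator with:
#     {% load bob %}
#     {% pagination page_obj url_query=request.GET %}
# url_query should be a QueryDict such as request.GET, because changed_url() calls
# .copy() and .urlencode() on it.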
def changed_url(query, name, value):
if not query:
return '%s=%s' % (name, value)
query = query.copy()
if value is not None and value not in ('1', 1):
query[name] = value
else:
try:
del query[name]
except KeyError:
pass
return query.urlencode()
@register.filter
def bob_export(query, export):
"""Modify the query string of an URL to change the ``export`` argument."""
if not query:
return 'export=%s' % export
query = query.copy()
if export:
query['export'] = export
else:
try:
del query['export']
except KeyError:
pass
return query.urlencode()
@register.filter
def timesince_limited(d):
"""
Display the time between the given date and now in a human-readable form if the
time span is less than a day; otherwise display the date normally.
:param d: The date to display.
"""
today = datetime.datetime.now()
delta = datetime.timedelta
interval = today - d
if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):
if interval < delta(days=0, hours=1):
return timesince(d) + ' ago '
else:
return d.strftime('%H:%M')
else:
return d
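# Worked example (illustrative): a timestamp 20 minutes old renders as
# "20 minutes ago ", one from 09:15 earlier the same day renders as "09:15", and a
# date from any earlier day is returned unchanged, so the template applies its
# default date formatting.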
@register.inclusion_tag('bob/form.html')
def form(form, action="", method="POST", fugue_icons=False,
css_class="form-horizontal", title="", submit_label='Save'):
"""
Render a form.
:param form: The form to render.
:param action: The submit URL.
:param method: The submit method, either ``"GET"`` or ``"POST"``.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param css_class: The CSS class to use for the ``<form>`` tag.
:param title: Form title.
:param submit_label: Submit button label.
"""
return {
'form': form,
'action': action,
'title': title,
'method': method,
'fugue_icons': fugue_icons,
'css_class': css_class,
'submit_label': submit_label,
}
@register.inclusion_tag('bob/form_as_fieldsets.html')
def form_as_fieldsets(form_instance, *args, **kwargs):
if not getattr(form_instance.Meta, 'fieldset', None):
raise Exception(
"{}.Meta.fieldset attribute is UNDEFINED or EMPTY".format(
repr(form_instance)
)
)
return form(form_instance, *args, **kwargs)
@register.inclusion_tag('bob/form.html')
def form_horizontal(*args, **kwargs):
return form(*args, **kwargs)
@register.inclusion_tag('bob/table_header.html')
def table_header(columns=None, url_query=None, sort=None, fugue_icons=False,
sort_variable_name='sort'):
"""
Render a table header with sortable column options.
:param columns: A list of :py:class:`bob.data_table.DataTableColumn` objects.
:param url_query: The query parameters to add to all page links.
:param sort: The currently applied sort expression, used to mark the sorted column.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
A column's ``show_conditions`` field holds a function and its argument, which
together determine whether the column is displayed.
"""
new_columns = []
for column in columns:
if isinstance(column.show_conditions, tuple):
func, arg = column.show_conditions
if func(arg):
new_columns.append(column)
else:
new_columns.append(column)
return {
'columns': new_columns,
'sort': sort,
'url_query': url_query,
'fugue_icons': fugue_icons,
'sort_variable_name': sort_variable_name,
}
@register.simple_tag
def bob_sort_url(query, field, sort_variable_name, type):
"""Modify the query string of an URL to change the ``sort_variable_name``
argument.
"""
query = query.copy()
if type == 'desc':
query[sort_variable_name] = '-' + field
elif type == 'asc':
query[sort_variable_name] = field
return query.urlencode()
@register.simple_tag
def bob_export_url(query, value, export_variable_name='export'):
"""Modify the query string of an URL to change the ``export_variable_name``
argument.
"""
if not query:
return '%s=%s' % (export_variable_name, value)
query = query.copy()
if value:
query[export_variable_name] = value
else:
try:
del query[export_variable_name]
except KeyError:
pass
return query.urlencode()
@register.simple_tag
def dependency_data(form):
"""Render the data-bob-dependencies tag if this is a DependencyForm"""
if not isinstance(form, DependencyForm):
return ''
return 'data-bob-dependencies="{0}"'.format(
esc(json.dumps(form.get_dependencies_for_js())))
@register.inclusion_tag('bob/field_wrapper.html')
def field_wrapper(field):
"""Render the full control-group tag of a field."""
return {'field': field}
@register.filter
def get_item(obj, key):
return obj[key]
| sidebar_menu_subitems | identifier_name |
bob.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import datetime
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape as esc
from django.utils.timesince import timesince
from bob.forms.dependency import DependencyForm
register = template.Library()
@register.simple_tag
def bob_icon(name, is_white=False):
"""
Display a bootstrap icon.
:param name: The name of the icon to display.
:param is_white: Whether the icon should be white (for dark background).
"""
white = ' icon-white' if is_white else ''
return mark_safe('<i class="icon-%s%s"></i>' % (esc(name), white))
@register.inclusion_tag('bob/main_menu.html')
def main_menu(items, selected, title=None, search=None, white=False,
position='', title_url="/"):
"""
Show main menu bar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param title: The title to show in the menu bar.
:param search: The URL for the search form.
:param white: If True, the menu bar will be white.
:param position: Empty, or one of ``'fixed'``, ``'static'``, ``'bottom'``.
"""
positions = {
'static': 'navbar-static-top',
'fixed': 'navbar-fixed-top',
'bottom': 'navbar-fixed-bottom',
}
klass = ['navbar', positions.get(position, '')]
if not white:
klass.append('navbar-inverse')
return {
'items': items,
'selected': selected,
'title': title,
'search': search,
'position': position,
'white': bool(white),
'title_url': title_url,
'class': ' '.join(klass),
}
@register.inclusion_tag('bob/dropdown_items.html')
def dropdown_items(items, white=False):
"""
Render dropdown items.
:param items: The parent :class:`bob.menu.MenuItem` whose subitems are shown.
:param white: If True, the menu bar will be white.
"""
return {
'items': items.subitems,
'white': bool(white),
}
@register.simple_tag
def render_cell(column, row):
"""Render the cell for a given column and row."""
return column.render_cell(row)
@register.inclusion_tag('bob/tab_menu.html')
def tab_menu(items, selected, side=None):
"""
Show a menu in the form of tabs.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param side: The direction of tabs, may be one of ``"left"``, ``"right"``,
``"top"`` or ``"bottom"``. Defaults to ``"top"``.
"""
return {
'items': items,
'selected': selected,
'side': side,
}
@register.inclusion_tag('bob/sidebar_menu.html')
def sidebar_menu(items, selected):
"""
Show menu in a sidebar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
"""
return {
'items': items,
'selected': selected,
}
@register.inclusion_tag('bob/sidebar_menu_subitems.html')
def sidebar_menu_subitems(item, selected):
"""
Show subitems of a menu in a sidebar.
"""
return {
'item': item,
'selected': selected,
}
@register.inclusion_tag('bob/pagination.html')
def pagination(page, show_all=False, show_csv=False,
fugue_icons=False, url_query=None, neighbors=1,
query_variable_name='page', export_variable_name='export'):
"""
Display pagination for a list of items.
:param page: Django's paginator page to display.
:param show_all: Whether to show a link for disabling pagination.
:param show_csv: Whether to show a link to CSV download.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param url_query: The query parameters to add to all page links.
:param neighbors: How many neighboring pages to show in paginator.
"""
if not page:
return {
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'export_variable_name': export_variable_name,
}
paginator = page.paginator
page_no = page.number
pages = paginator.page_range[
max(0, page_no - 1 - neighbors):
min(paginator.num_pages, page_no + neighbors)
]
if 1 not in pages:
pages.insert(0, 1)
pages.insert(1, '...')
if paginator.num_pages not in pages:
pages.append('...')
pages.append(paginator.num_pages)
urls = []
for item in pages:
if item == '...':
urls.append(changed_url(url_query, query_variable_name, page_no))
else:
urls.append(changed_url(url_query, query_variable_name, item))
url_pages = zip(pages, urls)
return {
'paginator': paginator,
'page_no': page_no,
'page': page,
'pages': pages,
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'url_previous_page': changed_url(
url_query,
query_variable_name,
page_no - 1
),
'url_next_page': changed_url(
url_query,
query_variable_name,
page_no + 1
),
'url_pages': url_pages,
'url_all': changed_url(url_query, query_variable_name, 0),
'export_variable_name': export_variable_name,
}
def changed_url(query, name, value):
if not query:
return '%s=%s' % (name, value)
query = query.copy()
if value is not None and value not in ('1', 1):
query[name] = value
else:
try:
del query[name]
except KeyError:
pass
return query.urlencode()
@register.filter
def bob_export(query, export):
"""Modify the query string of an URL to change the ``export`` argument."""
if not query:
return 'export=%s' % export
query = query.copy()
if export:
query['export'] = export
else:
try:
del query['export']
except KeyError:
pass
return query.urlencode()
@register.filter
def timesince_limited(d):
"""
Display the time between the given date and now in a human-readable form if the
time span is less than a day; otherwise display the date normally.
:param d: The date to display.
"""
today = datetime.datetime.now()
delta = datetime.timedelta
interval = today - d
if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):
if interval < delta(days=0, hours=1):
return timesince(d) + ' ago '
else:
return d.strftime('%H:%M')
else:
return d
@register.inclusion_tag('bob/form.html')
def form(form, action="", method="POST", fugue_icons=False,
css_class="form-horizontal", title="", submit_label='Save'):
"""
Render a form.
:param form: The form to render.
:param action: The submit URL.
:param method: The submit method, either ``"GET"`` or ``"POST"``.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param css_class: The CSS class to use for the ``<form>`` tag.
:param title: Form title.
:param submit_label: Submit button label.
"""
return {
'form': form,
'action': action,
'title': title,
'method': method,
'fugue_icons': fugue_icons,
'css_class': css_class,
'submit_label': submit_label,
}
@register.inclusion_tag('bob/form_as_fieldsets.html')
def form_as_fieldsets(form_instance, *args, **kwargs):
if not getattr(form_instance.Meta, 'fieldset', None):
raise Exception(
"{}.Meta.fieldset attribute is UNDEFINED or EMPTY".format(
repr(form_instance)
)
)
return form(form_instance, *args, **kwargs)
@register.inclusion_tag('bob/form.html')
def form_horizontal(*args, **kwargs):
return form(*args, **kwargs)
@register.inclusion_tag('bob/table_header.html')
def table_header(columns=None, url_query=None, sort=None, fugue_icons=False,
sort_variable_name='sort'):
"""
Render a table header with sortable column options.
:param columns: A list of :py:class:`bob.data_table.DataTableColumn` objects.
:param url_query: The query parameters to add to all page links.
:param sort: The currently applied sort expression, used to mark the sorted column.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
A column's ``show_conditions`` field holds a function and its argument, which
together determine whether the column is displayed.
"""
new_columns = []
for column in columns:
if isinstance(column.show_conditions, tuple):
func, arg = column.show_conditions
if func(arg):
new_columns.append(column)
else:
new_columns.append(column)
return {
'columns': new_columns,
'sort': sort,
'url_query': url_query,
'fugue_icons': fugue_icons, |
@register.simple_tag
def bob_sort_url(query, field, sort_variable_name, type):
"""Modify the query string of an URL to change the ``sort_variable_name``
argument.
"""
query = query.copy()
if type == 'desc':
query[sort_variable_name] = '-' + field
elif type == 'asc':
query[sort_variable_name] = field
return query.urlencode()
@register.simple_tag
def bob_export_url(query, value, export_variable_name='export'):
"""Modify the query string of an URL to change the ``export_variable_name``
argument.
"""
if not query:
return '%s=%s' % (export_variable_name, value)
query = query.copy()
if value:
query[export_variable_name] = value
else:
try:
del query[export_variable_name]
except KeyError:
pass
return query.urlencode()
@register.simple_tag
def dependency_data(form):
"""Render the data-bob-dependencies tag if this is a DependencyForm"""
if not isinstance(form, DependencyForm):
return ''
return 'data-bob-dependencies="{0}"'.format(
esc(json.dumps(form.get_dependencies_for_js())))
@register.inclusion_tag('bob/field_wrapper.html')
def field_wrapper(field):
"""Render the full control-group tag of a field."""
return {'field': field}
@register.filter
def get_item(obj, key):
return obj[key] | 'sort_variable_name': sort_variable_name,
}
| random_line_split |
bob.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import datetime
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape as esc
from django.utils.timesince import timesince
from bob.forms.dependency import DependencyForm
register = template.Library()
@register.simple_tag
def bob_icon(name, is_white=False):
"""
Display a bootstrap icon.
:param name: The name of the icon to display.
:param is_white: Whether the icon should be white (for dark background).
"""
white = ' icon-white' if is_white else ''
return mark_safe('<i class="icon-%s%s"></i>' % (esc(name), white))
@register.inclusion_tag('bob/main_menu.html')
def main_menu(items, selected, title=None, search=None, white=False,
position='', title_url="/"):
"""
Show main menu bar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param title: The title to show in the menu bar.
:param search: The URL for the search form.
:param white: If True, the menu bar will be white.
:param position: Empty, or one of ``'fixed'``, ``'static'``, ``'bottom'``.
"""
positions = {
'static': 'navbar-static-top',
'fixed': 'navbar-fixed-top',
'bottom': 'navbar-fixed-bottom',
}
klass = ['navbar', positions.get(position, '')]
if not white:
klass.append('navbar-inverse')
return {
'items': items,
'selected': selected,
'title': title,
'search': search,
'position': position,
'white': bool(white),
'title_url': title_url,
'class': ' '.join(klass),
}
@register.inclusion_tag('bob/dropdown_items.html')
def dropdown_items(items, white=False):
"""
Render dropdown items.
:param items: The parent :class:`bob.menu.MenuItem` whose subitems are shown.
:param white: If True, the menu bar will be white.
"""
return {
'items': items.subitems,
'white': bool(white),
}
@register.simple_tag
def render_cell(column, row):
"""Render the cell for a given column and row."""
return column.render_cell(row)
@register.inclusion_tag('bob/tab_menu.html')
def tab_menu(items, selected, side=None):
|
@register.inclusion_tag('bob/sidebar_menu.html')
def sidebar_menu(items, selected):
"""
Show menu in a sidebar.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
"""
return {
'items': items,
'selected': selected,
}
@register.inclusion_tag('bob/sidebar_menu_subitems.html')
def sidebar_menu_subitems(item, selected):
"""
Show subitems of a menu in a sidebar.
"""
return {
'item': item,
'selected': selected,
}
@register.inclusion_tag('bob/pagination.html')
def pagination(page, show_all=False, show_csv=False,
fugue_icons=False, url_query=None, neighbors=1,
query_variable_name='page', export_variable_name='export'):
"""
Display pagination for a list of items.
:param page: Django's paginator page to display.
:param show_all: Whether to show a link for disabling pagination.
:param show_csv: Whether to show a link to CSV download.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param url_query: The query parameters to add to all page links.
:param neighbors: How many neighboring pages to show in paginator.
"""
if not page:
return {
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'export_variable_name': export_variable_name,
}
paginator = page.paginator
page_no = page.number
pages = paginator.page_range[
max(0, page_no - 1 - neighbors):
min(paginator.num_pages, page_no + neighbors)
]
if 1 not in pages:
pages.insert(0, 1)
pages.insert(1, '...')
if paginator.num_pages not in pages:
pages.append('...')
pages.append(paginator.num_pages)
urls = []
for item in pages:
if item == '...':
urls.append(changed_url(url_query, query_variable_name, page_no))
else:
urls.append(changed_url(url_query, query_variable_name, item))
url_pages = zip(pages, urls)
return {
'paginator': paginator,
'page_no': page_no,
'page': page,
'pages': pages,
'show_all': show_all,
'show_csv': show_csv,
'fugue_icons': fugue_icons,
'url_query': url_query,
'url_previous_page': changed_url(
url_query,
query_variable_name,
page_no - 1
),
'url_next_page': changed_url(
url_query,
query_variable_name,
page_no + 1
),
'url_pages': url_pages,
'url_all': changed_url(url_query, query_variable_name, 0),
'export_variable_name': export_variable_name,
}
def changed_url(query, name, value):
if not query:
return '%s=%s' % (name, value)
query = query.copy()
if value is not None and value not in ('1', 1):
query[name] = value
else:
try:
del query[name]
except KeyError:
pass
return query.urlencode()
@register.filter
def bob_export(query, export):
"""Modify the query string of an URL to change the ``export`` argument."""
if not query:
return 'export=%s' % export
query = query.copy()
if export:
query['export'] = export
else:
try:
del query['export']
except KeyError:
pass
return query.urlencode()
@register.filter
def timesince_limited(d):
"""
Display the time between the given date and now in a human-readable form if the
time span is less than a day; otherwise display the date normally.
:param d: The date to display.
"""
today = datetime.datetime.now()
delta = datetime.timedelta
interval = today - d
if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):
if interval < delta(days=0, hours=1):
return timesince(d) + ' ago '
else:
return d.strftime('%H:%M')
else:
return d
@register.inclusion_tag('bob/form.html')
def form(form, action="", method="POST", fugue_icons=False,
css_class="form-horizontal", title="", submit_label='Save'):
"""
Render a form.
:param form: The form to render.
:param action: The submit URL.
:param method: The submit method, either ``"GET"`` or ``"POST"``.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
:param css_class: The CSS class to use for the ``<form>`` tag.
:param title: Form title.
:param submit_label: Submit button label.
"""
return {
'form': form,
'action': action,
'title': title,
'method': method,
'fugue_icons': fugue_icons,
'css_class': css_class,
'submit_label': submit_label,
}
@register.inclusion_tag('bob/form_as_fieldsets.html')
def form_as_fieldsets(form_instance, *args, **kwargs):
if not getattr(form_instance.Meta, 'fieldset', None):
raise Exception(
"{}.Meta.fieldset attribute is UNDEFINED or EMPTY".format(
repr(form_instance)
)
)
return form(form_instance, *args, **kwargs)
@register.inclusion_tag('bob/form.html')
def form_horizontal(*args, **kwargs):
return form(*args, **kwargs)
@register.inclusion_tag('bob/table_header.html')
def table_header(columns=None, url_query=None, sort=None, fugue_icons=False,
sort_variable_name='sort'):
"""
Render a table header with sortable column options.
:param columns: A list of :py:class:`bob.data_table.DataTableColumn` objects.
:param url_query: The query parameters to add to all page links.
:param sort: The currently applied sort expression, used to mark the sorted column.
:param fugue_icons: Whether to use Fugue icons or Bootstrap icons.
A column's ``show_conditions`` field holds a function and its argument, which
together determine whether the column is displayed.
"""
new_columns = []
for column in columns:
if isinstance(column.show_conditions, tuple):
func, arg = column.show_conditions
if func(arg):
new_columns.append(column)
else:
new_columns.append(column)
return {
'columns': new_columns,
'sort': sort,
'url_query': url_query,
'fugue_icons': fugue_icons,
'sort_variable_name': sort_variable_name,
}
@register.simple_tag
def bob_sort_url(query, field, sort_variable_name, type):
"""Modify the query string of an URL to change the ``sort_variable_name``
argument.
"""
query = query.copy()
if type == 'desc':
query[sort_variable_name] = '-' + field
elif type == 'asc':
query[sort_variable_name] = field
return query.urlencode()
@register.simple_tag
def bob_export_url(query, value, export_variable_name='export'):
"""Modify the query string of an URL to change the ``export_variable_name``
argument.
"""
if not query:
return '%s=%s' % (export_variable_name, value)
query = query.copy()
if value:
query[export_variable_name] = value
else:
try:
del query[export_variable_name]
except KeyError:
pass
return query.urlencode()
@register.simple_tag
def dependency_data(form):
"""Render the data-bob-dependencies tag if this is a DependencyForm"""
if not isinstance(form, DependencyForm):
return ''
return 'data-bob-dependencies="{0}"'.format(
esc(json.dumps(form.get_dependencies_for_js())))
@register.inclusion_tag('bob/field_wrapper.html')
def field_wrapper(field):
"""Render the full control-group tag of a field."""
return {'field': field}
@register.filter
def get_item(obj, key):
return obj[key]
| """
Show a menu in the form of tabs.
:param items: The list of :class:`bob.menu.MenuItem` instances to show.
:param selected: The :data:`name` of the currently selected item.
:param side: The direction of tabs, may be one of ``"left"``, ``"right"``,
``"top"`` or ``"bottom"``. Defaults to ``"top"``.
"""
return {
'items': items,
'selected': selected,
'side': side,
} | identifier_body |
render.rs | //! Definitions, constructors, and management for the EnsoGL shapes that are used to draw an edge.
//!
//! The core function of this module is to translate edge layouts into the shape parameters that
//! will implement them.
use crate::prelude::*;
use ensogl::display::shape::*;
use crate::GraphLayers;
use super::layout::Corner;
use super::layout::EdgeSplit;
use super::layout::Oriented;
use super::layout::SplitArc;
use super::layout::TargetAttachment;
use ensogl::data::color;
use ensogl::display;
use ensogl::display::scene::Scene;
use std::f32::consts::FRAC_PI_2;
use std::f32::consts::PI;
use std::f32::consts::TAU;
// =================
// === Constants ===
// =================
const LINE_WIDTH: f32 = 4.0;
const HOVER_EXTENSION: f32 = 10.0;
pub(super) const HOVER_WIDTH: f32 = LINE_WIDTH + HOVER_EXTENSION;
mod arrow {
use super::*;
pub(super) const SIZE: Vector2 = Vector2(18.75, 18.75);
}
mod attachment {
/// Extra length to add to the top and bottom of the target-attachment bit, to ensure that it
/// appears to pass through the top of the node. Without this adjustment, inexact
/// floating-point math and anti-aliasing would cause a 1-pixel gap artifact right where
/// the attachment should meet the corner at the edge of the node.
pub(super) const LENGTH_ADJUSTMENT: f32 = 0.1;
}
// ===================
// === Edge Shapes ===
// ===================
/// The shapes used to render an edge.
#[derive(Debug, Default)]
pub(super) struct Shapes {
/// The individual [`Corner`]s making up the edge. Each is drawn in the focused or unfocused
/// color.
sections: RefCell<Vec<Rectangle>>,
/// A pair of [`arc`] shapes used when the mouse is over the rounded corner, and the edge
/// must be split into focused and unfocused sides at a certain angle along the arc.
split_arc: RefCell<Option<[arc::View; 2]>>,
/// Wider versions of the [`sections`], for receiving mouse events.
hover_sections: RefCell<Vec<Rectangle>>,
/// The end of the edge that is drawn on top of the node and connects to the target node's
/// input port.
target_attachment: RefCell<Option<Rectangle>>,
/// Arrow drawn on long backward edges to indicate data flow direction.
dataflow_arrow: RefCell<Option<Rectangle>>,
/// A rectangle representing the source node shape when the edge is in the detached state. Used
/// to mask out the edge fragment that would otherwise be drawn over the source node.
source_cutout: RefCell<Option<Rectangle>>,
}
impl Shapes {
/// Redraw the arrow used to mark long backward edges.
pub(super) fn redraw_dataflow_arrow(
&self,
parent: &impl ShapeParent,
parameters: RedrawDataflowArrow,
) {
let RedrawDataflowArrow { arrow, source_color, target_color, focus_split, is_attached } =
parameters;
let shape = self.dataflow_arrow.take();
if let Some(arrow_center) = arrow {
// The arrow will have the same color as the target-end of the first corner from the
// source (this is the `arrow_center` point).
let color = match focus_split.map(|split| split.corner_index) {
Some(0) => target_color,
_ => source_color,
};
let shape = shape.unwrap_or_else(|| parent.new_dataflow_arrow());
shape.set_xy(arrow_center - arrow::SIZE / 2.0);
shape.set_color(color);
Self::set_layer(parent, &shape, is_attached, false);
self.dataflow_arrow.replace(Some(shape));
}
}
/// Redraw the invisible mouse-event-catching edges.
pub(super) fn redraw_hover_sections(
&self,
parent: &impl ShapeParent,
corners: &[Oriented<Corner>],
) {
let hover_factory = self
.hover_sections
.take()
.into_iter()
.chain(iter::repeat_with(|| parent.new_hover_section()));
*self.hover_sections.borrow_mut() = corners
.iter()
.zip(hover_factory)
.map(|(corner, shape)| draw_corner(shape, **corner, INVISIBLE_HOVER_COLOR, HOVER_WIDTH))
.collect();
}
/// Redraw the sections, each of which is a [`Rectangle`] implementing a [`Corner`], or multiple
/// [`Rectangle`]s and multiple [`arc::View`]s, if it is a split [`Corner`].
pub(super) fn redraw_sections(&self, parent: &impl ShapeParent, parameters: RedrawSections) {
let RedrawSections { corners, source_color, target_color, focus_split, is_attached } =
parameters;
let corner_index =
focus_split.map(|split| split.corner_index).unwrap_or_else(|| corners.len());
let split_corner = focus_split.map(|split| split.split_corner);
let mut section_factory =
self.sections.take().into_iter().chain(iter::repeat_with(|| parent.new_section()));
let mut new_sections = self.redraw_complete_sections(
&mut section_factory,
corners,
corner_index,
source_color,
target_color,
);
let arc_shapes = self.split_arc.take();
if let Some(split_corner) = split_corner {
if let Some(split_arc) = split_corner.split_arc {
let arc_shapes = arc_shapes.unwrap_or_else(|| [parent.new_arc(), parent.new_arc()]);
let arc_shapes = draw_split_arc(arc_shapes, split_arc);
arc_shapes[0].color.set(source_color.into());
arc_shapes[1].color.set(target_color.into());
self.split_arc.replace(Some(arc_shapes));
}
let (source_shape, target_shape) =
(section_factory.next().unwrap(), section_factory.next().unwrap());
new_sections.extend([
draw_corner(source_shape, *split_corner.source_end, source_color, LINE_WIDTH),
draw_corner(target_shape, *split_corner.target_end, target_color, LINE_WIDTH),
]);
}
for (i, shape) in new_sections.iter().enumerate() {
Self::set_layer(parent, shape, is_attached, i == 0);
}
*self.sections.borrow_mut() = new_sections;
}
pub(crate) fn redraw_cutout(
&self,
parent: &impl ShapeParent,
is_attached: bool,
source_size: Vector2,
) {
let cutout = self.source_cutout.take();
if !is_attached {
let cutout = cutout.unwrap_or_else(|| parent.new_cutout());
cutout.set_xy(-source_size / 2.0);
cutout.set_size(source_size);
self.source_cutout.replace(Some(cutout));
}
}
/// Redraw the sections that aren't split by the focus position.
pub(super) fn redraw_complete_sections(
&self,
section_factory: impl Iterator<Item = Rectangle>,
corners: &[Oriented<Corner>],
corner_index: usize,
source_color: color::Rgba,
target_color: color::Rgba,
) -> Vec<Rectangle> {
corners
.iter()
.enumerate()
.filter_map(|(i, corner)| {
if i == corner_index {
None
} else {
let color = match i < corner_index {
true => source_color,
false => target_color,
};
Some((color, corner))
}
})
.zip(section_factory)
.map(|((color, corner), shape)| draw_corner(shape, **corner, color, LINE_WIDTH))
.collect()
}
/// Redraw the little bit that goes on top of the target node.
pub(super) fn redraw_target_attachment(
&self,
parent: &impl ShapeParent,
target_attachment: Option<TargetAttachment>,
color: color::Rgba,
) {
let shape = self.target_attachment.take();
if let Some(TargetAttachment { target, length }) = target_attachment
&& length > f32::EPSILON {
let shape = shape.unwrap_or_else(|| parent.new_target_attachment());
shape.set_size_y(length + attachment::LENGTH_ADJUSTMENT * 2.0);
let offset = Vector2(-LINE_WIDTH / 2.0, - length - attachment::LENGTH_ADJUSTMENT);
shape.set_xy(target + offset);
shape.set_color(color);
self.target_attachment.replace(Some(shape));
}
}
/// Add the given shape to the appropriate layer depending on whether it is attached.
fn set_layer(
parent: &impl ShapeParent,
shape: &Rectangle,
below_nodes: bool,
near_source: bool,
) {
let layers = parent.layers();
let layer = if below_nodes {
&layers.edge_below_nodes
} else if near_source {
&layers.masked_edge_above_nodes
} else {
&layers.edge_above_nodes
};
layer.add(shape);
}
}
// === Redraw parameters ====
/// Arguments passed to [`Shapes::redraw_sections`].
pub(super) struct RedrawSections<'a> {
/// The corners to be redrawn.
pub(super) corners: &'a [Oriented<Corner>],
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
/// Arguments passed to [`Shapes::redraw_dataflow_arrow`].
pub(super) struct RedrawDataflowArrow {
/// The center of the arrow, if the arrow should be drawn.
pub(super) arrow: Option<Vector2>,
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
// =========================
// === Shape Definitions ===
// =========================
/// An arc around the origin. `outer_radius` determines the distance from the origin to the outer
/// edge of the arc, `stroke_width` the width of the arc. The arc starts at `start_angle`, relative
/// to the origin. Its radial size is `sector_angle`. The ends are flat, not rounded as in
/// [`RoundedArc`].
mod arc {
use super::*;
ensogl::shape! {
pointer_events = false;
(
style: Style,
color: Vector4,
outer_radius: f32,
stroke_width: f32,
start_angle: f32,
sector_angle: f32,
) {
let circle = Circle(outer_radius.px()) - Circle((outer_radius - stroke_width).px());
let angle_adjust = Var::<f32>::from(FRAC_PI_2);
let rotate_angle = -start_angle + angle_adjust - §or_angle / 2.0;
let angle = PlaneAngleFast(sector_angle).rotate(rotate_angle);
let angle = angle.grow(0.5.px());
let shape = circle * angle;
let shape = shape.fill(color);
shape.into()
}
}
}
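// Illustrative note (not part of the original definition): with, say, an `outer_radius`
// of 20.0, a `stroke_width` of LINE_WIDTH and a `sector_angle` of FRAC_PI_2, this shape
// is roughly a quarter of a 4 px wide ring whose outer edge sits 20 px from the origin;
// `start_angle` only shifts where along the ring that quarter begins.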
// ======================
// === Shape Creation ===
// ======================
pub(super) trait ShapeParent: display::Object {
fn scene(&self) -> &Scene;
fn layers(&self) -> &GraphLayers;
/// Create a shape object to render one of the [`Corner`]s making up the edge.
fn new_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(LINE_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
new
}
/// Create a shape object to render the invisible hover area corresponding to one of the
/// [`Corner`]s making up the edge.
fn new_hover_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(HOVER_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
self.display_object().add_child(&new);
self.layers().edge_below_nodes.add(&new);
new
}
/// Create a shape object to render an arbitrary-angle arc. This is used when the focus is split
/// in the rounded part of a [`Corner`].
fn new_arc(&self) -> arc::View {
let arc = arc::View::new(); |
/// Create a shape object to render the little bit at the target end of the edge, that draws on
/// top of the node.
fn new_target_attachment(&self) -> Rectangle {
let new = Rectangle::new();
new.set_size_x(LINE_WIDTH);
new.set_border_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
self.layers().edge_above_nodes.add(&new);
new
}
/// Create a shape object to render the arrow that is drawn on long backward edges to show the
/// direction of data flow.
fn new_dataflow_arrow(&self) -> Rectangle {
let new = SimpleTriangle::from_size(arrow::SIZE);
new.set_pointer_events(false);
self.display_object().add_child(&new);
new.into()
}
/// Create a shape object to render the cutout mask for the edge nearby the source node.
fn new_cutout(&self) -> Rectangle {
let cutout = Rectangle::new();
self.display_object().add_child(&cutout);
// FIXME (temporary assumption): Currently we assume that the node background is a rectangle
// that always has rounded corners. Ideally we would somehow use the actual source node's background
// shape for this.
cutout.set_corner_radius(crate::component::node::CORNER_RADIUS);
self.layers().edge_above_nodes_cutout.add(&cutout);
// Pointer events must be enabled, so that the hover area is masked out as well.
cutout.set_pointer_events(true);
cutout
}
}
// =========================
// === Rendering Corners ===
// =========================
/// Set the given [`Rectangle`]'s geometry to draw this corner shape.
///
/// Note that the shape's `inset` and `border` should be the same value as the provided
/// [`line_width`]. They are not set here as an optimization: When shapes are reused, the value does
/// not need to be set again, reducing needed GPU uploads.
pub(super) fn draw_corner(
shape: Rectangle,
corner: Corner,
color: color::Rgba,
line_width: f32,
) -> Rectangle {
shape.set_xy(corner.origin(line_width));
shape.set_size(corner.size(line_width));
shape.set_clip(corner.clip());
shape.set_corner_radius(corner.radius(line_width));
shape.set_border_color(color);
shape
}
// ==============================
// === Rendering Partial Arcs ===
// ==============================
/// Apply the specified arc-splitting parameters to the given arc shapes.
pub(super) fn draw_split_arc(arc_shapes: [arc::View; 2], split_arc: SplitArc) -> [arc::View; 2] {
let outer_radius = split_arc.radius + LINE_WIDTH / 2.0;
let arc_box = Vector2(outer_radius * 2.0, outer_radius * 2.0);
let arc_offset = Vector2(-outer_radius, -outer_radius);
let geometry = ArcGeometry::bisection(
split_arc.source_end_angle,
split_arc.split_angle,
split_arc.target_end_angle,
);
for (shape, geometry) in arc_shapes.iter().zip(&geometry) {
shape.set_xy(split_arc.origin + arc_offset);
shape.set_size(arc_box);
shape.outer_radius.set(outer_radius);
shape.start_angle.set(geometry.start);
shape.sector_angle.set(geometry.sector);
}
arc_shapes
}
// === Arc geometry ===
#[derive(Debug, Copy, Clone, PartialEq)]
struct ArcGeometry {
start: f32,
sector: f32,
}
impl ArcGeometry {
fn bisection(a: f32, b: f32, c: f32) -> [Self; 2] {
[Self::new_minor(a, b), Self::new_minor(b, c)]
}
fn new_minor(a: f32, b: f32) -> Self {
let start = minor_arc_start(a, b);
let sector = minor_arc_sector(a, b);
Self { start, sector }
}
}
fn minor_arc_start(a: f32, b: f32) -> f32 {
let a = a.rem_euclid(TAU);
let b = b.rem_euclid(TAU);
let wrapped = (a - b).abs() >= PI;
if wrapped {
if a < f32::EPSILON {
b
} else {
a
}
} else {
min(a, b)
}
}
fn minor_arc_sector(a: f32, b: f32) -> f32 {
let a = a.abs();
let b = b.abs();
let ab = (a - b).abs();
min(ab, TAU - ab)
} | arc.stroke_width.set(LINE_WIDTH);
self.display_object().add_child(&arc);
self.layers().edge_below_nodes.add(&arc);
arc
} | random_line_split |
render.rs | //! Definitions, constructors, and management for the EnsoGL shapes that are used to draw an edge.
//!
//! The core function of this module is to translate edge layouts into the shape parameters that
//! will implement them.
use crate::prelude::*;
use ensogl::display::shape::*;
use crate::GraphLayers;
use super::layout::Corner;
use super::layout::EdgeSplit;
use super::layout::Oriented;
use super::layout::SplitArc;
use super::layout::TargetAttachment;
use ensogl::data::color;
use ensogl::display;
use ensogl::display::scene::Scene;
use std::f32::consts::FRAC_PI_2;
use std::f32::consts::PI;
use std::f32::consts::TAU;
// =================
// === Constants ===
// =================
const LINE_WIDTH: f32 = 4.0;
const HOVER_EXTENSION: f32 = 10.0;
pub(super) const HOVER_WIDTH: f32 = LINE_WIDTH + HOVER_EXTENSION;
mod arrow {
use super::*;
pub(super) const SIZE: Vector2 = Vector2(18.75, 18.75);
}
mod attachment {
/// Extra length to add to the top and bottom of the target-attachment bit, to ensure that it
/// appears to pass through the top of the node. Without this adjustment, inexact
/// floating-point math and anti-aliasing would cause a 1-pixel gap artifact right where
/// the attachment should meet the corner at the edge of the node.
pub(super) const LENGTH_ADJUSTMENT: f32 = 0.1;
}
// ===================
// === Edge Shapes ===
// ===================
/// The shapes used to render an edge.
#[derive(Debug, Default)]
pub(super) struct Shapes {
/// The individual [`Corner`]s making up the edge. Each is drawn in the focused or unfocused
/// color.
sections: RefCell<Vec<Rectangle>>,
/// A pair of [`arc`] shapes used when the mouse is over the rounded corner, and the edge
/// must be split into focused and unfocused sides at a certain angle along the arc.
split_arc: RefCell<Option<[arc::View; 2]>>,
/// Wider versions of the [`sections`], for receiving mouse events.
hover_sections: RefCell<Vec<Rectangle>>,
/// The end of the edge that is drawn on top of the node and connects to the target node's
/// input port.
target_attachment: RefCell<Option<Rectangle>>,
/// Arrow drawn on long backward edges to indicate data flow direction.
dataflow_arrow: RefCell<Option<Rectangle>>,
/// A rectangle representing the source node shape when the edge is in the detached state. Used
/// to mask out the edge fragment that would otherwise be drawn over the source node.
source_cutout: RefCell<Option<Rectangle>>,
}
impl Shapes {
/// Redraw the arrow used to mark long backward edges.
pub(super) fn redraw_dataflow_arrow(
&self,
parent: &impl ShapeParent,
parameters: RedrawDataflowArrow,
) {
let RedrawDataflowArrow { arrow, source_color, target_color, focus_split, is_attached } =
parameters;
let shape = self.dataflow_arrow.take();
if let Some(arrow_center) = arrow {
// The arrow will have the same color as the target-end of the first corner from the
// source (this is the `arrow_center` point).
let color = match focus_split.map(|split| split.corner_index) {
Some(0) => target_color,
_ => source_color,
};
let shape = shape.unwrap_or_else(|| parent.new_dataflow_arrow());
shape.set_xy(arrow_center - arrow::SIZE / 2.0);
shape.set_color(color);
Self::set_layer(parent, &shape, is_attached, false);
self.dataflow_arrow.replace(Some(shape));
}
}
/// Redraw the invisible mouse-event-catching edges.
pub(super) fn | (
&self,
parent: &impl ShapeParent,
corners: &[Oriented<Corner>],
) {
let hover_factory = self
.hover_sections
.take()
.into_iter()
.chain(iter::repeat_with(|| parent.new_hover_section()));
*self.hover_sections.borrow_mut() = corners
.iter()
.zip(hover_factory)
.map(|(corner, shape)| draw_corner(shape, **corner, INVISIBLE_HOVER_COLOR, HOVER_WIDTH))
.collect();
}
/// Redraw the sections, each of which is a [`Rectangle`] implementing a [`Corner`], or multiple
/// [`Rectangle`]s and multiple [`arc::View`]s, if it is a split [`Corner`].
pub(super) fn redraw_sections(&self, parent: &impl ShapeParent, parameters: RedrawSections) {
let RedrawSections { corners, source_color, target_color, focus_split, is_attached } =
parameters;
let corner_index =
focus_split.map(|split| split.corner_index).unwrap_or_else(|| corners.len());
let split_corner = focus_split.map(|split| split.split_corner);
let mut section_factory =
self.sections.take().into_iter().chain(iter::repeat_with(|| parent.new_section()));
let mut new_sections = self.redraw_complete_sections(
&mut section_factory,
corners,
corner_index,
source_color,
target_color,
);
let arc_shapes = self.split_arc.take();
if let Some(split_corner) = split_corner {
if let Some(split_arc) = split_corner.split_arc {
let arc_shapes = arc_shapes.unwrap_or_else(|| [parent.new_arc(), parent.new_arc()]);
let arc_shapes = draw_split_arc(arc_shapes, split_arc);
arc_shapes[0].color.set(source_color.into());
arc_shapes[1].color.set(target_color.into());
self.split_arc.replace(Some(arc_shapes));
}
let (source_shape, target_shape) =
(section_factory.next().unwrap(), section_factory.next().unwrap());
new_sections.extend([
draw_corner(source_shape, *split_corner.source_end, source_color, LINE_WIDTH),
draw_corner(target_shape, *split_corner.target_end, target_color, LINE_WIDTH),
]);
}
for (i, shape) in new_sections.iter().enumerate() {
Self::set_layer(parent, shape, is_attached, i == 0);
}
*self.sections.borrow_mut() = new_sections;
}
pub(crate) fn redraw_cutout(
&self,
parent: &impl ShapeParent,
is_attached: bool,
source_size: Vector2,
) {
let cutout = self.source_cutout.take();
if !is_attached {
let cutout = cutout.unwrap_or_else(|| parent.new_cutout());
cutout.set_xy(-source_size / 2.0);
cutout.set_size(source_size);
self.source_cutout.replace(Some(cutout));
}
}
/// Redraw the sections that aren't split by the focus position.
pub(super) fn redraw_complete_sections(
&self,
section_factory: impl Iterator<Item = Rectangle>,
corners: &[Oriented<Corner>],
corner_index: usize,
source_color: color::Rgba,
target_color: color::Rgba,
) -> Vec<Rectangle> {
corners
.iter()
.enumerate()
.filter_map(|(i, corner)| {
if i == corner_index {
None
} else {
let color = match i < corner_index {
true => source_color,
false => target_color,
};
Some((color, corner))
}
})
.zip(section_factory)
.map(|((color, corner), shape)| draw_corner(shape, **corner, color, LINE_WIDTH))
.collect()
}
/// Redraw the little bit that goes on top of the target node.
pub(super) fn redraw_target_attachment(
&self,
parent: &impl ShapeParent,
target_attachment: Option<TargetAttachment>,
color: color::Rgba,
) {
let shape = self.target_attachment.take();
if let Some(TargetAttachment { target, length }) = target_attachment
&& length > f32::EPSILON {
let shape = shape.unwrap_or_else(|| parent.new_target_attachment());
shape.set_size_y(length + attachment::LENGTH_ADJUSTMENT * 2.0);
let offset = Vector2(-LINE_WIDTH / 2.0, - length - attachment::LENGTH_ADJUSTMENT);
shape.set_xy(target + offset);
shape.set_color(color);
self.target_attachment.replace(Some(shape));
}
}
/// Add the given shape to the appropriate layer depending on whether it is attached.
fn set_layer(
parent: &impl ShapeParent,
shape: &Rectangle,
below_nodes: bool,
near_source: bool,
) {
let layers = parent.layers();
let layer = if below_nodes {
&layers.edge_below_nodes
} else if near_source {
&layers.masked_edge_above_nodes
} else {
&layers.edge_above_nodes
};
layer.add(shape);
}
}
// === Redraw parameters ====
/// Arguments passed to [`Shapes::redraw_sections`].
pub(super) struct RedrawSections<'a> {
/// The corners to be redrawn.
pub(super) corners: &'a [Oriented<Corner>],
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
/// Arguments passed to [`Shapes::redraw_dataflow_arrow`].
pub(super) struct RedrawDataflowArrow {
/// The center of the arrow, if the arrow should be drawn.
pub(super) arrow: Option<Vector2>,
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
// =========================
// === Shape Definitions ===
// =========================
/// An arc around the origin. `outer_radius` determines the distance from the origin to the outer
/// edge of the arc, `stroke_width` the width of the arc. The arc starts at `start_angle`, relative
/// to the origin. Its radial size is `sector_angle`. The ends are flat, not rounded as in
/// [`RoundedArc`].
mod arc {
use super::*;
ensogl::shape! {
pointer_events = false;
(
style: Style,
color: Vector4,
outer_radius: f32,
stroke_width: f32,
start_angle: f32,
sector_angle: f32,
) {
let circle = Circle(outer_radius.px()) - Circle((outer_radius - stroke_width).px());
let angle_adjust = Var::<f32>::from(FRAC_PI_2);
let rotate_angle = -start_angle + angle_adjust - §or_angle / 2.0;
let angle = PlaneAngleFast(sector_angle).rotate(rotate_angle);
let angle = angle.grow(0.5.px());
let shape = circle * angle;
let shape = shape.fill(color);
shape.into()
}
}
}
// ======================
// === Shape Creation ===
// ======================
pub(super) trait ShapeParent: display::Object {
fn scene(&self) -> &Scene;
fn layers(&self) -> &GraphLayers;
/// Create a shape object to render one of the [`Corner`]s making up the edge.
fn new_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(LINE_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
new
}
/// Create a shape object to render the invisible hover area corresponding to one of the
/// [`Corner`]s making up the edge.
fn new_hover_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(HOVER_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
self.display_object().add_child(&new);
self.layers().edge_below_nodes.add(&new);
new
}
/// Create a shape object to render an arbitrary-angle arc. This is used when the focus is split
/// in the rounded part of a [`Corner`].
fn new_arc(&self) -> arc::View {
let arc = arc::View::new();
arc.stroke_width.set(LINE_WIDTH);
self.display_object().add_child(&arc);
self.layers().edge_below_nodes.add(&arc);
arc
}
/// Create a shape object to render the little bit at the target end of the edge, that draws on
/// top of the node.
fn new_target_attachment(&self) -> Rectangle {
let new = Rectangle::new();
new.set_size_x(LINE_WIDTH);
new.set_border_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
self.layers().edge_above_nodes.add(&new);
new
}
/// Create a shape object to render the arrow that is drawn on long backward edges to show the
/// direction of data flow.
fn new_dataflow_arrow(&self) -> Rectangle {
let new = SimpleTriangle::from_size(arrow::SIZE);
new.set_pointer_events(false);
self.display_object().add_child(&new);
new.into()
}
/// Create a shape object to render the cutout mask for the edge nearby the source node.
fn new_cutout(&self) -> Rectangle {
let cutout = Rectangle::new();
self.display_object().add_child(&cutout);
// FIXME (temporary assumption): Currently we assume that the node background is a rectangle
// that always has rounded corners. Ideally we would somehow use the actual source node's background
// shape for this.
cutout.set_corner_radius(crate::component::node::CORNER_RADIUS);
self.layers().edge_above_nodes_cutout.add(&cutout);
// Pointer events must be enabled, so that the hover area is masked out as well.
cutout.set_pointer_events(true);
cutout
}
}
// =========================
// === Rendering Corners ===
// =========================
/// Set the given [`Rectangle`]'s geometry to draw this corner shape.
///
/// Note that the shape's `inset` and `border` should be the same value as the provided
/// [`line_width`]. They are not set here as an optimization: When shapes are reused, the value does
/// not need to be set again, reducing needed GPU uploads.
pub(super) fn draw_corner(
shape: Rectangle,
corner: Corner,
color: color::Rgba,
line_width: f32,
) -> Rectangle {
shape.set_xy(corner.origin(line_width));
shape.set_size(corner.size(line_width));
shape.set_clip(corner.clip());
shape.set_corner_radius(corner.radius(line_width));
shape.set_border_color(color);
shape
}
// ==============================
// === Rendering Partial Arcs ===
// ==============================
/// Apply the specified arc-splitting parameters to the given arc shapes.
pub(super) fn draw_split_arc(arc_shapes: [arc::View; 2], split_arc: SplitArc) -> [arc::View; 2] {
let outer_radius = split_arc.radius + LINE_WIDTH / 2.0;
let arc_box = Vector2(outer_radius * 2.0, outer_radius * 2.0);
let arc_offset = Vector2(-outer_radius, -outer_radius);
let geometry = ArcGeometry::bisection(
split_arc.source_end_angle,
split_arc.split_angle,
split_arc.target_end_angle,
);
for (shape, geometry) in arc_shapes.iter().zip(&geometry) {
shape.set_xy(split_arc.origin + arc_offset);
shape.set_size(arc_box);
shape.outer_radius.set(outer_radius);
shape.start_angle.set(geometry.start);
shape.sector_angle.set(geometry.sector);
}
arc_shapes
}
// === Arc geometry ===
#[derive(Debug, Copy, Clone, PartialEq)]
struct ArcGeometry {
start: f32,
sector: f32,
}
impl ArcGeometry {
fn bisection(a: f32, b: f32, c: f32) -> [Self; 2] {
[Self::new_minor(a, b), Self::new_minor(b, c)]
}
fn new_minor(a: f32, b: f32) -> Self {
let start = minor_arc_start(a, b);
let sector = minor_arc_sector(a, b);
Self { start, sector }
}
}
fn minor_arc_start(a: f32, b: f32) -> f32 {
let a = a.rem_euclid(TAU);
let b = b.rem_euclid(TAU);
let wrapped = (a - b).abs() >= PI;
if wrapped {
if a < f32::EPSILON {
b
} else {
a
}
} else {
min(a, b)
}
}
fn minor_arc_sector(a: f32, b: f32) -> f32 {
let a = a.abs();
let b = b.abs();
let ab = (a - b).abs();
min(ab, TAU - ab)
}
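// Worked example (sketch): for `a = 0.0`, `b = 1.5 * PI` the difference exceeds PI, so the
// minor arc wraps through 2*PI: `minor_arc_start` returns `1.5 * PI` and `minor_arc_sector`
// returns `PI / 2.0`. For `a = 0.2`, `b = 1.0` there is no wrap: start `0.2`, sector `0.8`.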
| redraw_hover_sections | identifier_name |
render.rs | //! Definitions, constructors, and management for the EnsoGL shapes that are used to draw an edge.
//!
//! The core function of this module is to translate edge layouts into the shape parameters that
//! will implement them.
use crate::prelude::*;
use ensogl::display::shape::*;
use crate::GraphLayers;
use super::layout::Corner;
use super::layout::EdgeSplit;
use super::layout::Oriented;
use super::layout::SplitArc;
use super::layout::TargetAttachment;
use ensogl::data::color;
use ensogl::display;
use ensogl::display::scene::Scene;
use std::f32::consts::FRAC_PI_2;
use std::f32::consts::PI;
use std::f32::consts::TAU;
// =================
// === Constants ===
// =================
const LINE_WIDTH: f32 = 4.0;
const HOVER_EXTENSION: f32 = 10.0;
pub(super) const HOVER_WIDTH: f32 = LINE_WIDTH + HOVER_EXTENSION;
mod arrow {
use super::*;
pub(super) const SIZE: Vector2 = Vector2(18.75, 18.75);
}
mod attachment {
/// Extra length to add to the top and bottom of the target-attachment bit, to ensure that it
/// appears to pass through the top of the node. Without this adjustment, inexact
/// floating-point math and anti-aliasing would cause a 1-pixel gap artifact right where
/// the attachment should meet the corner at the edge of the node.
pub(super) const LENGTH_ADJUSTMENT: f32 = 0.1;
}
// ===================
// === Edge Shapes ===
// ===================
/// The shapes used to render an edge.
#[derive(Debug, Default)]
pub(super) struct Shapes {
/// The individual [`Corner`]s making up the edge. Each is drawn in the focused or unfocused
/// color.
sections: RefCell<Vec<Rectangle>>,
/// A pair of [`arc`] shapes used when the mouse is over the rounded corner and the edge must
/// be split into focused and unfocused sides at a certain angle along the arc.
split_arc: RefCell<Option<[arc::View; 2]>>,
/// Wider versions of the [`sections`], for receiving mouse events.
hover_sections: RefCell<Vec<Rectangle>>,
/// The end of the edge that is drawn on top of the node and connects to the target node's
/// input port.
target_attachment: RefCell<Option<Rectangle>>,
/// Arrow drawn on long backward edges to indicate data flow direction.
dataflow_arrow: RefCell<Option<Rectangle>>,
/// A rectangle representing the source node shape when the edge is in a detached state. Used
/// to mask out the edge fragment that would otherwise be drawn over the source node.
source_cutout: RefCell<Option<Rectangle>>,
}
impl Shapes {
/// Redraw the arrow used to mark long backward edges.
pub(super) fn redraw_dataflow_arrow(
&self,
parent: &impl ShapeParent,
parameters: RedrawDataflowArrow,
) {
let RedrawDataflowArrow { arrow, source_color, target_color, focus_split, is_attached } =
parameters;
let shape = self.dataflow_arrow.take();
if let Some(arrow_center) = arrow {
// The arrow will have the same color as the target-end of the first corner from the
// source (this is the `arrow_center` point).
let color = match focus_split.map(|split| split.corner_index) {
Some(0) => target_color,
_ => source_color,
};
let shape = shape.unwrap_or_else(|| parent.new_dataflow_arrow());
shape.set_xy(arrow_center - arrow::SIZE / 2.0);
shape.set_color(color);
Self::set_layer(parent, &shape, is_attached, false);
self.dataflow_arrow.replace(Some(shape));
}
}
/// Redraw the invisible mouse-event-catching edges.
pub(super) fn redraw_hover_sections(
&self,
parent: &impl ShapeParent,
corners: &[Oriented<Corner>],
) {
let hover_factory = self
.hover_sections
.take()
.into_iter()
.chain(iter::repeat_with(|| parent.new_hover_section()));
*self.hover_sections.borrow_mut() = corners
.iter()
.zip(hover_factory)
.map(|(corner, shape)| draw_corner(shape, **corner, INVISIBLE_HOVER_COLOR, HOVER_WIDTH))
.collect();
}
/// Redraw the sections, each of which is a [`Rectangle`] implementing a [`Corner`], or multiple
/// [`Rectangle`]s and multiple [`arc::View`]s, if it is a split [`Corner`].
pub(super) fn redraw_sections(&self, parent: &impl ShapeParent, parameters: RedrawSections) {
let RedrawSections { corners, source_color, target_color, focus_split, is_attached } =
parameters;
let corner_index =
focus_split.map(|split| split.corner_index).unwrap_or_else(|| corners.len());
let split_corner = focus_split.map(|split| split.split_corner);
let mut section_factory =
self.sections.take().into_iter().chain(iter::repeat_with(|| parent.new_section()));
let mut new_sections = self.redraw_complete_sections(
&mut section_factory,
corners,
corner_index,
source_color,
target_color,
);
let arc_shapes = self.split_arc.take();
if let Some(split_corner) = split_corner {
if let Some(split_arc) = split_corner.split_arc {
let arc_shapes = arc_shapes.unwrap_or_else(|| [parent.new_arc(), parent.new_arc()]);
let arc_shapes = draw_split_arc(arc_shapes, split_arc);
arc_shapes[0].color.set(source_color.into());
arc_shapes[1].color.set(target_color.into());
self.split_arc.replace(Some(arc_shapes));
}
let (source_shape, target_shape) =
(section_factory.next().unwrap(), section_factory.next().unwrap());
new_sections.extend([
draw_corner(source_shape, *split_corner.source_end, source_color, LINE_WIDTH),
draw_corner(target_shape, *split_corner.target_end, target_color, LINE_WIDTH),
]);
}
for (i, shape) in new_sections.iter().enumerate() {
Self::set_layer(parent, shape, is_attached, i == 0);
}
*self.sections.borrow_mut() = new_sections;
}
pub(crate) fn redraw_cutout(
&self,
parent: &impl ShapeParent,
is_attached: bool,
source_size: Vector2,
) {
let cutout = self.source_cutout.take();
if !is_attached {
let cutout = cutout.unwrap_or_else(|| parent.new_cutout());
cutout.set_xy(-source_size / 2.0);
cutout.set_size(source_size);
self.source_cutout.replace(Some(cutout));
}
}
/// Redraw the sections that aren't split by the focus position.
pub(super) fn redraw_complete_sections(
&self,
section_factory: impl Iterator<Item = Rectangle>,
corners: &[Oriented<Corner>],
corner_index: usize,
source_color: color::Rgba,
target_color: color::Rgba,
) -> Vec<Rectangle> {
corners
.iter()
.enumerate()
.filter_map(|(i, corner)| {
if i == corner_index {
None
} else {
let color = match i < corner_index {
true => source_color,
false => target_color,
};
Some((color, corner))
}
})
.zip(section_factory)
.map(|((color, corner), shape)| draw_corner(shape, **corner, color, LINE_WIDTH))
.collect()
}
/// Redraw the little bit that goes on top of the target node.
pub(super) fn redraw_target_attachment(
&self,
parent: &impl ShapeParent,
target_attachment: Option<TargetAttachment>,
color: color::Rgba,
) {
let shape = self.target_attachment.take();
if let Some(TargetAttachment { target, length }) = target_attachment
&& length > f32::EPSILON {
let shape = shape.unwrap_or_else(|| parent.new_target_attachment());
shape.set_size_y(length + attachment::LENGTH_ADJUSTMENT * 2.0);
let offset = Vector2(-LINE_WIDTH / 2.0, - length - attachment::LENGTH_ADJUSTMENT);
shape.set_xy(target + offset);
shape.set_color(color);
self.target_attachment.replace(Some(shape));
}
}
/// Add the given shape to the appropriate layer depending on whether it is attached.
fn set_layer(
parent: &impl ShapeParent,
shape: &Rectangle,
below_nodes: bool,
near_source: bool,
) {
let layers = parent.layers();
let layer = if below_nodes {
&layers.edge_below_nodes
} else if near_source {
&layers.masked_edge_above_nodes
} else {
&layers.edge_above_nodes
};
layer.add(shape);
}
}
// === Redraw parameters ====
/// Arguments passed to [`Shapes::redraw_sections`].
pub(super) struct RedrawSections<'a> {
/// The corners to be redrawn.
pub(super) corners: &'a [Oriented<Corner>],
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
/// Arguments passed to [`Shapes::redraw_dataflow_arrow`].
pub(super) struct RedrawDataflowArrow {
/// The center of the arrow, if the arrow should be drawn.
pub(super) arrow: Option<Vector2>,
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
// =========================
// === Shape Definitions ===
// =========================
/// An arc around the origin. `outer_radius` determines the distance from the origin to the outer
/// edge of the arc, `stroke_width` the width of the arc. The arc starts at `start_angle`, relative
/// to the origin. Its radial size is `sector_angle`. The ends are flat, not rounded as in
/// [`RoundedArc`].
mod arc {
use super::*;
ensogl::shape! {
pointer_events = false;
(
style: Style,
color: Vector4,
outer_radius: f32,
stroke_width: f32,
start_angle: f32,
sector_angle: f32,
) {
let circle = Circle(outer_radius.px()) - Circle((outer_radius - stroke_width).px());
let angle_adjust = Var::<f32>::from(FRAC_PI_2);
let rotate_angle = -start_angle + angle_adjust - §or_angle / 2.0;
let angle = PlaneAngleFast(sector_angle).rotate(rotate_angle);
let angle = angle.grow(0.5.px());
let shape = circle * angle;
let shape = shape.fill(color);
shape.into()
}
}
}
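// Parameter sketch (illustrative): to draw a quarter arc of the edge line centred on
// `center`, with the stroke centred on a layout radius `radius`, the parameters would be
// set as in `draw_split_arc` below:
//
//     let arc = arc::View::new();
//     arc.stroke_width.set(LINE_WIDTH);
//     let outer = radius + LINE_WIDTH / 2.0;
//     arc.outer_radius.set(outer);
//     arc.start_angle.set(0.0);
//     arc.sector_angle.set(FRAC_PI_2);
//     arc.set_size(Vector2(outer * 2.0, outer * 2.0));
//     arc.set_xy(center - Vector2(outer, outer));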
// ======================
// === Shape Creation ===
// ======================
pub(super) trait ShapeParent: display::Object {
fn scene(&self) -> &Scene;
fn layers(&self) -> &GraphLayers;
/// Create a shape object to render one of the [`Corner`]s making up the edge.
fn new_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(LINE_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
new
}
/// Create a shape object to render the invisible hover area corresponding to one of the
/// [`Corner`]s making up the edge.
fn new_hover_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(HOVER_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
self.display_object().add_child(&new);
self.layers().edge_below_nodes.add(&new);
new
}
/// Create a shape object to render an arbitrary-angle arc. This is used when the focus is split
/// in the rounded part of a [`Corner`].
fn new_arc(&self) -> arc::View {
let arc = arc::View::new();
arc.stroke_width.set(LINE_WIDTH);
self.display_object().add_child(&arc);
self.layers().edge_below_nodes.add(&arc);
arc
}
/// Create a shape object to render the little bit at the target end of the edge that draws on
/// top of the node.
fn new_target_attachment(&self) -> Rectangle {
let new = Rectangle::new();
new.set_size_x(LINE_WIDTH);
new.set_border_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
self.layers().edge_above_nodes.add(&new);
new
}
/// Create a shape object to render the arrow that is drawn on long backward edges to show the
/// direction of data flow.
fn new_dataflow_arrow(&self) -> Rectangle |
/// Create a shape object to render the cutout mask for the edge nearby the source node.
fn new_cutout(&self) -> Rectangle {
let cutout = Rectangle::new();
self.display_object().add_child(&cutout);
// FIXME (temporary assumption): Currently we assume that the node background is a rectangle
// with always-rounded corners. Ideally we would somehow use the actual source node's background
// shape for this.
cutout.set_corner_radius(crate::component::node::CORNER_RADIUS);
self.layers().edge_above_nodes_cutout.add(&cutout);
// Pointer events must be enabled, so that the hover area is masked out as well.
cutout.set_pointer_events(true);
cutout
}
}
// =========================
// === Rendering Corners ===
// =========================
/// Set the given [`Rectangle`]'s geometry to draw this corner shape.
///
/// Note that the shape's `inset` and `border` should be the same value as the provided
/// [`line_width`]. They are not set here as an optimization: When shapes are reused, the value does
/// not need to be set again, reducing needed GPU uploads.
pub(super) fn draw_corner(
shape: Rectangle,
corner: Corner,
color: color::Rgba,
line_width: f32,
) -> Rectangle {
shape.set_xy(corner.origin(line_width));
shape.set_size(corner.size(line_width));
shape.set_clip(corner.clip());
shape.set_corner_radius(corner.radius(line_width));
shape.set_border_color(color);
shape
}
// ==============================
// === Rendering Partial Arcs ===
// ==============================
/// Apply the specified arc-splitting parameters to the given arc shapes.
pub(super) fn draw_split_arc(arc_shapes: [arc::View; 2], split_arc: SplitArc) -> [arc::View; 2] {
let outer_radius = split_arc.radius + LINE_WIDTH / 2.0;
let arc_box = Vector2(outer_radius * 2.0, outer_radius * 2.0);
let arc_offset = Vector2(-outer_radius, -outer_radius);
let geometry = ArcGeometry::bisection(
split_arc.source_end_angle,
split_arc.split_angle,
split_arc.target_end_angle,
);
for (shape, geometry) in arc_shapes.iter().zip(&geometry) {
shape.set_xy(split_arc.origin + arc_offset);
shape.set_size(arc_box);
shape.outer_radius.set(outer_radius);
shape.start_angle.set(geometry.start);
shape.sector_angle.set(geometry.sector);
}
arc_shapes
}
// === Arc geometry ===
#[derive(Debug, Copy, Clone, PartialEq)]
struct ArcGeometry {
start: f32,
sector: f32,
}
impl ArcGeometry {
fn bisection(a: f32, b: f32, c: f32) -> [Self; 2] {
[Self::new_minor(a, b), Self::new_minor(b, c)]
}
fn new_minor(a: f32, b: f32) -> Self {
let start = minor_arc_start(a, b);
let sector = minor_arc_sector(a, b);
Self { start, sector }
}
}
fn minor_arc_start(a: f32, b: f32) -> f32 {
let a = a.rem_euclid(TAU);
let b = b.rem_euclid(TAU);
let wrapped = (a - b).abs() >= PI;
if wrapped {
if a < f32::EPSILON {
b
} else {
a
}
} else {
min(a, b)
}
}
fn minor_arc_sector(a: f32, b: f32) -> f32 {
let a = a.abs();
let b = b.abs();
let ab = (a - b).abs();
min(ab, TAU - ab)
}
| {
let new = SimpleTriangle::from_size(arrow::SIZE);
new.set_pointer_events(false);
self.display_object().add_child(&new);
new.into()
} | identifier_body |
render.rs | //! Definitions, constructors, and management for the EnsoGL shapes that are used to draw an edge.
//!
//! The core function of this module is to translate edge layouts into the shape parameters that
//! will implement them.
use crate::prelude::*;
use ensogl::display::shape::*;
use crate::GraphLayers;
use super::layout::Corner;
use super::layout::EdgeSplit;
use super::layout::Oriented;
use super::layout::SplitArc;
use super::layout::TargetAttachment;
use ensogl::data::color;
use ensogl::display;
use ensogl::display::scene::Scene;
use std::f32::consts::FRAC_PI_2;
use std::f32::consts::PI;
use std::f32::consts::TAU;
// =================
// === Constants ===
// =================
const LINE_WIDTH: f32 = 4.0;
const HOVER_EXTENSION: f32 = 10.0;
pub(super) const HOVER_WIDTH: f32 = LINE_WIDTH + HOVER_EXTENSION;
mod arrow {
use super::*;
pub(super) const SIZE: Vector2 = Vector2(18.75, 18.75);
}
mod attachment {
/// Extra length to add to the top and bottom of the target-attachment bit, to ensure that it
/// appears to pass through the top of the node. Without this adjustment, inexact
/// floating-point math and anti-aliasing would cause a 1-pixel gap artifact right where
/// the attachment should meet the corner at the edge of the node.
pub(super) const LENGTH_ADJUSTMENT: f32 = 0.1;
}
// ===================
// === Edge Shapes ===
// ===================
/// The shapes used to render an edge.
#[derive(Debug, Default)]
pub(super) struct Shapes {
/// The individual [`Corner`]s making up the edge. Each is drawn in the focused or unfocused
/// color.
sections: RefCell<Vec<Rectangle>>,
/// A pair of [`arc`] shapes used when the mouse is over the rounded corner and the edge must
/// be split into focused and unfocused sides at a certain angle along the arc.
split_arc: RefCell<Option<[arc::View; 2]>>,
/// Wider versions of the [`sections`], for receiving mouse events.
hover_sections: RefCell<Vec<Rectangle>>,
/// The end of the edge that is drawn on top of the node and connects to the target node's
/// input port.
target_attachment: RefCell<Option<Rectangle>>,
/// Arrow drawn on long backward edges to indicate data flow direction.
dataflow_arrow: RefCell<Option<Rectangle>>,
/// A rectangle representing the source node shape when the edge is in a detached state. Used
/// to mask out the edge fragment that would otherwise be drawn over the source node.
source_cutout: RefCell<Option<Rectangle>>,
}
impl Shapes {
/// Redraw the arrow used to mark long backward edges.
pub(super) fn redraw_dataflow_arrow(
&self,
parent: &impl ShapeParent,
parameters: RedrawDataflowArrow,
) {
let RedrawDataflowArrow { arrow, source_color, target_color, focus_split, is_attached } =
parameters;
let shape = self.dataflow_arrow.take();
if let Some(arrow_center) = arrow {
// The arrow will have the same color as the target-end of the first corner from the
// source (this is the `arrow_center` point).
let color = match focus_split.map(|split| split.corner_index) {
Some(0) => target_color,
_ => source_color,
};
let shape = shape.unwrap_or_else(|| parent.new_dataflow_arrow());
shape.set_xy(arrow_center - arrow::SIZE / 2.0);
shape.set_color(color);
Self::set_layer(parent, &shape, is_attached, false);
self.dataflow_arrow.replace(Some(shape));
}
}
/// Redraw the invisible mouse-event-catching edges.
pub(super) fn redraw_hover_sections(
&self,
parent: &impl ShapeParent,
corners: &[Oriented<Corner>],
) {
let hover_factory = self
.hover_sections
.take()
.into_iter()
.chain(iter::repeat_with(|| parent.new_hover_section()));
*self.hover_sections.borrow_mut() = corners
.iter()
.zip(hover_factory)
.map(|(corner, shape)| draw_corner(shape, **corner, INVISIBLE_HOVER_COLOR, HOVER_WIDTH))
.collect();
}
/// Redraw the sections, each of which is a [`Rectangle`] implementing a [`Corner`], or multiple
/// [`Rectangle`]s and multiple [`arc::View`]s, if it is a split [`Corner`].
pub(super) fn redraw_sections(&self, parent: &impl ShapeParent, parameters: RedrawSections) {
let RedrawSections { corners, source_color, target_color, focus_split, is_attached } =
parameters;
let corner_index =
focus_split.map(|split| split.corner_index).unwrap_or_else(|| corners.len());
let split_corner = focus_split.map(|split| split.split_corner);
let mut section_factory =
self.sections.take().into_iter().chain(iter::repeat_with(|| parent.new_section()));
let mut new_sections = self.redraw_complete_sections(
&mut section_factory,
corners,
corner_index,
source_color,
target_color,
);
let arc_shapes = self.split_arc.take();
if let Some(split_corner) = split_corner {
if let Some(split_arc) = split_corner.split_arc {
let arc_shapes = arc_shapes.unwrap_or_else(|| [parent.new_arc(), parent.new_arc()]);
let arc_shapes = draw_split_arc(arc_shapes, split_arc);
arc_shapes[0].color.set(source_color.into());
arc_shapes[1].color.set(target_color.into());
self.split_arc.replace(Some(arc_shapes));
}
let (source_shape, target_shape) =
(section_factory.next().unwrap(), section_factory.next().unwrap());
new_sections.extend([
draw_corner(source_shape, *split_corner.source_end, source_color, LINE_WIDTH),
draw_corner(target_shape, *split_corner.target_end, target_color, LINE_WIDTH),
]);
}
for (i, shape) in new_sections.iter().enumerate() {
Self::set_layer(parent, shape, is_attached, i == 0);
}
*self.sections.borrow_mut() = new_sections;
}
pub(crate) fn redraw_cutout(
&self,
parent: &impl ShapeParent,
is_attached: bool,
source_size: Vector2,
) {
let cutout = self.source_cutout.take();
if !is_attached {
let cutout = cutout.unwrap_or_else(|| parent.new_cutout());
cutout.set_xy(-source_size / 2.0);
cutout.set_size(source_size);
self.source_cutout.replace(Some(cutout));
}
}
/// Redraw the sections that aren't split by the focus position.
pub(super) fn redraw_complete_sections(
&self,
section_factory: impl Iterator<Item = Rectangle>,
corners: &[Oriented<Corner>],
corner_index: usize,
source_color: color::Rgba,
target_color: color::Rgba,
) -> Vec<Rectangle> {
corners
.iter()
.enumerate()
.filter_map(|(i, corner)| {
if i == corner_index {
None
} else {
let color = match i < corner_index {
true => source_color,
false => target_color,
};
Some((color, corner))
}
})
.zip(section_factory)
.map(|((color, corner), shape)| draw_corner(shape, **corner, color, LINE_WIDTH))
.collect()
}
/// Redraw the little bit that goes on top of the target node.
pub(super) fn redraw_target_attachment(
&self,
parent: &impl ShapeParent,
target_attachment: Option<TargetAttachment>,
color: color::Rgba,
) {
let shape = self.target_attachment.take();
if let Some(TargetAttachment { target, length }) = target_attachment
&& length > f32::EPSILON {
let shape = shape.unwrap_or_else(|| parent.new_target_attachment());
shape.set_size_y(length + attachment::LENGTH_ADJUSTMENT * 2.0);
let offset = Vector2(-LINE_WIDTH / 2.0, - length - attachment::LENGTH_ADJUSTMENT);
shape.set_xy(target + offset);
shape.set_color(color);
self.target_attachment.replace(Some(shape));
}
}
/// Add the given shape to the appropriate layer depending on whether it is attached.
fn set_layer(
parent: &impl ShapeParent,
shape: &Rectangle,
below_nodes: bool,
near_source: bool,
) {
let layers = parent.layers();
let layer = if below_nodes {
&layers.edge_below_nodes
} else if near_source {
&layers.masked_edge_above_nodes
} else {
&layers.edge_above_nodes
};
layer.add(shape);
}
}
// === Redraw parameters ====
/// Arguments passed to [`Shapes::redraw_sections`].
pub(super) struct RedrawSections<'a> {
/// The corners to be redrawn.
pub(super) corners: &'a [Oriented<Corner>],
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
/// Arguments passed to [`Shapes::redraw_dataflow_arrow`].
pub(super) struct RedrawDataflowArrow {
/// The center of the arrow, if the arrow should be drawn.
pub(super) arrow: Option<Vector2>,
/// The color to use for the part of the edge closer to the source.
pub(super) source_color: color::Rgba,
/// The color to use for the part of the edge closer to the target.
pub(super) target_color: color::Rgba,
/// Where the edge should be split into differently-colored source and target parts.
pub(super) focus_split: Option<EdgeSplit>,
/// Whether the edge is fully-attached.
pub(super) is_attached: bool,
}
// =========================
// === Shape Definitions ===
// =========================
/// An arc around the origin. `outer_radius` determines the distance from the origin to the outer
/// edge of the arc, `stroke_width` the width of the arc. The arc starts at `start_angle`, relative
/// to the origin. Its radial size is `sector_angle`. The ends are flat, not rounded as in
/// [`RoundedArc`].
mod arc {
use super::*;
ensogl::shape! {
pointer_events = false;
(
style: Style,
color: Vector4,
outer_radius: f32,
stroke_width: f32,
start_angle: f32,
sector_angle: f32,
) {
let circle = Circle(outer_radius.px()) - Circle((outer_radius - stroke_width).px());
let angle_adjust = Var::<f32>::from(FRAC_PI_2);
let rotate_angle = -start_angle + angle_adjust - §or_angle / 2.0;
let angle = PlaneAngleFast(sector_angle).rotate(rotate_angle);
let angle = angle.grow(0.5.px());
let shape = circle * angle;
let shape = shape.fill(color);
shape.into()
}
}
}
// ======================
// === Shape Creation ===
// ======================
pub(super) trait ShapeParent: display::Object {
fn scene(&self) -> &Scene;
fn layers(&self) -> &GraphLayers;
/// Create a shape object to render one of the [`Corner`]s making up the edge.
fn new_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(LINE_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
new
}
/// Create a shape object to render the invisible hover area corresponding to one of the
/// [`Corner`]s making up the edge.
fn new_hover_section(&self) -> Rectangle {
let new = Rectangle::new();
new.set_inner_border(HOVER_WIDTH, 0.0);
new.set_color(color::Rgba::transparent());
self.display_object().add_child(&new);
self.layers().edge_below_nodes.add(&new);
new
}
/// Create a shape object to render an arbitrary-angle arc. This is used when the focus is split
/// in the rounded part of a [`Corner`].
fn new_arc(&self) -> arc::View {
let arc = arc::View::new();
arc.stroke_width.set(LINE_WIDTH);
self.display_object().add_child(&arc);
self.layers().edge_below_nodes.add(&arc);
arc
}
/// Create a shape object to render the little bit at the target end of the edge that draws on
/// top of the node.
fn new_target_attachment(&self) -> Rectangle {
let new = Rectangle::new();
new.set_size_x(LINE_WIDTH);
new.set_border_color(color::Rgba::transparent());
new.set_pointer_events(false);
self.display_object().add_child(&new);
self.layers().edge_above_nodes.add(&new);
new
}
/// Create a shape object to render the arrow that is drawn on long backward edges to show the
/// direction of data flow.
fn new_dataflow_arrow(&self) -> Rectangle {
let new = SimpleTriangle::from_size(arrow::SIZE);
new.set_pointer_events(false);
self.display_object().add_child(&new);
new.into()
}
/// Create a shape object to render the cutout mask for the edge nearby the source node.
fn new_cutout(&self) -> Rectangle {
let cutout = Rectangle::new();
self.display_object().add_child(&cutout);
// FIXME (temporary assumption): Currently we assume that the node background is a rectangle
// with always-rounded corners. Ideally we would somehow use the actual source node's background
// shape for this.
cutout.set_corner_radius(crate::component::node::CORNER_RADIUS);
self.layers().edge_above_nodes_cutout.add(&cutout);
// Pointer events must be enabled, so that the hover area is masked out as well.
cutout.set_pointer_events(true);
cutout
}
}
// =========================
// === Rendering Corners ===
// =========================
/// Set the given [`Rectangle`]'s geometry to draw this corner shape.
///
/// Note that the shape's `inset` and `border` should be the same value as the provided
/// [`line_width`]. They are not set here as an optimization: When shapes are reused, the value does
/// not need to be set again, reducing needed GPU uploads.
pub(super) fn draw_corner(
shape: Rectangle,
corner: Corner,
color: color::Rgba,
line_width: f32,
) -> Rectangle {
shape.set_xy(corner.origin(line_width));
shape.set_size(corner.size(line_width));
shape.set_clip(corner.clip());
shape.set_corner_radius(corner.radius(line_width));
shape.set_border_color(color);
shape
}
// ==============================
// === Rendering Partial Arcs ===
// ==============================
/// Apply the specified arc-splitting parameters to the given arc shapes.
pub(super) fn draw_split_arc(arc_shapes: [arc::View; 2], split_arc: SplitArc) -> [arc::View; 2] {
let outer_radius = split_arc.radius + LINE_WIDTH / 2.0;
let arc_box = Vector2(outer_radius * 2.0, outer_radius * 2.0);
let arc_offset = Vector2(-outer_radius, -outer_radius);
let geometry = ArcGeometry::bisection(
split_arc.source_end_angle,
split_arc.split_angle,
split_arc.target_end_angle,
);
for (shape, geometry) in arc_shapes.iter().zip(&geometry) {
shape.set_xy(split_arc.origin + arc_offset);
shape.set_size(arc_box);
shape.outer_radius.set(outer_radius);
shape.start_angle.set(geometry.start);
shape.sector_angle.set(geometry.sector);
}
arc_shapes
}
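// Note (sketch): `outer_radius = split_arc.radius + LINE_WIDTH / 2.0` centres the stroke on
// the layout radius, so it spans `radius - LINE_WIDTH / 2.0 ..= radius + LINE_WIDTH / 2.0`,
// matching the width of the adjacent straight sections.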
// === Arc geometry ===
#[derive(Debug, Copy, Clone, PartialEq)]
struct ArcGeometry {
start: f32,
sector: f32,
}
impl ArcGeometry {
fn bisection(a: f32, b: f32, c: f32) -> [Self; 2] {
[Self::new_minor(a, b), Self::new_minor(b, c)]
}
fn new_minor(a: f32, b: f32) -> Self {
let start = minor_arc_start(a, b);
let sector = minor_arc_sector(a, b);
Self { start, sector }
}
}
fn minor_arc_start(a: f32, b: f32) -> f32 {
let a = a.rem_euclid(TAU);
let b = b.rem_euclid(TAU);
let wrapped = (a - b).abs() >= PI;
if wrapped {
if a < f32::EPSILON {
b
} else |
} else {
min(a, b)
}
}
fn minor_arc_sector(a: f32, b: f32) -> f32 {
let a = a.abs();
let b = b.abs();
let ab = (a - b).abs();
min(ab, TAU - ab)
}
| {
a
} | conditional_block |
PDDSP_encoder.py | """ Predictive Encoder"""
import sys
sys.path.append('APC')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display  # specshow lives in the display submodule; explicit import avoids AttributeError on older librosa
import ddsp.core as core
import PLP_model.PDDSP_spectral_ops as PDDSP_spectral_ops
def np_diff(a, n=1, axis=-1):
"""Tensorflow implementation of np.diff"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return np_diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
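# Quick check (sketch): mirrors np.diff on a 1-D tensor.
#   np_diff(tf.constant([1., 4., 9., 16.]))       -> [3., 5., 7.]
#   np_diff(tf.constant([1., 4., 9., 16.]), n=2)  -> [2., 2.]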
def audio_to_spectralflux_tf(audio, N, H, Fs):
""" computes novelty via differences in the spectral energy between adjacent frames """
X = tf.transpose(PDDSP_spectral_ops.stft(audio, N, H,
fft_length=N, pad_end=False, center=True, # todo centering?
window_fn=tf.signal.hann_window))
gamma = 10 # todo tune compression prior, make adaptive?
Y = tf.math.log(1 + gamma * np.abs(X))
Y_diff = np_diff(Y, n=1)
Y_diff = tf.clip_by_value(Y_diff, clip_value_min=0., clip_value_max=1000000.) # todo
# todo replace the audio filtering with adaptive weighting of FT bins:
nov = tf.reduce_mean(Y_diff, axis=0) # todo tune aggregation function
nov = tf.concat([nov, np.array([0])], axis=0)
Fs_nov = Fs / H
nov -= tf.math.reduce_mean(nov) # todo tune output normalization
nov = tf.clip_by_value(nov, clip_value_min=0., clip_value_max=1000000.)
nov /= tf.math.reduce_max(nov) # normalize
return nov, Fs_nov
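# Usage sketch (illustrative values): for 22.05 kHz audio with N=1024 and H=256 the novelty
# curve comes back sampled at Fs_nov = 22050 / 256 ~ 86 Hz.
#   nov, fs_nov = audio_to_spectralflux_tf(audio, N=1024, H=256, Fs=22050)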
def get_slope(prev, cur):
return tf.cond(prev[0] < cur, lambda: (cur, ascending_or_valley(prev, cur)), lambda: (cur, descending_or_peak(prev, cur)))
def ascending_or_valley(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('A'), lambda: np.array('V'))
def descending_or_peak(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('P'), lambda: np.array('D'))
def label_local_extrema(tens):
"""Return a vector of chars indicating ascending, descending, peak, or valley slopes"""
initializer = (np.array(0, dtype=np.float32), np.array('A'))
slope = tf.scan(get_slope, tens, initializer)
return slope[1][1:]
def find_local_maxima(tens):
"""Tensorflow peak picking via local maxima
Returns the indices of the local maxima of the first dimension of the tensor
Based on https://stackoverflow.com/questions/48178286/finding-local-maxima-with-tensorflow
"""
return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P')))
def fft_frequencies(sr=22050, n_fft=2048):
"""Tensorflow-based implementation of np.fft.fftfreq """
# TODO endpoint=True
return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32))
def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
|
def bandpass_filter_audio(audio, f_low=400, f_high=450):
"""Bandpass filters audio to given frequency range"""
filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)
filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)
return tf.squeeze(filtered_audio)
def plp_tf(
y,
sr=22050,
tempo_min=30,
tempo_max=300,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.1,
loudness_max=1.,
prior=None):
"""Tensorflow-based implementation of librosa.beat.plp
Process chain: audio -> spectral flux novelty -> Fourier tempogram -> local pulse """
y = tf.squeeze(y)
# get spectral flux novelty
oenv, sr_ = audio_to_spectralflux_tf(y, win_length_novelty, hop_length_novelty, sr)
# get fourier tempogram
tempogram = tf.transpose(PDDSP_spectral_ops.stft(oenv, win_length,
frame_step=hop_length,
fft_length=win_length, pad_end=False,
center=True, window_fn=tf.signal.hann_window))
# restrict to tempo range prior
tempo_frequencies = tf.cast(fourier_tempo_frequencies(sr=sr_,
hop_length=hop_length,
win_length=win_length), dtype=tf.float32)
mask = tempo_frequencies < tempo_max
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
mask = tempo_frequencies > tempo_min
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
# discard everything below the peak
ftmag = tf.math.log1p(1e6 * np.abs(tempogram))
if prior is not None:
log_prob = tf.squeeze(prior.log_prob(tempo_frequencies))
log_prob = tf.tile(log_prob[:, tf.newaxis], [1, ftmag.shape[1]])
ftmag += log_prob
peak_values = tf.math.reduce_max(ftmag, axis=0, keepdims=True)
peak_values = tf.tile(peak_values, [ftmag.shape[0], 1])
tempogram = tf.cast(ftmag >= peak_values, dtype=tempogram.dtype) * tempogram
# todo keep only phase
#ftgram = tempogram.numpy()
#import librosa
#ftgram /= librosa.util.tiny(ftgram) ** 0.5 + np.abs(ftgram.max(axis=0, keepdims=True))
#tempogram = tf.cast(ftgram, dtype=tf.complex64)
# Compute pulse by inverting the tempogram
pulse = PDDSP_spectral_ops.inverse_stft(
tf.transpose(tempogram), win_length, hop_length, fft_length=win_length, center=True,
window_fn=tf.signal.inverse_stft_window_fn(hop_length, forward_window_fn=tf.signal.hann_window))
# retain only the positive part and normalize
pulse /= tf.math.reduce_max(pulse)
pulse -= tf.math.reduce_mean(pulse)
pulse = tf.clip_by_value(pulse, clip_value_min=0, clip_value_max=100000)
# compute mean period and expected next onset position
F_mean = dominant_freq_from_tempogram(tempogram, tempo_frequencies)
period_mean, mean_offset, next_onset_shift, peaks = period_from_pulse(pulse, F_mean,
sr=sr_, loudness_min=loudness_min,
loudness_max=loudness_max)
period_mean, next_onset_shift, mean_offset = (period_mean/sr_)*sr, (next_onset_shift/sr_)*sr, (mean_offset/sr_)*sr
return pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift
def period_from_pulse(pulse, F_mean_in_Hz, sr, loudness_min=0.1, loudness_max=1.):
"""Compute mean period and the next expected onset position"""
# Find last peak in the pulse
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
first_peak = tf.cast(peaks[0], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
last_peak = tf.cast(peaks[-1], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
# return average offset for each peak
mean_offset = tf.math.reduce_mean(tf.cast([tf.math.floormod(tf.cast(peak, dtype=tf.int64), tf.cast(sr, dtype=tf.int64))
for peak in peaks], dtype=tf.float32))
# Compute mean period
period_mean = (1/F_mean_in_Hz) * sr
# Predict the first onset in the next audio input
next_onset_shift = tf.abs(period_mean - (tf.cast(pulse.shape[0], dtype=tf.float32) - last_peak))
next_onset_shift = tf.math.floormod(next_onset_shift, period_mean)
return period_mean, mean_offset, next_onset_shift, peaks
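# Worked example (sketch): with a novelty rate sr ~ 86 Hz and F_mean_in_Hz = 2.0 (120 BPM),
# period_mean = (1 / 2.0) * 86 ~ 43 novelty frames; plp_tf later rescales this to audio
# samples, i.e. about 0.5 s per beat at 22.05 kHz.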
def dominant_freq_from_tempogram(tempogram, tempo_frequencies, return_Hz = True):
"""Calculate dominant frequency from tempogram."""
tempo_BPM_max = tempo_frequencies \
* tf.cast(tf.math.abs(tempogram[:, 0])
== tf.math.reduce_max(tf.math.abs(tempogram[:, 0])),
tempo_frequencies.dtype)
if return_Hz:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max)/60, dtype=tf.float32)
else:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max), dtype=tf.float32)
weights = tf.cast(tf.math.abs(tempogram[:, 0]), dtype=tf.float32)
weighted_mean = tf.nn.weighted_moments(tempo_frequencies, axes=[0], frequency_weights=weights)[0]
if return_Hz:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean/60, dtype=tf.float32), axis = 0)
else:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean, dtype=tf.float32), axis = 0)
dominant_tempo = tf.expand_dims(dominant_tempo, axis=0)
out = tf.concat([dominant_tempo, weighted_mean_tempo], axis=0)
return tf.cast(out, dtype=tf.float32)
def encode_song(y, sr, chunks=8,
tempo_min=60,
tempo_max=300,
f_low=400, f_high=450,
loudness_min=0.1, loudness_max=1,
filter=False, plot=True,
padding_seconds=4,
frame_step=0.1):
"""Run PLP encoder over all chunks in a song"""
if chunks != 0:
y_list = tf.signal.frame(y, sr*chunks, int(sr*frame_step), pad_end=True, pad_value=0, axis=-1) # TODO padding
else:
y_list = [tf.cast(y, dtype=tf.float32)]
tempo_mean_list, period_mean_list, beats_list = None, None, None
for y, index in zip(y_list, range(len(y_list))):
# Bandpass filter audio
if filter:
y = bandpass_filter_audio(y[tf.newaxis,:], f_low=f_low, f_high=f_high)
# Compute phase and period
pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift = plp_tf(
y=y, sr=sr,
tempo_min=tempo_min,
tempo_max=tempo_max,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.2,
loudness_max=1.)
if tempo_mean_list is None:
tempo_mean_list = [F_mean] # in Hz
period_mean_list = [mean_offset/sr] # in seconds
else:
tempo_mean_list.append(F_mean) # in Hz
period_mean_list.append(mean_offset/sr) # in seconds
# Compute beat positions via local maxima
beats = find_local_maxima(tf.clip_by_value(pulse,
clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
# correct timing in each chunk
beats = tf.cast(beats, dtype=tf.float32) + (tf.cast(index, dtype=tf.float32) * pulse.shape[0])
beats = beats - padding_seconds*sr_ #remove padding #TODO fix
if beats_list is None:
beats_list = beats
else:
beats_list = np.concatenate([beats_list, beats], axis=0)
# Optionally plot tempogram and pulse for each input
if plot:
plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, 1)
plot_librosa_tempogram(y.numpy(), sr)
# samples to time
beats_list = np.asarray(beats_list) / sr_
return tempo_mean_list, period_mean_list, beats_list, oenv.numpy()
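# Hypothetical driver (file name and parameter choices are illustrative, not from the original):
#   y, sr = librosa.load("song.wav", sr=22050, mono=True)
#   tempi_hz, offsets_s, beat_times_s, novelty = encode_song(
#       tf.constant(y), sr, chunks=8, filter=False, plot=False)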
"""Helper functions"""
def plot_librosa_tempogram(y, sr, hop_length = 512):
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
tempogram = librosa.feature.fourier_tempogram(onset_envelope=oenv, sr=sr,
hop_length=hop_length)
librosa.display.specshow(np.abs(tempogram), sr=sr, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.title('Librosa Fourier tempogram')
plt.show()
def plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, hop_length, plot_pulse=True):
"""Plots tempogram and local pulse."""
tempogram = tempogram.numpy()
librosa.display.specshow(np.abs(tempogram), sr=sr_, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.show()
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=0.1,
clip_value_max=1.))[1:]
if plot_pulse:
oenv = oenv.numpy()
pulse = pulse.numpy()
plt.plot(oenv, color="black")
plt.plot(pulse, color="blue")
plt.plot(peaks, pulse[peaks.numpy()], "ro")
plt.show()
| """Tensorflow-based implementation of librosa.core.fourier_tempo_frequencies"""
return fft_frequencies(sr=sr * 60 / float(hop_length), n_fft=win_length) | identifier_body |
PDDSP_encoder.py | """ Predictive Encoder"""
import sys
sys.path.append('APC')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display  # specshow lives in the display submodule; explicit import avoids AttributeError on older librosa
import ddsp.core as core
import PLP_model.PDDSP_spectral_ops as PDDSP_spectral_ops
def np_diff(a, n=1, axis=-1):
"""Tensorflow implementation of np.diff"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return np_diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def audio_to_spectralflux_tf(audio, N, H, Fs):
""" computes novelty via differences in the spectral energy between adjacent frames """
X = tf.transpose(PDDSP_spectral_ops.stft(audio, N, H,
fft_length=N, pad_end=False, center=True, # todo centering?
window_fn=tf.signal.hann_window))
gamma = 10 # todo tune compression prior, make adaptive?
Y = tf.math.log(1 + gamma * np.abs(X))
Y_diff = np_diff(Y, n=1)
Y_diff = tf.clip_by_value(Y_diff, clip_value_min=0., clip_value_max=1000000.) # todo
# todo replace the audio filtering with adaptive weighting of FT bins:
nov = tf.reduce_mean(Y_diff, axis=0) # todo tune aggregation function
nov = tf.concat([nov, np.array([0])], axis=0)
Fs_nov = Fs / H
nov -= tf.math.reduce_mean(nov) # todo tune output normalization
nov = tf.clip_by_value(nov, clip_value_min=0., clip_value_max=1000000.)
nov /= tf.math.reduce_max(nov) # normalize
return nov, Fs_nov
def get_slope(prev, cur):
return tf.cond(prev[0] < cur, lambda: (cur, ascending_or_valley(prev, cur)), lambda: (cur, descending_or_peak(prev, cur)))
def ascending_or_valley(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('A'), lambda: np.array('V'))
def descending_or_peak(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('P'), lambda: np.array('D'))
def label_local_extrema(tens):
"""Return a vector of chars indicating ascending, descending, peak, or valley slopes"""
initializer = (np.array(0, dtype=np.float32), np.array('A'))
slope = tf.scan(get_slope, tens, initializer)
return slope[1][1:]
def find_local_maxima(tens):
"""Tensorflow peak picking via local maxima
Returns the indices of the local maxima of the first dimension of the tensor
Based on https://stackoverflow.com/questions/48178286/finding-local-maxima-with-tensorflow
"""
return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P')))
def fft_frequencies(sr=22050, n_fft=2048):
"""Tensorflow-based implementation of np.fft.fftfreq """
# TODO endpoint=True
return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32))
def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
"""Tensorflow-based implementation of librosa.core.fourier_tempo_frequencies"""
return fft_frequencies(sr=sr * 60 / float(hop_length), n_fft=win_length)
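# Worked example (sketch): with a novelty rate sr ~ 86 Hz, hop_length=1 and win_length=512,
# the tempogram axis spans 0 .. sr * 60 / 2 ~ 2584 BPM over 257 bins, i.e. roughly 10 BPM per
# bin; the tempo_min/tempo_max masks in plp_tf then keep only the 30-300 BPM band.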
def bandpass_filter_audio(audio, f_low=400, f_high=450):
"""Bandpass filters audio to given frequency range"""
filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)
filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)
return tf.squeeze(filtered_audio)
def plp_tf(
y,
sr=22050,
tempo_min=30,
tempo_max=300,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.1,
loudness_max=1.,
prior=None):
"""Tensorflow-based implementation of librosa.beat.plp
Process chain: audio -> spectral flux novelty -> Fourier tempogram -> local pulse """
y = tf.squeeze(y)
# get spectral flux novelty
oenv, sr_ = audio_to_spectralflux_tf(y, win_length_novelty, hop_length_novelty, sr)
# get fourier tempogram
tempogram = tf.transpose(PDDSP_spectral_ops.stft(oenv, win_length,
frame_step=hop_length,
fft_length=win_length, pad_end=False,
center=True, window_fn=tf.signal.hann_window))
# restrict to tempo range prior
tempo_frequencies = tf.cast(fourier_tempo_frequencies(sr=sr_,
hop_length=hop_length,
win_length=win_length), dtype=tf.float32)
mask = tempo_frequencies < tempo_max
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
mask = tempo_frequencies > tempo_min
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
# discard everything below the peak
ftmag = tf.math.log1p(1e6 * np.abs(tempogram))
if prior is not None:
log_prob = tf.squeeze(prior.log_prob(tempo_frequencies))
log_prob = tf.tile(log_prob[:, tf.newaxis], [1, ftmag.shape[1]])
ftmag += log_prob
peak_values = tf.math.reduce_max(ftmag, axis=0, keepdims=True)
peak_values = tf.tile(peak_values, [ftmag.shape[0], 1])
tempogram = tf.cast(ftmag >= peak_values, dtype=tempogram.dtype) * tempogram
# todo keep only phase
#ftgram = tempogram.numpy()
#import librosa
#ftgram /= librosa.util.tiny(ftgram) ** 0.5 + np.abs(ftgram.max(axis=0, keepdims=True))
#tempogram = tf.cast(ftgram, dtype=tf.complex64)
# Compute pulse by inverting the tempogram
pulse = PDDSP_spectral_ops.inverse_stft(
tf.transpose(tempogram), win_length, hop_length, fft_length=win_length, center=True,
window_fn=tf.signal.inverse_stft_window_fn(hop_length, forward_window_fn=tf.signal.hann_window))
# retain only the positive part and normalize
pulse /= tf.math.reduce_max(pulse)
pulse -= tf.math.reduce_mean(pulse)
pulse = tf.clip_by_value(pulse, clip_value_min=0, clip_value_max=100000)
# compute mean period and expected next onset position
F_mean = dominant_freq_from_tempogram(tempogram, tempo_frequencies)
period_mean, mean_offset, next_onset_shift, peaks = period_from_pulse(pulse, F_mean,
sr=sr_, loudness_min=loudness_min,
loudness_max=loudness_max)
period_mean, next_onset_shift, mean_offset = (period_mean/sr_)*sr, (next_onset_shift/sr_)*sr, (mean_offset/sr_)*sr
return pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift
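# Usage sketch with a tempo prior (assumes tensorflow_probability is installed; illustrative
# only -- any object exposing log_prob over BPM values works):
#   import tensorflow_probability as tfp
#   prior = tfp.distributions.Normal(loc=120.0, scale=30.0)  # prior over tempo in BPM
#   pulse, tempogram, oenv, sr_nov, f_mean, period, offset, next_onset = plp_tf(
#       y=audio, sr=22050, tempo_min=60, tempo_max=240, prior=prior)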
def period_from_pulse(pulse, F_mean_in_Hz, sr, loudness_min=0.1, loudness_max=1.):
"""Compute mean period and the next expected onset position"""
# Find last peak in the pulse
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
first_peak = tf.cast(peaks[0], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
last_peak = tf.cast(peaks[-1], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
# return average offset for each peak
mean_offset = tf.math.reduce_mean(tf.cast([tf.math.floormod(tf.cast(peak, dtype=tf.int64), tf.cast(sr, dtype=tf.int64))
for peak in peaks], dtype=tf.float32))
# Compute mean period
period_mean = (1/F_mean_in_Hz) * sr
# Predict the first onset in the next audio input
next_onset_shift = tf.abs(period_mean - (tf.cast(pulse.shape[0], dtype=tf.float32) - last_peak))
next_onset_shift = tf.math.floormod(next_onset_shift, period_mean)
return period_mean, mean_offset, next_onset_shift, peaks
def dominant_freq_from_tempogram(tempogram, tempo_frequencies, return_Hz = True):
"""Calculate dominant frequency from tempogram."""
tempo_BPM_max = tempo_frequencies \
* tf.cast(tf.math.abs(tempogram[:, 0])
== tf.math.reduce_max(tf.math.abs(tempogram[:, 0])),
tempo_frequencies.dtype)
if return_Hz:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max)/60, dtype=tf.float32)
else:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max), dtype=tf.float32)
weights = tf.cast(tf.math.abs(tempogram[:, 0]), dtype=tf.float32)
weighted_mean = tf.nn.weighted_moments(tempo_frequencies, axes=[0], frequency_weights=weights)[0]
if return_Hz:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean/60, dtype=tf.float32), axis = 0)
else:
|
dominant_tempo = tf.expand_dims(dominant_tempo, axis=0)
out = tf.concat([dominant_tempo, weighted_mean_tempo], axis=0)
return tf.cast(out, dtype=tf.float32)
def encode_song(y, sr, chunks=8,
tempo_min=60,
tempo_max=300,
f_low=400, f_high=450,
loudness_min=0.1, loudness_max=1,
filter=False, plot=True,
padding_seconds=4,
frame_step=0.1):
"""Run PLP encoder over all chunks in a song"""
if chunks != 0:
y_list = tf.signal.frame(y, sr*chunks, int(sr*frame_step), pad_end=True, pad_value=0, axis=-1) # TODO padding
else:
y_list = [tf.cast(y, dtype=tf.float32)]
tempo_mean_list, period_mean_list, beats_list = None, None, None
for y, index in zip(y_list, range(len(y_list))):
# Bandpass filter audio
if filter:
y = bandpass_filter_audio(y[tf.newaxis,:], f_low=f_low, f_high=f_high)
# Compute phase and period
pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift = plp_tf(
y=y, sr=sr,
tempo_min=tempo_min,
tempo_max=tempo_max,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.2,
loudness_max=1.)
if tempo_mean_list is None:
tempo_mean_list = [F_mean] # in Hz
period_mean_list = [mean_offset/sr] # in seconds
else:
tempo_mean_list.append(F_mean) # in Hz
period_mean_list.append(mean_offset/sr) # in seconds
# Compute beat positions via local maxima
beats = find_local_maxima(tf.clip_by_value(pulse,
clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
# correct timing in each chunk
beats = tf.cast(beats, dtype=tf.float32) + (tf.cast(index, dtype=tf.float32) * pulse.shape[0])
beats = beats - padding_seconds*sr_ #remove padding #TODO fix
if beats_list is None:
beats_list = beats
else:
beats_list = np.concatenate([beats_list, beats], axis=0)
# Optionally plot tempogram and pulse for each input
if plot:
plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, 1)
plot_librosa_tempogram(y.numpy(), sr)
# samples to time
beats_list = np.asarray(beats_list) / sr_
return tempo_mean_list, period_mean_list, beats_list, oenv.numpy()
"""Helper functions"""
def plot_librosa_tempogram(y, sr, hop_length = 512):
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
tempogram = librosa.feature.fourier_tempogram(onset_envelope=oenv, sr=sr,
hop_length=hop_length)
librosa.display.specshow(np.abs(tempogram), sr=sr, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.title('Librosa Fourier tempogram')
plt.show()
def plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, hop_length, plot_pulse=True):
"""Plots tempogram and local pulse."""
tempogram = tempogram.numpy()
librosa.display.specshow(np.abs(tempogram), sr=sr_, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.show()
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=0.1,
clip_value_max=1.))[1:]
if plot_pulse:
oenv = oenv.numpy()
pulse = pulse.numpy()
plt.plot(oenv, color="black")
plt.plot(pulse, color="blue")
plt.plot(peaks, pulse[peaks.numpy()], "ro")
plt.show()
| weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean, dtype=tf.float32), axis = 0) | conditional_block |
PDDSP_encoder.py | """ Predictive Encoder"""
import sys
sys.path.append('APC')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display  # specshow lives in the display submodule; explicit import avoids AttributeError on older librosa
import ddsp.core as core
import PLP_model.PDDSP_spectral_ops as PDDSP_spectral_ops
def np_diff(a, n=1, axis=-1):
"""Tensorflow implementation of np.diff"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return np_diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def audio_to_spectralflux_tf(audio, N, H, Fs):
""" computes novelty via differences in the spectral energy between adjacent frames """
X = tf.transpose(PDDSP_spectral_ops.stft(audio, N, H,
fft_length=N, pad_end=False, center=True, # todo centering?
window_fn=tf.signal.hann_window))
gamma = 10 # todo tune compression prior, make adaptive?
Y = tf.math.log(1 + gamma * np.abs(X))
Y_diff = np_diff(Y, n=1)
Y_diff = tf.clip_by_value(Y_diff, clip_value_min=0., clip_value_max=1000000.) # todo
# todo replace the audio filtering with adaptive weighting of FT bins:
nov = tf.reduce_mean(Y_diff, axis=0) # todo tune aggregation function
nov = tf.concat([nov, np.array([0])], axis=0)
Fs_nov = Fs / H
nov -= tf.math.reduce_mean(nov) # todo tune output normalization
nov = tf.clip_by_value(nov, clip_value_min=0., clip_value_max=1000000.)
nov /= tf.math.reduce_max(nov) # normalize
return nov, Fs_nov
def get_slope(prev, cur):
return tf.cond(prev[0] < cur, lambda: (cur, ascending_or_valley(prev, cur)), lambda: (cur, descending_or_peak(prev, cur)))
def ascending_or_valley(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('A'), lambda: np.array('V'))
def descending_or_peak(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('P'), lambda: np.array('D'))
def label_local_extrema(tens):
"""Return a vector of chars indicating ascending, descending, peak, or valley slopes"""
initializer = (np.array(0, dtype=np.float32), np.array('A'))
slope = tf.scan(get_slope, tens, initializer)
return slope[1][1:]
def | (tens):
"""Tensorflow peak picking via local maxima
Returns the indices of the local maxima of the first dimension of the tensor
Based on https://stackoverflow.com/questions/48178286/finding-local-maxima-with-tensorflow
"""
return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P')))
def fft_frequencies(sr=22050, n_fft=2048):
"""Tensorflow-based implementation of np.fft.fftfreq """
# TODO endpoint=True
return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32))
def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
"""Tensorflow-based implementation of librosa.core.fourier_tempo_frequencies"""
return fft_frequencies(sr=sr * 60 / float(hop_length), n_fft=win_length)
def bandpass_filter_audio(audio, f_low=400, f_high=450):
"""Bandpass filters audio to given frequency range"""
filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)
filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)
return tf.squeeze(filtered_audio)
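# Usage sketch: encode_song() optionally narrows the input to this band before novelty
# extraction (illustrative call, mirroring the defaults used there):
#   filtered = bandpass_filter_audio(audio[tf.newaxis, :], f_low=400, f_high=450)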
def plp_tf(
y,
sr=22050,
tempo_min=30,
tempo_max=300,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.1,
loudness_max=1.,
prior=None):
"""Tensorflow-based implementation of librosa.beat.plp
Process chain: audio -> spectral flux novelty -> Fourier tempogram -> local pulse """
y = tf.squeeze(y)
# get spectral flux novelty
oenv, sr_ = audio_to_spectralflux_tf(y, win_length_novelty, hop_length_novelty, sr)
# get fourier tempogram
tempogram = tf.transpose(PDDSP_spectral_ops.stft(oenv, win_length,
frame_step=hop_length,
fft_length=win_length, pad_end=False,
center=True, window_fn=tf.signal.hann_window))
# restrict to tempo range prior
tempo_frequencies = tf.cast(fourier_tempo_frequencies(sr=sr_,
hop_length=hop_length,
win_length=win_length), dtype=tf.float32)
mask = tempo_frequencies < tempo_max
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
mask = tempo_frequencies > tempo_min
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
# discard everything below the peak
ftmag = tf.math.log1p(1e6 * np.abs(tempogram))
if prior is not None:
log_prob = tf.squeeze(prior.log_prob(tempo_frequencies))
log_prob = tf.tile(log_prob[:, tf.newaxis], [1, ftmag.shape[1]])
ftmag += log_prob
peak_values = tf.math.reduce_max(ftmag, axis=0, keepdims=True)
peak_values = tf.tile(peak_values, [ftmag.shape[0], 1])
tempogram = tf.cast(ftmag >= peak_values, dtype=tempogram.dtype) * tempogram
# todo keep only phase
#ftgram = tempogram.numpy()
#import librosa
#ftgram /= librosa.util.tiny(ftgram) ** 0.5 + np.abs(ftgram.max(axis=0, keepdims=True))
#tempogram = tf.cast(ftgram, dtype=tf.complex64)
# Compute pulse by inverting the tempogram
pulse = PDDSP_spectral_ops.inverse_stft(
tf.transpose(tempogram), win_length, hop_length, fft_length=win_length, center=True,
window_fn=tf.signal.inverse_stft_window_fn(hop_length, forward_window_fn=tf.signal.hann_window))
# retain only the positive part and normalize
pulse /= tf.math.reduce_max(pulse)
pulse -= tf.math.reduce_mean(pulse)
pulse = tf.clip_by_value(pulse, clip_value_min=0, clip_value_max=100000)
# compute mean period and expected next onset position
F_mean = dominant_freq_from_tempogram(tempogram, tempo_frequencies)
period_mean, mean_offset, next_onset_shift, peaks = period_from_pulse(pulse, F_mean,
sr=sr_, loudness_min=loudness_min,
loudness_max=loudness_max)
period_mean, next_onset_shift, mean_offset = (period_mean/sr_)*sr, (next_onset_shift/sr_)*sr, (mean_offset/sr_)*sr
return pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift
def period_from_pulse(pulse, F_mean_in_Hz, sr, loudness_min=0.1, loudness_max=1.):
"""Compute mean period and the next expected onset position"""
# Find last peak in the pulse
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
first_peak = tf.cast(peaks[0], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
last_peak = tf.cast(peaks[-1], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
# return average offset for each peak
mean_offset = tf.math.reduce_mean(tf.cast([tf.math.floormod(tf.cast(peak, dtype=tf.int64), tf.cast(sr, dtype=tf.int64))
for peak in peaks], dtype=tf.float32))
# Compute mean period
period_mean = (1/F_mean_in_Hz) * sr
# Predict the first onset in the next audio input
next_onset_shift = tf.abs(period_mean - (tf.cast(pulse.shape[0], dtype=tf.float32) - last_peak))
next_onset_shift = tf.math.floormod(next_onset_shift, period_mean)
return period_mean, mean_offset, next_onset_shift, peaks
def dominant_freq_from_tempogram(tempogram, tempo_frequencies, return_Hz = True):
"""Calculate dominant frequency from tempogram."""
tempo_BPM_max = tempo_frequencies \
* tf.cast(tf.math.abs(tempogram[:, 0])
== tf.math.reduce_max(tf.math.abs(tempogram[:, 0])),
tempo_frequencies.dtype)
if return_Hz:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max)/60, dtype=tf.float32)
else:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max), dtype=tf.float32)
weights = tf.cast(tf.math.abs(tempogram[:, 0]), dtype=tf.float32)
weighted_mean = tf.nn.weighted_moments(tempo_frequencies, axes=[0], frequency_weights=weights)[0]
if return_Hz:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean/60, dtype=tf.float32), axis = 0)
else:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean, dtype=tf.float32), axis = 0)
dominant_tempo = tf.expand_dims(dominant_tempo, axis=0)
out = tf.concat([dominant_tempo, weighted_mean_tempo], axis=0)
return tf.cast(out, dtype=tf.float32)
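# Reading the result (illustrative sketch, not in the original source): with
# return_Hz=True the returned length-2 float32 tensor is
# [argmax tempo, magnitude-weighted mean tempo] in Hz, so BPM is a factor of 60 away:
# F = dominant_freq_from_tempogram(tempogram, tempo_frequencies)
# bpm_argmax, bpm_weighted = (F * 60.).numpy()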
def encode_song(y, sr, chunks=8,
tempo_min=60,
tempo_max=300,
f_low=400, f_high=450,
loudness_min=0.1, loudness_max=1,
filter=False, plot=True,
padding_seconds=4,
frame_step=0.1):
"""Run PLP encoder over all chunks in a song"""
if chunks != 0:
y_list = tf.signal.frame(y, sr*chunks, int(sr*frame_step), pad_end=True, pad_value=0, axis=-1) # TODO padding
else:
y_list = [tf.cast(y, dtype=tf.float32)]
tempo_mean_list, period_mean_list, beats_list = None, None, None
for y, index in zip(y_list, range(len(y_list))):
# Bandpass filter audio
if filter:
y = bandpass_filter_audio(y[tf.newaxis,:], f_low=f_low, f_high=f_high)
# Compute phase and period
pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift = plp_tf(
y=y, sr=sr,
tempo_min=tempo_min,
tempo_max=tempo_max,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.2,
loudness_max=1.)
if tempo_mean_list is None:
tempo_mean_list = [F_mean] # in Hz
period_mean_list = [mean_offset/sr] # in seconds
else:
tempo_mean_list.append(F_mean) # in Hz
period_mean_list.append(mean_offset/sr) # in seconds
# Compute beat positions via local maxima
beats = find_local_maxima(tf.clip_by_value(pulse,
clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
# correct timing in each chunk
beats = tf.cast(beats, dtype=tf.float32) + (tf.cast(index, dtype=tf.float32) * pulse.shape[0])
beats = beats - padding_seconds*sr_ #remove padding #TODO fix
if beats_list is None:
beats_list = beats
else:
beats_list = np.concatenate([beats_list, beats], axis=0)
# Optionally plot tempogram and pulse for each input
if plot:
plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, 1)
plot_librosa_tempogram(y.numpy(), sr)
# samples to time
beats_list = np.asarray(beats_list) / sr_
return tempo_mean_list, period_mean_list, beats_list, oenv.numpy()
"""Helper functions"""
def plot_librosa_tempogram(y, sr, hop_length = 512):
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
tempogram = librosa.feature.fourier_tempogram(onset_envelope=oenv, sr=sr,
hop_length=hop_length)
librosa.display.specshow(np.abs(tempogram), sr=sr, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.title('Librosa Fourier tempogram')
plt.show()
def plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, hop_length, plot_pulse=True):
"""Plots tempogram and local pulse."""
tempogram = tempogram.numpy()
librosa.display.specshow(np.abs(tempogram), sr=sr_, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.show()
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=0.1,
clip_value_max=1.))[1:]
if plot_pulse:
oenv = oenv.numpy()
pulse = pulse.numpy()
plt.plot(oenv, color="black")
plt.plot(pulse, color="blue")
plt.plot(peaks, pulse[peaks.numpy()], "ro")
plt.show()
| find_local_maxima | identifier_name |
PDDSP_encoder.py | """ Predictive Encoder"""
import sys
sys.path.append('APC')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import librosa
import ddsp.core as core
import PLP_model.PDDSP_spectral_ops as PDDSP_spectral_ops
def np_diff(a, n=1, axis=-1):
"""Tensorflow implementation of np.diff"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return np_diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
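# Sanity check (hedged sketch, assumes TF eager execution): np_diff is meant to
# mirror np.diff on 1-D tensors, so a toy input should agree with NumPy:
# _t = tf.constant([1., 4., 9., 16.])
# np_diff(_t, n=1)          # expected: [3., 5., 7.]
# np.diff(_t.numpy(), n=1)  # reference: [3., 5., 7.]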
def audio_to_spectralflux_tf(audio, N, H, Fs):
""" computes novelty via differences in the spectral energy between adjacent frames """
X = tf.transpose(PDDSP_spectral_ops.stft(audio, N, H,
fft_length=N, pad_end=False, center=True, # todo centering?
window_fn=tf.signal.hann_window))
gamma = 10 # todo tune compression prior, make adaptive?
Y = tf.math.log(1 + gamma * np.abs(X))
Y_diff = np_diff(Y, n=1)
Y_diff = tf.clip_by_value(Y_diff, clip_value_min=0., clip_value_max=1000000.) # todo
# todo replace the audio filtering with adaptive weighting of FT bins:
nov = tf.reduce_mean(Y_diff, axis=0) # todo tune aggregation function
nov = tf.concat([nov, np.array([0])], axis=0)
Fs_nov = Fs / H
nov -= tf.math.reduce_mean(nov) # todo tune output normalization
nov = tf.clip_by_value(nov, clip_value_min=0., clip_value_max=1000000.)
nov /= tf.math.reduce_max(nov) # normalize
return nov, Fs_nov
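# Scale note (sketch; the random input is only illustrative): the novelty signal is
# sampled at Fs_nov = Fs / H, e.g. 22050 / 256 ≈ 86.1 Hz (one value every ~11.6 ms)
# for the defaults plp_tf uses below:
# _nov, _fs_nov = audio_to_spectralflux_tf(tf.random.normal([22050]), 1024, 256, 22050)
# _fs_nov  # ≈ 86.13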
def get_slope(prev, cur):
return tf.cond(prev[0] < cur, lambda: (cur, ascending_or_valley(prev, cur)), lambda: (cur, descending_or_peak(prev, cur)))
def ascending_or_valley(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('A'), lambda: np.array('V'))
def descending_or_peak(prev, cur):
return tf.cond(tf.logical_or(tf.equal(prev[1], 'A'), tf.equal(prev[1], 'V')), lambda: np.array('P'), lambda: np.array('D'))
def label_local_extrema(tens):
"""Return a vector of chars indicating ascending, descending, peak, or valley slopes"""
initializer = (np.array(0, dtype=np.float32), np.array('A'))
slope = tf.scan(get_slope, tens, initializer)
return slope[1][1:]
def find_local_maxima(tens):
"""Tensorflow peak picking via local maxima
Returns the indices of the local maxima of the first dimension of the tensor
Based on https://stackoverflow.com/questions/48178286/finding-local-maxima-with-tensorflow
"""
return tf.squeeze(tf.where(tf.equal(label_local_extrema(tens), 'P')))
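# Toy example (hedged sketch, eager mode assumed): for a single-bump pulse the
# 'P' labels land so that the returned index points at the local maximum:
# find_local_maxima(tf.constant([0., .5, 1., .4, .2]))  # expected: 2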
def fft_frequencies(sr=22050, n_fft=2048):
"""Tensorflow-based implementation of np.fft.fftfreq """
# TODO endpoint=True
return tf.linspace(0, tf.cast(sr/2., dtype=tf.int32), tf.cast(1. + n_fft // 2., dtype=tf.int32))
def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
"""Tensorflow-based implementation of librosa.core.fourier_tempo_frequencies"""
return fft_frequencies(sr=sr * 60 / float(hop_length), n_fft=win_length)
def bandpass_filter_audio(audio, f_low=400, f_high=450):
"""Bandpass filters audio to given frequency range"""
filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)
filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)
return tf.squeeze(filtered_audio)
def plp_tf(
y,
sr=22050,
tempo_min=30,
tempo_max=300,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.1,
loudness_max=1.,
prior=None):
"""Tensorflow-based implementation of librosa.beat.plp
Process chain: audio -> spectral flux novelty -> Fourier tempogram -> local pulse """
y = tf.squeeze(y)
# get spectral flux novelty
oenv, sr_ = audio_to_spectralflux_tf(y, win_length_novelty, hop_length_novelty, sr)
# get fourier tempogram
tempogram = tf.transpose(PDDSP_spectral_ops.stft(oenv, win_length,
frame_step=hop_length,
fft_length=win_length, pad_end=False,
center=True, window_fn=tf.signal.hann_window))
# restrict to tempo range prior
tempo_frequencies = tf.cast(fourier_tempo_frequencies(sr=sr_,
hop_length=hop_length,
win_length=win_length), dtype=tf.float32)
mask = tempo_frequencies < tempo_max
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
mask = tempo_frequencies > tempo_min
mask = tf.tile(mask[:, tf.newaxis], [1, tempogram.shape[1]])
tempogram = tempogram * tf.cast(mask, dtype=tempogram.dtype)
# discard everything below the peak
ftmag = tf.math.log1p(1e6 * np.abs(tempogram))
if prior is not None:
log_prob = tf.squeeze(prior.log_prob(tempo_frequencies))
log_prob = tf.tile(log_prob[:, tf.newaxis], [1, ftmag.shape[1]])
ftmag += log_prob
peak_values = tf.math.reduce_max(ftmag, axis=0, keepdims=True)
peak_values = tf.tile(peak_values, [ftmag.shape[0], 1])
tempogram = tf.cast(ftmag >= peak_values, dtype=tempogram.dtype) * tempogram
# todo keep only phase
#ftgram = tempogram.numpy()
#import librosa
#ftgram /= librosa.util.tiny(ftgram) ** 0.5 + np.abs(ftgram.max(axis=0, keepdims=True))
#tempogram = tf.cast(ftgram, dtype=tf.complex64)
# Compute pulse by inverting the tempogram
pulse = PDDSP_spectral_ops.inverse_stft(
tf.transpose(tempogram), win_length, hop_length, fft_length=win_length, center=True,
window_fn=tf.signal.inverse_stft_window_fn(hop_length, forward_window_fn=tf.signal.hann_window))
# retain only the positive part and normalize
pulse /= tf.math.reduce_max(pulse)
pulse -= tf.math.reduce_mean(pulse)
pulse = tf.clip_by_value(pulse, clip_value_min=0, clip_value_max=100000)
# compute mean period and expected next onset position
F_mean = dominant_freq_from_tempogram(tempogram, tempo_frequencies)
period_mean, mean_offset, next_onset_shift, peaks = period_from_pulse(pulse, F_mean,
sr=sr_, loudness_min=loudness_min,
loudness_max=loudness_max)
period_mean, next_onset_shift, mean_offset = (period_mean/sr_)*sr, (next_onset_shift/sr_)*sr, (mean_offset/sr_)*sr
return pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift
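# Usage sketch (assumptions: eager mode; librosa.example() only fetches a demo clip
# and is not otherwise required by this module):
# y, sr = librosa.load(librosa.example('brahms'), sr=22050, mono=True)
# pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift = \
#     plp_tf(y=tf.constant(y), sr=sr, tempo_min=30, tempo_max=300)
# beat_candidates = find_local_maxima(tf.clip_by_value(pulse, 0.1, 1.))[1:]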
| def period_from_pulse(pulse, F_mean_in_Hz, sr, loudness_min=0.1, loudness_max=1.):
"""Compute mean period and the next expected onset position"""
# Find last peak in the pulse
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
first_peak = tf.cast(peaks[0], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
last_peak = tf.cast(peaks[-1], dtype=tf.float32) if peaks.shape[0] > 1 else 0.
# return average offset for each peak
mean_offset = tf.math.reduce_mean(tf.cast([tf.math.floormod(tf.cast(peak, dtype=tf.int64), tf.cast(sr, dtype=tf.int64))
for peak in peaks], dtype=tf.float32))
# Compute mean period
period_mean = (1/F_mean_in_Hz) * sr
# Predict the first onset in the next audio input
next_onset_shift = tf.abs(period_mean - (tf.cast(pulse.shape[0], dtype=tf.float32) - last_peak))
next_onset_shift = tf.math.floormod(next_onset_shift, period_mean)
return period_mean, mean_offset, next_onset_shift, peaks
def dominant_freq_from_tempogram(tempogram, tempo_frequencies, return_Hz = True):
"""Calculate dominant frequency from tempogram."""
tempo_BPM_max = tempo_frequencies \
* tf.cast(tf.math.abs(tempogram[:, 0])
== tf.math.reduce_max(tf.math.abs(tempogram[:, 0])),
tempo_frequencies.dtype)
if return_Hz:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max)/60, dtype=tf.float32)
else:
dominant_tempo = tf.cast(tf.math.reduce_max(tempo_BPM_max), dtype=tf.float32)
weights = tf.cast(tf.math.abs(tempogram[:, 0]), dtype=tf.float32)
weighted_mean = tf.nn.weighted_moments(tempo_frequencies, axes=[0], frequency_weights=weights)[0]
if return_Hz:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean/60, dtype=tf.float32), axis = 0)
else:
weighted_mean_tempo = tf.expand_dims(tf.cast(weighted_mean, dtype=tf.float32), axis = 0)
dominant_tempo = tf.expand_dims(dominant_tempo, axis=0)
out = tf.concat([dominant_tempo, weighted_mean_tempo], axis=0)
return tf.cast(out, dtype=tf.float32)
def encode_song(y, sr, chunks=8,
tempo_min=60,
tempo_max=300,
f_low=400, f_high=450,
loudness_min=0.1, loudness_max=1,
filter=False, plot=True,
padding_seconds=4,
frame_step=0.1):
"""Run PLP encoder over all chunks in a song"""
if chunks != 0:
y_list = tf.signal.frame(y, sr*chunks, int(sr*frame_step), pad_end=True, pad_value=0, axis=-1) # TODO padding
else:
y_list = [tf.cast(y, dtype=tf.float32)]
tempo_mean_list, period_mean_list, beats_list = None, None, None
for y, index in zip(y_list, range(len(y_list))):
# Bandpass filter audio
if filter:
y = bandpass_filter_audio(y[tf.newaxis,:], f_low=f_low, f_high=f_high)
# Compute phase and period
pulse, tempogram, oenv, sr_, F_mean, period_mean, mean_offset, next_onset_shift = plp_tf(
y=y, sr=sr,
tempo_min=tempo_min,
tempo_max=tempo_max,
hop_length=1,
win_length=512,
hop_length_novelty=256,
win_length_novelty=1024,
loudness_min=0.2,
loudness_max=1.)
if tempo_mean_list is None:
tempo_mean_list = [F_mean] # in Hz
period_mean_list = [mean_offset/sr] # in seconds
else:
tempo_mean_list.append(F_mean) # in Hz
period_mean_list.append(mean_offset/sr) # in seconds
# Compute beat positions via local maxima
beats = find_local_maxima(tf.clip_by_value(pulse,
clip_value_min=loudness_min,
clip_value_max=loudness_max))[1:]
# correct timing in each chunk
beats = tf.cast(beats, dtype=tf.float32) + (tf.cast(index, dtype=tf.float32) * pulse.shape[0])
beats = beats - padding_seconds*sr_ #remove padding #TODO fix
if beats_list is None:
beats_list = beats
else:
beats_list = np.concatenate([beats_list, beats], axis=0)
# Optionally plot tempogram and pulse for each input
if plot:
plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, 1)
plot_librosa_tempogram(y.numpy(), sr)
# samples to time
beats_list = np.asarray(beats_list) / sr_
return tempo_mean_list, period_mean_list, beats_list, oenv.numpy()
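# Usage sketch ('some_song.wav' is a hypothetical path; plot=False keeps the run headless):
# y, sr = librosa.load('some_song.wav', sr=22050, mono=True)
# tempi, offsets, beat_times, novelty = encode_song(tf.constant(y), sr, chunks=8, plot=False)
# tempi[i] holds [argmax, weighted-mean] tempo in Hz per chunk; beat_times is in seconds.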
"""Helper functions"""
def plot_librosa_tempogram(y, sr, hop_length = 512):
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
tempogram = librosa.feature.fourier_tempogram(onset_envelope=oenv, sr=sr,
hop_length=hop_length)
librosa.display.specshow(np.abs(tempogram), sr=sr, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.title('Librosa Fourier tempogram')
plt.show()
def plot_tempogram_and_pulse(tempogram, pulse, oenv, sr_, hop_length, plot_pulse=True):
"""Plots tempogram and local pulse."""
tempogram = tempogram.numpy()
librosa.display.specshow(np.abs(tempogram), sr=sr_, hop_length=hop_length,
x_axis='time', y_axis='fourier_tempo', cmap='magma')
plt.show()
peaks = find_local_maxima(tf.clip_by_value(pulse, clip_value_min=0.1,
clip_value_max=1.))[1:]
if plot_pulse:
oenv = oenv.numpy()
pulse = pulse.numpy()
plt.plot(oenv, color="black")
plt.plot(pulse, color="blue")
plt.plot(peaks, pulse[peaks.numpy()], "ro")
plt.show() | random_line_split |
|
test2.py | import os, sys
import Tkinter
import tkFileDialog
import PIL
from PIL import ImageTk, Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import cv2
from scipy.cluster.vq import kmeans
from skimage import data, img_as_float
#from skimage.measure import compare_ssim as ssim
from skimage.measure import structural_similarity as ssim
LETTERS = ["a","b","c","d","e","f","g","h","i","j","k","l","m",
"n","o","p","q","r","s","t","u","v","w","x","y","z"]
class Rectangle:
def __init__(self, x_param=0, y_param=0, w_param=0, h_param=0):
self.x = x_param
self.y = y_param
self.w = w_param
self.h = h_param
def __str__(self):
return "Width = "+str(self.w)+", Height = "+str(self.h)
class MainWindow:
def __init__(self, master):
self.video = None
self.frame_rate = 0
self.video_length = 0
# The scaled image used for display. Needs to persist for display
self.display_image = None
self.display_ratio = 0
self.awaiting_corners = False
self.corners = []
#Tkinter related fields
self.master = master
self.master.title("Auto Kifu Test2")
self.window_width = root.winfo_screenwidth()
self.window_height = root.winfo_screenheight() - 100
self.master.geometry("%dx%d+0+0" % (self.window_width, self.window_height))
self.master.configure(background='grey')
self.canvas = Tkinter.Canvas(self.master)
self.canvas.place(x=0,
y=0,
width=self.window_width,
height=self.window_height)
self.canvas.bind("<Button-1>", self.mouse_clicked)
self.menubar = Tkinter.Menu(root)
root.config(menu=self.menubar)
self.fileMenu = Tkinter.Menu(self.menubar)
self.fileMenu.add_command(label="Load Image", command=self.load())
self.menubar.add_cascade(label="File", menu=self.fileMenu)
def mouse_clicked(self, event):
if self.awaiting_corners:
self.draw_x(event.x, event.y)
self.corners += [(event.x/self.display_ratio, event.y/self.display_ratio)]
if len(self.corners) == 4:
self.awaiting_corners = False
self.main()
def main(self):
board_positions, crop_window = self.find_grid(self.corners)
frames = self.parse_video(crop_window)
for x in range(len(frames)):
frames[x] = cv2.cvtColor(frames[x], cv2.COLOR_BGR2GRAY)
frames[x] = cv2.GaussianBlur(frames[x], (51, 51), 0)
thresholds = self.determine_thresholds(frames[-1], board_positions)
for x in range(len(frames)):
cv2.imwrite('output/2/frames'+str(x)+'.png', frames[x])
for x in range(len(frames)):
frames[x] = self.parse_frames(frames[x], board_positions, thresholds)
for x in range(1, len(frames)):
print "Board: "+str(x)
self.print_board(frames[x])
output = "(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]SZ[19]"
for i in range(1, len(frames)):
moves = self.frame_difference(frames[i-1], frames[i])
for move in moves:
color = move["color"]
x = LETTERS[move["position"][0]]
y = LETTERS[move["position"][1]]
output += ";"+color+"["+x+y+"]"
output += ")"
file = open("output.txt", "w")
file.write(output)
file.close()
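# Shape of the record written above (illustrative sketch): one ";<colour>[<col><row>]"
# token per detected move, appended to the fixed SGF header, e.g. if a Black and then
# a White stone were detected the file would contain:
# (;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]SZ[19];B[pd];W[dp])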
def find_grid(self, corners):
top_left = corners[0]
bottom_right = corners[2]
board_width = bottom_right[0] - top_left[0]
board_height = bottom_right[1] - top_left[1]
horizontal_spacing = board_width / 18
vertical_spacing = board_height / 18
crop_window = Rectangle()
crop_window.x = int(top_left[0] - horizontal_spacing)
crop_window.y = int(top_left[1] - vertical_spacing)
crop_window.w = int(board_width + (2 * horizontal_spacing))
crop_window.h = int(board_height + (2 * vertical_spacing))
board_positions = []
for x in range(0, 19):
board_positions += [[]]
for y in range(0, 19):
x_coord = int(top_left[0] + horizontal_spacing * x)
y_coord = int(top_left[1] + vertical_spacing * y)
x_coord -= crop_window.x
y_coord -= crop_window.y
board_positions[x] += [(y_coord, x_coord)]
return board_positions, crop_window
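# Usage sketch (assumption: the four corners are clicked top-left first and then
# clockwise, in original-frame pixel coordinates, so corners[0]/corners[2] are the
# top-left/bottom-right grid corners; the numbers below are illustrative):
# positions, crop = self.find_grid([(100., 120.), (940., 118.), (942., 960.), (98., 962.)])
# positions[x][y] -> (row, col) pixel of intersection (x, y) inside the cropped frame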
def print_board(self, frame):
|
def parse_video(self, crop_window):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out1 = cv2.VideoWriter('output.avi', fourcc, 30.0, (crop_window.w, crop_window.h))
success, current_frame = self.video.read()
current_frame = current_frame[crop_window.y:crop_window.y + crop_window.h,
crop_window.x:crop_window.x + crop_window.w]
differences = []
final_video = [current_frame]
while (self.video.isOpened() and success):
last_frame = current_frame
success, current_frame = self.video.read()
if not success: break
current_frame = current_frame[crop_window.y:crop_window.y+crop_window.h,
crop_window.x:crop_window.x+crop_window.w]
out1.write(current_frame)
s = self.mse_total(last_frame, current_frame)
#s = ssim(last_frame, current_frame) # Doesn't Work
differences += [s]
recently_still = True
still_duration = 15
for x in range(still_duration):
if x<len(differences) and differences[-x]>4:
recently_still = False
if recently_still:
#out1.write(current_frame)
s = self.mse_total(current_frame, final_video[-1])
if s>20:
final_video += [current_frame]
#plt.hist(differences, bins=400)
plt.title("Frame Difference Historgram")
plt.xlabel("Difference (mean squared error)")
plt.ylabel("Number of Frames")
#plt.show()
time = np.arange(0, self.video_length/self.frame_rate, 1.0/self.frame_rate)
time = time[:len(differences)]
#plt.plot(time, differences)
plt.xlabel('time (s)')
plt.ylabel('Difference')
plt.title('MSE over Time')
plt.grid(True)
#plt.show()
out1.release()
'''
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out2 = cv2.VideoWriter('output2.avi', fourcc, 30.0,
(self.crop_w, self.crop_h))
for x in final_video:
for y in range(30):
out2.write(x)
out2.release()
'''
return final_video
def mse_total(self, imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
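# Numeric check (sketch): the sum runs over every element (colour channels included)
# while the divisor is only height*width, so BGR frames score roughly 3x higher than
# greyscale ones for the same per-pixel error. Two 2x2 greyscale frames differing
# by 1 everywhere:
# a = np.zeros((2, 2), dtype=np.uint8); b = np.ones((2, 2), dtype=np.uint8)
# self.mse_total(a, b)  # -> 1.0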
def mse_image(self, imageA, imageB):
return (imageA - imageB) ** 2
def determine_thresholds(self, image, board_positions):
samples = []
for x in range(0, 19):
for y in range(0, 19):
position = board_positions[x][y]
samples += [float(image[position[0]][position[1]])]
plt.hist(samples, bins=255)
plt.title("Intersection Intensity Historgram")
plt.xlabel("Intensity (Greyscale)")
plt.ylabel("Number of Intersections")
# plt.show()
centroids, _ = kmeans(samples, 3)
plt.axvline(x=centroids[0], color="red")
plt.axvline(x=centroids[1], color="red")
plt.axvline(x=centroids[2], color="red")
plt.show()
min = 0
mid = 0
max = 0
for x in range(0, 3):
if centroids[x] < centroids[min]:
min = x
if centroids[x] > centroids[max]:
max = x
for x in range(0, 3):
if x != min and x != max:
mid = x
min = centroids[min]
mid = centroids[mid]
max = centroids[max]
threshold1 = (min + mid) / 2
threshold2 = (max + mid) / 2
print "threshold 1 = "+str(threshold1)
print "threshold 2 = "+str(threshold2)
#return [threshold1, threshold2]
return [120,185]
def parse_frames(self, image, board_positions, thresholds):
return_array = []
for x in range(0, 19):
return_array += [[]]
for y in range(0, 19):
position = board_positions[x][y]
intensity = image[position[0]][position[1]]
if intensity < thresholds[0]:
return_array[x] += ["B"]
elif intensity > thresholds[1]:
return_array[x] += ["W"]
else:
return_array[x] += ["+"]
return return_array
def frame_difference(self, former_frame, later_frame):
moves = []
for x in range(19):
for y in range(19):
if (later_frame[x][y] != former_frame[x][y]
and former_frame[x][y] == "+"):
moves += [{"color":later_frame[x][y],
"position":(x,y)}]
return moves
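# Example (sketch): an intersection that changes from "+" to "B" between frames is
# reported as a Black move at that grid coordinate:
# former = [["+"] * 19 for _ in range(19)]
# later = [row[:] for row in former]; later[3][15] = "B"
# self.frame_difference(former, later)  # -> [{"color": "B", "position": (3, 15)}]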
def display_grid(self, board_positions):
for x in range(0, 19):
for y in range(0, 19):
self.draw_x(board_positions[x][y][1],
board_positions[x][y][0],
transform=self.display_ratio)
def draw_x(self, x, y, radius=10, width=3, color = "red", transform = 1):
self.canvas.create_line((x-radius)*transform,
(y-radius)*transform,
(x+radius)*transform,
(y+radius)*transform,
width=width,
fill=color)
self.canvas.create_line((x-radius)*transform,
(y+radius)*transform,
(x+radius)*transform,
(y-radius)*transform,
width=width,
fill=color)
def load(self):
# Load Video
dir_path = os.path.dirname(os.path.realpath(__file__))
path = tkFileDialog.askopenfilename(initialdir=dir_path,
title="Select file",
filetypes=(
("mp4 files", "*.mp4"),
("jpeg files", "*.jpg"),
("png files", "*.png")))
self.video = cv2.VideoCapture(path)
self.frame_rate = self.video.get(cv2.CAP_PROP_FPS)
self.video_length = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
success, first_frame = self.video.read()
image_height, image_width = first_frame.shape[:2]
# Display Image
self.display_ratio = float(self.window_height - 200)/image_height
resize_dimensions = (int(image_width*self.display_ratio), int(image_height*self.display_ratio))
resized_image = cv2.resize(first_frame, resize_dimensions, interpolation=cv2.INTER_CUBIC)
tk_image = self.convert_cv2_to_PIL(resized_image)
self.display_image = PIL.ImageTk.PhotoImage(tk_image)
self.canvas.create_image(0, 0, anchor ="nw", image = self.display_image)
# cue corner collection
self.awaiting_corners = True
def convert_cv2_to_PIL(self, cv2image):
cv2_im = cv2.cvtColor(cv2image, cv2.COLOR_BGR2RGB)
return PIL.Image.fromarray(cv2_im)
root = Tkinter.Tk()
main_window = MainWindow(root)
root.mainloop()
| print "-------------------"
for y in range(19):
string = ""
for x in range(19):
string += frame[x][y]
print string
print "-------------------" | identifier_body |